net/bnxt: synchronize between flow related functions
drivers/net/bnxt/bnxt_flow.c

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_vnic.h"
#include "bnxt_util.h"
#include "hsi_struct_def_dpdk.h"

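/* Sanity-check the argument triplet of a flow request: each of pattern,
 * actions and attr must be non-NULL before any parsing is attempted.
 */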
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL,
                                   "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL,
                                   "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL,
                                   "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}

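/* Helpers that skip VOID entries in the pattern/action arrays. rte_flow
 * guarantees each array is terminated by an END entry, so these loops
 * always terminate.
 */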
static const struct rte_flow_item *
bnxt_flow_non_void_item(const struct rte_flow_item *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return cur;
                cur++;
        }
}

static const struct rte_flow_action *
bnxt_flow_non_void_action(const struct rte_flow_action *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return cur;
                cur++;
        }
}

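/* Walk the pattern once to decide which HW filter type to use: returns 1
 * when an ntuple (5-tuple) filter is needed, 0 when an exact-match/L2
 * filter suffices, negative on invalid combinations (e.g. VLAN matching
 * together with ntuple matching).
 */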
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
                       struct rte_flow_error *error)
{
        const struct rte_flow_item *item =
                bnxt_flow_non_void_item(pattern);
        int use_ntuple = 1;
        bool has_vlan = false;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ANY:
                case RTE_FLOW_ITEM_TYPE_ETH:
                        use_ntuple = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        use_ntuple = 0;
                        has_vlan = true;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
                case RTE_FLOW_ITEM_TYPE_TCP:
                case RTE_FLOW_ITEM_TYPE_UDP:
                        /* FALLTHROUGH */
                        /* need ntuple match, reset exact match */
                        use_ntuple |= 1;
                        break;
                default:
                        PMD_DRV_LOG(DEBUG, "Unknown Flow type\n");
                        use_ntuple |= 0;
                }
                item++;
        }

        if (has_vlan && use_ntuple) {
                PMD_DRV_LOG(ERR,
                            "VLAN flow cannot use NTUPLE filter\n");
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   item,
                                   "Cannot use VLAN with NTUPLE");
                return -rte_errno;
        }

        return use_ntuple;
}

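/* Translate the rte_flow pattern into a bnxt_filter_info, filling in the
 * match fields and the corresponding HWRM "enables" bits for either an
 * ntuple or an exact-match filter.
 */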
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                                  const struct rte_flow_attr *attr,
                                  const struct rte_flow_item pattern[],
                                  struct rte_flow_error *error,
                                  struct bnxt_filter_info *filter)
{
        const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;
        const struct rte_flow_item_gre *gre_spec;
        const struct rte_flow_item_gre *gre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;
        uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
        uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
        const struct rte_flow_item_vf *vf_spec;
        uint32_t tenant_id_be = 0, valid_flags = 0;
        bool vni_masked = false;
        bool tni_masked = false;
        uint32_t en_ethertype;
        uint8_t inner = 0;
        uint32_t vf = 0;
        uint32_t en = 0;
        int use_ntuple;
        int dflt_vnic;

        use_ntuple = bnxt_filter_type_check(pattern, error);
        if (use_ntuple < 0)
                return use_ntuple;
        PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);

        filter->filter_type = use_ntuple ?
                HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
        en_ethertype = use_ntuple ?
                NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
                EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (item->last) {
                        /* last or range is NOT supported as match criteria */
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "No support for range");
                        return -rte_errno;
                }

                if (!item->spec || !item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "spec/mask is NULL");
                        return -rte_errno;
                }

                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ANY:
                        inner =
                        ((const struct rte_flow_item_any *)item->spec)->num > 3;
                        if (inner)
                                PMD_DRV_LOG(DEBUG, "Parse inner header\n");
                        break;
                case RTE_FLOW_ITEM_TYPE_ETH:
                        if (!item->spec || !item->mask)
                                break;

                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        /* Source and destination MAC address masks must not
                         * be partially set: each must be either all 0's or
                         * all 1's.
                         */
                        if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
                             !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
                            (!rte_is_zero_ether_addr(&eth_mask->dst) &&
                             !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "MAC_addr mask not valid");
                                return -rte_errno;
                        }

                        /* Mask is not allowed. Only exact matches are */
                        if (eth_mask->type &&
                            eth_mask->type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "ethertype mask not valid");
                                return -rte_errno;
                        }

                        if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                                if (!rte_is_unicast_ether_addr(&eth_spec->dst)) {
                                        rte_flow_error_set(error,
                                                           EINVAL,
                                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                                           item,
                                                           "DMAC is invalid");
                                        return -rte_errno;
                                }
                                rte_memcpy(filter->dst_macaddr,
                                           &eth_spec->dst, RTE_ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
                                valid_flags |= inner ?
                                        BNXT_FLOW_L2_INNER_DST_VALID_FLAG :
                                        BNXT_FLOW_L2_DST_VALID_FLAG;
                                filter->priority = attr->priority;
                                PMD_DRV_LOG(DEBUG,
                                            "Creating a priority flow\n");
                        }

                        if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
                                if (!rte_is_unicast_ether_addr(&eth_spec->src)) {
                                        rte_flow_error_set(error,
                                                           EINVAL,
                                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                                           item,
                                                           "SMAC is invalid");
                                        return -rte_errno;
                                }
                                rte_memcpy(filter->src_macaddr,
                                           &eth_spec->src, RTE_ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
                                valid_flags |= inner ?
                                        BNXT_FLOW_L2_INNER_SRC_VALID_FLAG :
                                        BNXT_FLOW_L2_SRC_VALID_FLAG;
                        } /*
                           * else {
                           *  PMD_DRV_LOG(ERR, "Handle this condition\n");
                           * }
                           */
                        if (eth_mask->type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(eth_spec->type);
                                en |= en_ethertype;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec = item->spec;
                        vlan_mask = item->mask;
                        if (en & en_ethertype) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN TPID matching is not"
                                                   " supported");
                                return -rte_errno;
                        }
                        if (vlan_mask->tci &&
                            vlan_mask->tci == RTE_BE16(0x0fff)) {
                                /* Only the VLAN ID can be matched. */
                                filter->l2_ovlan =
                                        rte_be_to_cpu_16(vlan_spec->tci &
                                                         RTE_BE16(0x0fff));
                                en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
                        } else {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN mask is invalid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type &&
                            vlan_mask->inner_type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "inner ethertype mask not"
                                                   " valid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(vlan_spec->inner_type);
                                en |= en_ethertype;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        /* If mask is not involved, we could use EM filters. */
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.type_of_service ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.time_to_live ||
                            ipv4_mask->hdr.next_proto_id ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
                        filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

                        if (ipv4_mask->hdr.src_addr) {
                                filter->src_ipaddr_mask[0] =
                                        ipv4_mask->hdr.src_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }

                        if (ipv4_mask->hdr.dst_addr) {
                                filter->dst_ipaddr_mask[0] =
                                        ipv4_mask->hdr.dst_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }

                        filter->ip_addr_type = use_ntuple ?
                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;

                        if (ipv4_spec->hdr.next_proto_id) {
                                filter->ip_protocol =
                                        ipv4_spec->hdr.next_proto_id;
                                if (use_ntuple)
                                        en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
                                else
                                        en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv6_mask->hdr.vtc_flow ||
                            ipv6_mask->hdr.payload_len ||
                            ipv6_mask->hdr.proto ||
                            ipv6_mask->hdr.hop_limits) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

                        rte_memcpy(filter->src_ipaddr,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->dst_ipaddr,
                                   ipv6_spec->hdr.dst_addr, 16);

                        if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
                                                   16)) {
                                rte_memcpy(filter->src_ipaddr_mask,
                                           ipv6_mask->hdr.src_addr, 16);
                                en |= !use_ntuple ? 0 :
                                    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }

                        if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
                                                   16)) {
                                rte_memcpy(filter->dst_ipaddr_mask,
                                           ipv6_mask->hdr.dst_addr, 16);
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }

                        filter->ip_addr_type = use_ntuple ?
                                NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
                                EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        /* Check TCP mask. Only DST & SRC ports are maskable */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        filter->src_port = tcp_spec->hdr.src_port;
                        filter->dst_port = tcp_spec->hdr.dst_port;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (tcp_mask->hdr.dst_port) {
                                filter->dst_port_mask = tcp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }

                        if (tcp_mask->hdr.src_port) {
                                filter->src_port_mask = tcp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        filter->src_port = udp_spec->hdr.src_port;
                        filter->dst_port = udp_spec->hdr.dst_port;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (udp_mask->hdr.dst_port) {
                                filter->dst_port_mask = udp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }

                        if (udp_mask->hdr.src_port) {
                                filter->src_port_mask = udp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        if (!vxlan_spec && !vxlan_mask) {
                                filter->tunnel_type =
                                CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                                break;
                        }

                        if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
                            vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
                            vxlan_spec->flags != 0x8) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        /* Check if VNI is masked. */
                        if (vxlan_spec && vxlan_mask) {
                                vni_masked =
                                        !!memcmp(vxlan_mask->vni, vni_mask,
                                                 RTE_DIM(vni_mask));
                                if (vni_masked) {
                                        rte_flow_error_set
                                                (error,
                                                 EINVAL,
                                                 RTE_FLOW_ERROR_TYPE_ITEM,
                                                 item,
                                                 "Invalid VNI mask");
                                        return -rte_errno;
                                }

                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           vxlan_spec->vni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (!nvgre_spec && !nvgre_mask) {
                                filter->tunnel_type =
                                CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                                break;
                        }

                        /* Both fields are big-endian; compare against
                         * big-endian constants.
                         */
                        if (nvgre_spec->c_k_s_rsvd0_ver != RTE_BE16(0x2000) ||
                            nvgre_spec->protocol != RTE_BE16(0x6558)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (nvgre_spec && nvgre_mask) {
                                tni_masked =
                                        !!memcmp(nvgre_mask->tni, tni_mask,
                                                 RTE_DIM(tni_mask));
                                if (tni_masked) {
                                        rte_flow_error_set
                                                (error,
                                                 EINVAL,
                                                 RTE_FLOW_ERROR_TYPE_ITEM,
                                                 item,
                                                 "Invalid TNI mask");
                                        return -rte_errno;
                                }
                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           nvgre_spec->tni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_GRE:
                        gre_spec = (const struct rte_flow_item_gre *)item->spec;
                        gre_mask = (const struct rte_flow_item_gre *)item->mask;

                        /* Check if GRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if (!!gre_spec ^ !!gre_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid GRE item");
                                return -rte_errno;
                        }

                        if (!gre_spec && !gre_mask) {
                                filter->tunnel_type =
                                CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE;
                                break;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = item->spec;
                        vf = vf_spec->id;
                        if (!BNXT_PF(bp)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Configuring on a VF!");
                                return -rte_errno;
                        }

                        if (vf >= bp->pdev->max_vfs) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Incorrect VF id!");
                                return -rte_errno;
                        }

                        if (!attr->transfer) {
                                rte_flow_error_set(error,
                                                   ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Matching VF traffic without"
                                                   " affecting it (transfer attribute)"
                                                   " is unsupported");
                                return -rte_errno;
                        }

                        filter->mirror_vnic_id =
                        dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                        if (dflt_vnic < 0) {
                                /* This simply indicates there's no driver
                                 * loaded. This is not an error.
                                 */
                                rte_flow_error_set
                                        (error,
                                         EINVAL,
                                         RTE_FLOW_ERROR_TYPE_ITEM,
                                         item,
                                         "Unable to get default VNIC for VF");
                                return -rte_errno;
                        }

                        filter->mirror_vnic_id = dflt_vnic;
                        en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
                        break;
                default:
                        break;
                }
                item++;
        }
        filter->enables = en;
        filter->valid_flags = valid_flags;

        return 0;
}

/* Parse flow attributes: only the ingress direction is supported. */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr,
                                   "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr,
                                   "No support for egress.");
                return -rte_errno;
        }

        return 0;
}

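/* Scan the flow lists of all active VNICs for an L2 filter whose MAC
 * addresses, ethertype and VLAN fields match the new filter, so it can be
 * reused instead of allocating a duplicate.
 */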
static struct bnxt_filter_info *
bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
        struct bnxt_filter_info *mf, *f0;
        struct bnxt_vnic_info *vnic0;
        struct rte_flow *flow;
        int i;

        vnic0 = &bp->vnic_info[0];
        f0 = STAILQ_FIRST(&vnic0->filter);

        /* This flow has same DST MAC as the port/l2 filter. */
        if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
                return f0;

        for (i = bp->max_vnics - 1; i >= 0; i--) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                if (vnic->fw_vnic_id == INVALID_VNIC_ID)
                        continue;

                STAILQ_FOREACH(flow, &vnic->flow_list, next) {
                        mf = flow->filter;

                        if (mf->matching_l2_fltr_ptr)
                                continue;

                        if (mf->ethertype == nf->ethertype &&
                            mf->l2_ovlan == nf->l2_ovlan &&
                            mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
                            mf->l2_ivlan == nf->l2_ivlan &&
                            mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
                            !memcmp(mf->src_macaddr, nf->src_macaddr,
                                    RTE_ETHER_ADDR_LEN) &&
                            !memcmp(mf->dst_macaddr, nf->dst_macaddr,
                                    RTE_ETHER_ADDR_LEN))
                                return mf;
                }
        }
        return NULL;
}

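/* Allocate and program a fresh L2 filter in firmware for the given VNIC;
 * used when no existing L2 filter matches the new flow.
 */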
static struct bnxt_filter_info *
bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
                      struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter1;
        int rc;

        /* Alloc new L2 filter.
         * This flow needs MAC filter which does not match any existing
         * L2 filters.
         */
        filter1 = bnxt_get_unused_filter(bp);
        if (filter1 == NULL)
                return NULL;

        filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE;
        filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
            nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) {
                filter1->flags |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
                PMD_DRV_LOG(DEBUG, "Create Outer filter\n");
        }

        if (nf->filter_type == HWRM_CFA_L2_FILTER &&
            (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
             nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) {
                PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n");
                filter1->flags |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID;
                memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN);
        } else {
                PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
                memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
        }

        if (nf->priority &&
            (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG ||
             nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
                /* Tell the FW where to place the filter in the table. */
                if (nf->priority > 65535) {
                        filter1->pri_hint =
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER;
                        /* This will place the filter in TCAM */
                        filter1->l2_filter_id_hint = (uint64_t)-1;
                }
        }

        filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
        memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
                                     filter1);
        if (rc) {
                bnxt_free_filter(bp, filter1);
                return NULL;
        }
        filter1->l2_ref_cnt++;
        return filter1;
}

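/* Return an L2 filter for the flow: reuse a matching one (bumping its
 * reference count) or create a new one.
 */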
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
                   struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *l2_filter = NULL;

        l2_filter = bnxt_find_matching_l2_filter(bp, nf);
        if (l2_filter) {
                l2_filter->l2_ref_cnt++;
                nf->matching_l2_fltr_ptr = l2_filter;
        } else {
                l2_filter = bnxt_create_l2_filter(bp, nf, vnic);
                nf->matching_l2_fltr_ptr = NULL;
        }

        return l2_filter;
}

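/* Prepare a VNIC for use by a flow: allocate the ring group and the VNIC
 * itself in firmware, set up an RSS context when more than one Rx ring is
 * involved, and apply the VLAN-strip setting.
 */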
static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        uint64_t rx_offloads = dev_conf->rxmode.offloads;
        int rc;

        rc = bnxt_vnic_grp_alloc(bp, vnic);
        if (rc)
                goto ret;

        rc = bnxt_hwrm_vnic_alloc(bp, vnic);
        if (rc) {
                PMD_DRV_LOG(ERR, "HWRM vnic alloc failure rc: %x\n", rc);
                goto ret;
        }
        bp->nr_vnics++;

        /* RSS context is required only when there is more than one RSS ring */
        if (vnic->rx_queue_cnt > 1) {
                rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0 /* ctx_idx 0 */);
                if (rc) {
                        PMD_DRV_LOG(ERR,
                                    "HWRM vnic ctx alloc failure: %x\n", rc);
                        goto ret;
                }
        } else {
                PMD_DRV_LOG(DEBUG, "No RSS context required\n");
        }

        if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                vnic->vlan_strip = true;
        else
                vnic->vlan_strip = false;

        rc = bnxt_hwrm_vnic_cfg(bp, vnic);
        if (rc)
                goto ret;

        bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

ret:
        return rc;
}

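/* Verify that the RSS configuration requested by the flow matches the
 * queue set already programmed into the VNIC.
 */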
static int match_vnic_rss_cfg(struct bnxt *bp,
                              struct bnxt_vnic_info *vnic,
                              const struct rte_flow_action_rss *rss)
{
        unsigned int match = 0, i;

        if (vnic->rx_queue_cnt != rss->queue_num)
                return -EINVAL;

        for (i = 0; i < rss->queue_num; i++) {
                if (!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt &&
                    !bp->rx_queues[rss->queue[i]]->rx_started)
                        return -EINVAL;
        }

        for (i = 0; i < vnic->rx_queue_cnt; i++) {
                int j;

                for (j = 0; j < vnic->rx_queue_cnt; j++) {
                        if (bp->grp_info[rss->queue[i]].fw_grp_id ==
                            vnic->fw_grp_ids[j])
                                match++;
                }
        }

        if (match != vnic->rx_queue_cnt) {
                PMD_DRV_LOG(ERR,
                            "VNIC queue count %d vs queues matched %d\n",
                            vnic->rx_queue_cnt, match);
                return -EINVAL;
        }

        return 0;
}

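/* Inherit flags/enables from the underlying L2 filter when the flow only
 * matches L2 fields, and record the firmware L2 filter id and reference
 * count in the new filter.
 */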
static void
bnxt_update_filter_flags_en(struct bnxt_filter_info *filter,
                            struct bnxt_filter_info *filter1,
                            int use_ntuple)
{
        if (!use_ntuple &&
            !(filter->valid_flags &
              ~(BNXT_FLOW_L2_DST_VALID_FLAG |
                BNXT_FLOW_L2_SRC_VALID_FLAG |
                BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
                BNXT_FLOW_L2_INNER_DST_VALID_FLAG))) {
                filter->flags = filter1->flags;
                filter->enables = filter1->enables;
                filter->filter_type = HWRM_CFA_L2_FILTER;
                memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN);
                memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
                filter->pri_hint = filter1->pri_hint;
                filter->l2_filter_id_hint = filter1->l2_filter_id_hint;
        }
        filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
        filter->l2_ref_cnt = filter1->l2_ref_cnt;
        PMD_DRV_LOG(DEBUG,
                "l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n",
                filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt);
}

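/* Parse the pattern, attributes and the (single) action of a flow and
 * resolve it to a destination VNIC plus an L2 filter.
 */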
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             const struct rte_flow_attr *attr,
                             struct rte_flow_error *error,
                             struct bnxt_filter_info *filter)
{
        const struct rte_flow_action *act =
                bnxt_flow_non_void_action(actions);
        struct bnxt *bp = dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_vnic_info *vnic = NULL, *vnic0 = NULL;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_vf *act_vf;
        struct bnxt_filter_info *filter1 = NULL;
        const struct rte_flow_action_rss *rss;
        struct bnxt_rx_queue *rxq = NULL;
        int dflt_vnic, vnic_id;
        unsigned int rss_idx;
        uint32_t vf = 0, i;
        int rc, use_ntuple;

        rc = bnxt_validate_and_parse_flow_type(bp, attr, pattern, error,
                                               filter);
        if (rc != 0)
                goto ret;

        rc = bnxt_flow_parse_attr(attr, error);
        if (rc != 0)
                goto ret;

        /* We support the ingress attribute only, for now. */
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

        use_ntuple = bnxt_filter_type_check(pattern, error);
        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                /* Allow this flow. Redirect to a VNIC. */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                if (!act_q->index || act_q->index >= bp->rx_nr_rings) {
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Invalid queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

                vnic_id = attr->group;
                if (!vnic_id) {
                        PMD_DRV_LOG(DEBUG, "Group id is 0\n");
                        vnic_id = act_q->index;
                }

                vnic = &bp->vnic_info[vnic_id];
                if (vnic == NULL) {
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "No matching VNIC found.");
                        rc = -rte_errno;
                        goto ret;
                }
                if (vnic->rx_queue_cnt) {
                        if (vnic->start_grp_id != act_q->index) {
                                PMD_DRV_LOG(ERR,
                                            "VNIC already in use\n");
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   act,
                                                   "VNIC already in use");
                                rc = -rte_errno;
                                goto ret;
                        }
                        goto use_vnic;
                }

                rxq = bp->rx_queues[act_q->index];

                if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
                    vnic->fw_vnic_id != INVALID_HW_RING_ID)
                        goto use_vnic;

                if (!rxq ||
                    bp->vnic_info[0].fw_grp_ids[act_q->index] !=
                    INVALID_HW_RING_ID) {
                        PMD_DRV_LOG(ERR,
                                    "Queue invalid or used with other VNIC\n");
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Queue invalid or in use");
                        rc = -rte_errno;
                        goto ret;
                }

                rxq->vnic = vnic;
                rxq->rx_started = 1;
                vnic->rx_queue_cnt++;
                vnic->start_grp_id = act_q->index;
                vnic->end_grp_id = act_q->index;
                vnic->func_default = 0; /* This is not a default VNIC. */
1084
1085                 PMD_DRV_LOG(DEBUG, "VNIC found\n");
1086
1087                 rc = bnxt_vnic_prep(bp, vnic);
1088                 if (rc)  {
1089                         rte_flow_error_set(error,
1090                                            EINVAL,
1091                                            RTE_FLOW_ERROR_TYPE_ACTION,
1092                                            act,
1093                                            "VNIC prep fail");
1094                         rc = -rte_errno;
1095                         goto ret;
1096                 }
1097
1098                 PMD_DRV_LOG(DEBUG,
1099                             "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
1100                             act_q->index, vnic, vnic->fw_grp_ids);
1101
1102 use_vnic:
1103                 vnic->ff_pool_idx = vnic_id;
1104                 PMD_DRV_LOG(DEBUG,
1105                             "Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
1106                 filter->dst_id = vnic->fw_vnic_id;
1107                 filter1 = bnxt_get_l2_filter(bp, filter, vnic);
1108                 if (filter1 == NULL) {
1109                         rte_flow_error_set(error,
1110                                            ENOSPC,
1111                                            RTE_FLOW_ERROR_TYPE_ACTION,
1112                                            act,
1113                                            "Filter not available");
1114                         rc = -rte_errno;
1115                         goto ret;
1116                 }
1117
1118                 PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n",
1119                             filter, filter1, filter1->l2_ref_cnt);
1120                 bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
1121                 break;
1122         case RTE_FLOW_ACTION_TYPE_DROP:
1123                 vnic0 = &bp->vnic_info[0];
1124                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1125                 if (filter1 == NULL) {
1126                         rc = -ENOSPC;
1127                         goto ret;
1128                 }
1129
1130                 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1131                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1132                         filter->flags =
1133                                 HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
1134                 else
1135                         filter->flags =
1136                                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
1137                 break;
1138         case RTE_FLOW_ACTION_TYPE_COUNT:
1139                 vnic0 = &bp->vnic_info[0];
1140                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1141                 if (filter1 == NULL) {
1142                         rte_flow_error_set(error,
1143                                            ENOSPC,
1144                                            RTE_FLOW_ERROR_TYPE_ACTION,
1145                                            act,
1146                                            "New filter not available");
1147                         rc = -rte_errno;
1148                         goto ret;
1149                 }
1150
1151                 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1152                 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
1153                 break;
1154         case RTE_FLOW_ACTION_TYPE_VF:
1155                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1156                 vf = act_vf->id;
1157
1158                 if (filter->tunnel_type ==
1159                     CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
1160                     filter->tunnel_type ==
1161                     CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) {
1162                         /* If issued on a VF, ensure id is 0 and is trusted */
1163                         if (BNXT_VF(bp)) {
1164                                 if (!BNXT_VF_IS_TRUSTED(bp) || vf) {
1165                                         rte_flow_error_set(error, EINVAL,
1166                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1167                                                 act,
1168                                                 "Incorrect VF");
1169                                         rc = -rte_errno;
1170                                         goto ret;
1171                                 }
1172                         }
1173
1174                         filter->enables |= filter->tunnel_type;
1175                         filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER;
1176                         goto done;
1177                 }
1178
1179                 if (vf >= bp->pdev->max_vfs) {
1180                         rte_flow_error_set(error,
1181                                            EINVAL,
1182                                            RTE_FLOW_ERROR_TYPE_ACTION,
1183                                            act,
1184                                            "Incorrect VF id!");
1185                         rc = -rte_errno;
1186                         goto ret;
1187                 }
1188
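		/* Steer matched packets to the VF by using its default VNIC
		 * as the mirror destination of this filter.
		 */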
		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
		if (dflt_vnic < 0) {
			/* A negative value means no driver is loaded on the
			 * VF, so there is no default VNIC to redirect to and
			 * the flow is rejected.
			 */
1195                         rte_flow_error_set(error,
1196                                            EINVAL,
1197                                            RTE_FLOW_ERROR_TYPE_ACTION,
1198                                            act,
1199                                            "Unable to get default VNIC for VF");
1200                         rc = -rte_errno;
1201                         goto ret;
1202                 }
1203
1204                 filter->mirror_vnic_id = dflt_vnic;
1205                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
1206
1207                 vnic0 = &bp->vnic_info[0];
1208                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1209                 if (filter1 == NULL) {
1210                         rte_flow_error_set(error,
1211                                            ENOSPC,
1212                                            RTE_FLOW_ERROR_TYPE_ACTION,
1213                                            act,
1214                                            "New filter not available");
1215                         rc = -ENOSPC;
1216                         goto ret;
1217                 }
1218
1219                 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1220                 break;
1221         case RTE_FLOW_ACTION_TYPE_RSS:
1222                 rss = (const struct rte_flow_action_rss *)act->conf;
1223
1224                 vnic_id = attr->group;
1225                 if (!vnic_id) {
1226                         PMD_DRV_LOG(ERR, "Group id cannot be 0\n");
1227                         rte_flow_error_set(error,
1228                                            EINVAL,
1229                                            RTE_FLOW_ERROR_TYPE_ATTR,
1230                                            NULL,
1231                                            "Group id cannot be 0");
1232                         rc = -rte_errno;
1233                         goto ret;
1234                 }
1235
		if (vnic_id >= bp->max_vnics) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "No matching VNIC for RSS group.");
			rc = -rte_errno;
			goto ret;
		}
		vnic = &bp->vnic_info[vnic_id];
1246                 PMD_DRV_LOG(DEBUG, "VNIC found\n");
1247
1248                 /* Check if requested RSS config matches RSS config of VNIC
1249                  * only if it is not a fresh VNIC configuration.
1250                  * Otherwise the existing VNIC configuration can be used.
1251                  */
1252                 if (vnic->rx_queue_cnt) {
1253                         rc = match_vnic_rss_cfg(bp, vnic, rss);
1254                         if (rc) {
1255                                 PMD_DRV_LOG(ERR,
1256                                             "VNIC and RSS config mismatch\n");
1257                                 rte_flow_error_set(error,
1258                                                    EINVAL,
1259                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1260                                                    act,
1261                                                    "VNIC and RSS cfg mismatch");
1262                                 rc = -rte_errno;
1263                                 goto ret;
1264                         }
1265                         goto vnic_found;
1266                 }
1267
1268                 for (i = 0; i < rss->queue_num; i++) {
1269                         PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n",
1270                                     rss->queue[i]);
1271
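			/* Queue 0 is owned by the default VNIC, so a valid
			 * RSS queue must be non-zero, within the configured
			 * ring count, and actually set up.
			 */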
1272                         if (!rss->queue[i] ||
1273                             rss->queue[i] >= bp->rx_nr_rings ||
1274                             !bp->rx_queues[rss->queue[i]]) {
1275                                 rte_flow_error_set(error,
1276                                                    EINVAL,
1277                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1278                                                    act,
1279                                                    "Invalid queue ID for RSS");
1280                                 rc = -rte_errno;
1281                                 goto ret;
1282                         }
1283                         rxq = bp->rx_queues[rss->queue[i]];
1284
1288                         if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] !=
1289                             INVALID_HW_RING_ID) {
1290                                 PMD_DRV_LOG(ERR,
1291                                             "queue active with other VNIC\n");
1292                                 rte_flow_error_set(error,
1293                                                    EINVAL,
1294                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1295                                                    act,
1296                                                    "Invalid queue ID for RSS");
1297                                 rc = -rte_errno;
1298                                 goto ret;
1299                         }
1300
1301                         rxq->vnic = vnic;
1302                         rxq->rx_started = 1;
1303                         vnic->rx_queue_cnt++;
1304                 }
1305
1306                 vnic->start_grp_id = rss->queue[0];
1307                 vnic->end_grp_id = rss->queue[rss->queue_num - 1];
		vnic->func_default = 0;	/* This is not a default VNIC. */
1309
1310                 rc = bnxt_vnic_prep(bp, vnic);
1311                 if (rc) {
1312                         rte_flow_error_set(error,
1313                                            EINVAL,
1314                                            RTE_FLOW_ERROR_TYPE_ACTION,
1315                                            act,
1316                                            "VNIC prep fail");
1317                         rc = -rte_errno;
1318                         goto ret;
1319                 }
1320
1321                 PMD_DRV_LOG(DEBUG,
1322                             "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
1323                             vnic_id, vnic, vnic->fw_grp_ids);
1324
1325                 vnic->ff_pool_idx = vnic_id;
1326                 PMD_DRV_LOG(DEBUG,
1327                             "Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx);
1328
1329                 /* This can be done only after vnic_grp_alloc is done. */
1330                 for (i = 0; i < vnic->rx_queue_cnt; i++) {
1331                         vnic->fw_grp_ids[i] =
1332                                 bp->grp_info[rss->queue[i]].fw_grp_id;
1333                         /* Make sure vnic0 does not use these rings. */
1334                         bp->vnic_info[0].fw_grp_ids[rss->queue[i]] =
1335                                 INVALID_HW_RING_ID;
1336                 }
1337
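		/* Spread the ring group ids round-robin across the
		 * HW_HASH_INDEX_SIZE-entry indirection table, e.g. with
		 * three rings: g0 g1 g2 g0 g1 g2 ...
		 */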
		for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; rss_idx++)
			vnic->rss_table[rss_idx] =
				vnic->fw_grp_ids[rss_idx % vnic->rx_queue_cnt];
1343
1344                 /* Configure RSS only if the queue count is > 1 */
1345                 if (vnic->rx_queue_cnt > 1) {
1346                         vnic->hash_type =
1347                                 bnxt_rte_to_hwrm_hash_types(rss->types);
1348
1349                         if (!rss->key_len) {
1350                                 /* If hash key has not been specified,
1351                                  * use random hash key.
1352                                  */
1353                                 prandom_bytes(vnic->rss_hash_key,
1354                                               HW_HASH_KEY_SIZE);
			} else {
				/* Copy the user key, truncating it to the
				 * HW key size if it is longer.
				 */
				memcpy(vnic->rss_hash_key, rss->key,
				       RTE_MIN(rss->key_len,
					       (uint32_t)HW_HASH_KEY_SIZE));
			}
1365                         bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1366                 } else {
1367                         PMD_DRV_LOG(DEBUG, "No RSS config required\n");
1368                 }
1369
1370 vnic_found:
1371                 filter->dst_id = vnic->fw_vnic_id;
1372                 filter1 = bnxt_get_l2_filter(bp, filter, vnic);
1373                 if (filter1 == NULL) {
1374                         rte_flow_error_set(error,
1375                                            ENOSPC,
1376                                            RTE_FLOW_ERROR_TYPE_ACTION,
1377                                            act,
1378                                            "New filter not available");
1379                         rc = -ENOSPC;
1380                         goto ret;
1381                 }
1382
1383                 PMD_DRV_LOG(DEBUG, "L2 filter created\n");
1384                 bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
1385                 break;
1386         default:
1387                 rte_flow_error_set(error,
1388                                    EINVAL,
1389                                    RTE_FLOW_ERROR_TYPE_ACTION,
1390                                    act,
1391                                    "Invalid action.");
1392                 rc = -rte_errno;
1393                 goto ret;
1394         }
1395
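	/* The flow's filter keeps only the fw filter id; if the L2 filter
	 * is not shared with an existing flow, return the software
	 * structure to the free pool.
	 */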
	if (filter1 && !filter->matching_l2_fltr_ptr) {
		filter1->fw_l2_filter_id = UINT64_MAX;
		bnxt_free_filter(bp, filter1);
	}
1400
1401 done:
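	/* Tunnel redirect flows jump straight here: no L2 filter was
	 * allocated for them, so there is nothing to release above.
	 */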
1402         act = bnxt_flow_non_void_action(++act);
1403         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1404                 rte_flow_error_set(error,
1405                                    EINVAL,
1406                                    RTE_FLOW_ERROR_TYPE_ACTION,
1407                                    act,
1408                                    "Invalid action.");
1409                 rc = -rte_errno;
1410                 goto ret;
1411         }
1412
1413         return rc;
1414 ret:

	/* TODO: Clean up according to the action type. */
	if (rte_errno) {
1418                 if (vnic && STAILQ_EMPTY(&vnic->filter))
1419                         vnic->rx_queue_cnt = 0;
1420
1421                 if (rxq && !vnic->rx_queue_cnt)
1422                         rxq->vnic = &bp->vnic_info[0];
1423         }
1424         return rc;
1425 }
1426
static struct bnxt_vnic_info *
find_matching_vnic(struct bnxt *bp, struct bnxt_filter_info *filter)
1430 {
1431         struct bnxt_vnic_info *vnic = NULL;
1432         unsigned int i;
1433
1434         for (i = 0; i < bp->max_vnics; i++) {
1435                 vnic = &bp->vnic_info[i];
1436                 if (vnic->fw_vnic_id != INVALID_VNIC_ID &&
1437                     filter->dst_id == vnic->fw_vnic_id) {
1438                         PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n",
1439                                     vnic->ff_pool_idx);
1440                         return vnic;
1441                 }
1442         }
1443         return NULL;
1444 }
1445
1446 static int
1447 bnxt_flow_validate(struct rte_eth_dev *dev,
1448                    const struct rte_flow_attr *attr,
1449                    const struct rte_flow_item pattern[],
1450                    const struct rte_flow_action actions[],
1451                    struct rte_flow_error *error)
1452 {
1453         struct bnxt *bp = dev->data->dev_private;
1454         struct bnxt_vnic_info *vnic = NULL;
1455         struct bnxt_filter_info *filter;
1456         int ret = 0;
1457
1458         bnxt_acquire_flow_lock(bp);
1459         ret = bnxt_flow_args_validate(attr, pattern, actions, error);
1460         if (ret != 0) {
1461                 bnxt_release_flow_lock(bp);
1462                 return ret;
1463         }
1464
1465         filter = bnxt_get_unused_filter(bp);
1466         if (filter == NULL) {
1467                 PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
1468                 bnxt_release_flow_lock(bp);
1469                 return -ENOMEM;
1470         }
1471
1472         ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
1473                                            error, filter);
1474         if (ret)
1475                 goto exit;
1476
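	/* A queue/RSS action may have set up a dedicated VNIC during
	 * parsing; since validation must not leave state behind, release
	 * it again if no filter is attached.
	 */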
1477         vnic = find_matching_vnic(bp, filter);
1478         if (vnic) {
1479                 if (STAILQ_EMPTY(&vnic->filter)) {
1480                         rte_free(vnic->fw_grp_ids);
1481                         bnxt_hwrm_vnic_ctx_free(bp, vnic);
1482                         bnxt_hwrm_vnic_free(bp, vnic);
1483                         vnic->rx_queue_cnt = 0;
1484                         bp->nr_vnics--;
1485                         PMD_DRV_LOG(DEBUG, "Free VNIC\n");
1486                 }
1487         }
1488
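	/* Release any HW filter the parse step may have programmed;
	 * clearing a filter type that was never set is a no-op.
	 */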
1489         if (filter->filter_type == HWRM_CFA_EM_FILTER)
1490                 bnxt_hwrm_clear_em_filter(bp, filter);
1491         else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1492                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
1493         else
1494                 bnxt_hwrm_clear_l2_filter(bp, filter);
1495
1496 exit:
1497         /* No need to hold on to this filter if we are just validating flow */
1498         filter->fw_l2_filter_id = UINT64_MAX;
1499         bnxt_free_filter(bp, filter);
1500         bnxt_release_flow_lock(bp);
1501
1502         return ret;
1503 }
1504
1505 static void
1506 bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter,
1507                    struct bnxt_filter_info *new_filter)
1508 {
1509         /* Clear the new L2 filter that was created in the previous step in
1510          * bnxt_validate_and_parse_flow. For L2 filters, we will use the new
1511          * filter which points to the new destination queue and so we clear
1512          * the previous L2 filter. For ntuple filters, we are going to reuse
1513          * the old L2 filter and create new NTUPLE filter with this new
1514          * destination queue subsequently during bnxt_flow_create.
1515          */
1516         if (new_filter->filter_type == HWRM_CFA_L2_FILTER) {
1517                 bnxt_hwrm_clear_l2_filter(bp, old_filter);
1518                 bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter);
1519         } else {
1520                 if (new_filter->filter_type == HWRM_CFA_EM_FILTER)
1521                         bnxt_hwrm_clear_em_filter(bp, old_filter);
1522                 if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1523                         bnxt_hwrm_clear_ntuple_filter(bp, old_filter);
1524         }
1525 }
1526
1527 static int
1528 bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
1529 {
1530         struct bnxt_filter_info *mf;
1531         struct rte_flow *flow;
1532         int i;
1533
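	/* Walk every flow on every active VNIC and compare the complete
	 * match key (type, flags, ports, protocol, VLAN, MAC and IP
	 * fields with their masks) against the new filter.
	 */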
1534         for (i = bp->max_vnics - 1; i >= 0; i--) {
1535                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1536
1537                 if (vnic->fw_vnic_id == INVALID_VNIC_ID)
1538                         continue;
1539
1540                 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1541                         mf = flow->filter;
1542
1543                         if (mf->filter_type == nf->filter_type &&
1544                             mf->flags == nf->flags &&
1545                             mf->src_port == nf->src_port &&
1546                             mf->src_port_mask == nf->src_port_mask &&
1547                             mf->dst_port == nf->dst_port &&
1548                             mf->dst_port_mask == nf->dst_port_mask &&
1549                             mf->ip_protocol == nf->ip_protocol &&
1550                             mf->ip_addr_type == nf->ip_addr_type &&
1551                             mf->ethertype == nf->ethertype &&
1552                             mf->vni == nf->vni &&
1553                             mf->tunnel_type == nf->tunnel_type &&
1554                             mf->l2_ovlan == nf->l2_ovlan &&
1555                             mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
1556                             mf->l2_ivlan == nf->l2_ivlan &&
1557                             mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
1558                             !memcmp(mf->l2_addr, nf->l2_addr,
1559                                     RTE_ETHER_ADDR_LEN) &&
1560                             !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
1561                                     RTE_ETHER_ADDR_LEN) &&
1562                             !memcmp(mf->src_macaddr, nf->src_macaddr,
1563                                     RTE_ETHER_ADDR_LEN) &&
1564                             !memcmp(mf->dst_macaddr, nf->dst_macaddr,
1565                                     RTE_ETHER_ADDR_LEN) &&
1566                             !memcmp(mf->src_ipaddr, nf->src_ipaddr,
1567                                     sizeof(nf->src_ipaddr)) &&
1568                             !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
1569                                     sizeof(nf->src_ipaddr_mask)) &&
1570                             !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
1571                                     sizeof(nf->dst_ipaddr)) &&
1572                             !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
1573                                     sizeof(nf->dst_ipaddr_mask))) {
1574                                 if (mf->dst_id == nf->dst_id)
1575                                         return -EEXIST;
1576                                 /* Free the old filter, update flow
1577                                  * with new filter
1578                                  */
1579                                 bnxt_update_filter(bp, mf, nf);
1580                                 STAILQ_REMOVE(&vnic->filter, mf,
1581                                               bnxt_filter_info, next);
1582                                 STAILQ_INSERT_TAIL(&vnic->filter, nf, next);
1583                                 bnxt_free_filter(bp, mf);
1584                                 flow->filter = nf;
1585                                 return -EXDEV;
1586                         }
1587                 }
1588         }
1589         return 0;
1590 }
1591
1592 static struct rte_flow *
1593 bnxt_flow_create(struct rte_eth_dev *dev,
1594                  const struct rte_flow_attr *attr,
1595                  const struct rte_flow_item pattern[],
1596                  const struct rte_flow_action actions[],
1597                  struct rte_flow_error *error)
1598 {
1599         struct bnxt *bp = dev->data->dev_private;
1600         struct bnxt_vnic_info *vnic = NULL;
1601         struct bnxt_filter_info *filter;
1602         bool update_flow = false;
1603         struct rte_flow *flow;
1604         int ret = 0;
1605         uint32_t tun_type;
1606
1607         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1608                 rte_flow_error_set(error, EINVAL,
1609                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow, not a trusted VF!");
1611                 return NULL;
1612         }
1613
1614         if (!dev->data->dev_started) {
1615                 rte_flow_error_set(error,
1616                                    EINVAL,
1617                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1618                                    NULL,
1619                                    "Device must be started");
1620                 return NULL;
1621         }
1622
1623         flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
1624         if (!flow) {
1625                 rte_flow_error_set(error, ENOMEM,
1626                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1627                                    "Failed to allocate memory");
		return NULL;
1629         }
1630
1631         bnxt_acquire_flow_lock(bp);
1632         ret = bnxt_flow_args_validate(attr, pattern, actions, error);
1633         if (ret != 0) {
		PMD_DRV_LOG(ERR, "Invalid flow arguments.\n");
1635                 goto free_flow;
1636         }
1637
1638         filter = bnxt_get_unused_filter(bp);
1639         if (filter == NULL) {
1640                 PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		ret = -ENOSPC;
		goto free_flow;
1642         }
1643
1644         ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
1645                                            error, filter);
1646         if (ret != 0)
1647                 goto free_filter;
1648
1649         ret = bnxt_match_filter(bp, filter);
1650         if (ret == -EEXIST) {
1651                 PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
1652                 /* Clear the filter that was created as part of
1653                  * validate_and_parse_flow() above
1654                  */
1655                 bnxt_hwrm_clear_l2_filter(bp, filter);
1656                 goto free_filter;
1657         } else if (ret == -EXDEV) {
1658                 PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
1659                 PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
1660                 update_flow = true;
1661         }
1662
1663         /* If tunnel redirection to a VF/PF is specified then only tunnel_type
1664          * is set and enable is set to the tunnel type. Issue hwrm cmd directly
1665          * in such a case.
1666          */
1667         if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
1668             filter->enables == filter->tunnel_type) {
1669                 ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
1670                 if (ret) {
1671                         rte_flow_error_set(error, -ret,
1672                                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1673                                            "Unable to query tunnel to VF");
1674                         goto free_filter;
1675                 }
1676                 if (tun_type == (1U << filter->tunnel_type)) {
1677                         ret =
1678                         bnxt_hwrm_tunnel_redirect_free(bp,
1679                                                        filter->tunnel_type);
1680                         if (ret) {
1681                                 PMD_DRV_LOG(ERR,
1682                                             "Unable to free existing tunnel\n");
1683                                 rte_flow_error_set(error, -ret,
1684                                                    RTE_FLOW_ERROR_TYPE_HANDLE,
1685                                                    NULL,
1686                                                    "Unable to free preexisting "
1687                                                    "tunnel on VF");
1688                                 goto free_filter;
1689                         }
1690                 }
1691                 ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type);
1692                 if (ret) {
1693                         rte_flow_error_set(error, -ret,
1694                                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1695                                            "Unable to redirect tunnel to VF");
1696                         goto free_filter;
1697                 }
1698                 vnic = &bp->vnic_info[0];
1699                 goto done;
1700         }
1701
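	/* For exact-match and n-tuple flows, tie the parsed match key to
	 * the underlying L2 filter and program it into the NIC via HWRM.
	 */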
1702         if (filter->filter_type == HWRM_CFA_EM_FILTER) {
1703                 filter->enables |=
1704                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1705                 ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
1706         }
1707
1708         if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
1709                 filter->enables |=
1710                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1711                 ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
1712         }
1713
1714         vnic = find_matching_vnic(bp, filter);
1715 done:
1716         if (!ret || update_flow) {
1717                 flow->filter = filter;
1718                 flow->vnic = vnic;
1719                 /* VNIC is set only in case of queue or RSS action */
1720                 if (vnic) {
			/* RxQ0 is not used for flow filters. */
1725                         if (update_flow) {
1726                                 ret = -EXDEV;
1727                                 goto free_flow;
1728                         }
1729                         STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
1730                 }
		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
1732                 STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
1733                 bnxt_release_flow_lock(bp);
1734                 return flow;
1735         }
1747 free_filter:
1748         bnxt_free_filter(bp, filter);
1749 free_flow:
1750         if (ret == -EEXIST)
1751                 rte_flow_error_set(error, ret,
1752                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1753                                    "Matching Flow exists.");
1754         else if (ret == -EXDEV)
1755                 rte_flow_error_set(error, 0,
1756                                    RTE_FLOW_ERROR_TYPE_NONE, NULL,
1757                                    "Flow with pattern exists, updating destination queue");
1758         else
1759                 rte_flow_error_set(error, -ret,
1760                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1761                                    "Failed to create flow.");
	rte_free(flow);
	bnxt_release_flow_lock(bp);
	return NULL;
1766 }
1767
1768 static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
1769                                                struct bnxt_filter_info *filter,
1770                                                struct rte_flow_error *error)
1771 {
1772         uint16_t tun_dst_fid;
1773         uint32_t tun_type;
1774         int ret = 0;
1775
1776         ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
1777         if (ret) {
1778                 rte_flow_error_set(error, -ret,
1779                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1780                                    "Unable to query tunnel to VF");
1781                 return ret;
1782         }
1783         if (tun_type == (1U << filter->tunnel_type)) {
1784                 ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
1785                                                      &tun_dst_fid);
1786                 if (ret) {
1787                         rte_flow_error_set(error, -ret,
1788                                            RTE_FLOW_ERROR_TYPE_HANDLE,
1789                                            NULL,
1790                                            "tunnel_redirect info cmd fail");
1791                         return ret;
1792                 }
1793                 PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
1794                             tun_dst_fid + bp->first_vf_id, bp->fw_fid);
1795
1796                 /* Tunnel doesn't belong to this VF, so don't send HWRM
1797                  * cmd, just delete the flow from driver
1798                  */
1799                 if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id))
1800                         PMD_DRV_LOG(ERR,
1801                                     "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
1802                 else
1803                         ret = bnxt_hwrm_tunnel_redirect_free(bp,
1804                                                         filter->tunnel_type);
1805         }
1806         return ret;
1807 }
1808
1809 static int
1810 bnxt_flow_destroy(struct rte_eth_dev *dev,
1811                   struct rte_flow *flow,
1812                   struct rte_flow_error *error)
1813 {
1814         struct bnxt *bp = dev->data->dev_private;
1815         struct bnxt_filter_info *filter;
1816         struct bnxt_vnic_info *vnic;
1817         int ret = 0;
1818
1819         bnxt_acquire_flow_lock(bp);
1820         if (!flow) {
1821                 rte_flow_error_set(error, EINVAL,
1822                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1823                                    "Invalid flow: failed to destroy flow.");
1824                 bnxt_release_flow_lock(bp);
1825                 return -EINVAL;
1826         }
1827
1828         filter = flow->filter;
1829         vnic = flow->vnic;
1830
1831         if (!filter) {
1832                 rte_flow_error_set(error, EINVAL,
1833                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1834                                    "Invalid flow: failed to destroy flow.");
1835                 bnxt_release_flow_lock(bp);
1836                 return -EINVAL;
1837         }
1838
1839         if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
1840             filter->enables == filter->tunnel_type) {
1841                 ret = bnxt_handle_tunnel_redirect_destroy(bp,
1842                                                           filter,
1843                                                           error);
		if (ret) {
			bnxt_release_flow_lock(bp);
			return ret;
		}
		goto done;
1850         }
1851
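	/* The flow's own filter should be present on one of the per-VNIC
	 * lists; a return of 0 means it is unknown to the driver.
	 */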
1852         ret = bnxt_match_filter(bp, filter);
1853         if (ret == 0)
1854                 PMD_DRV_LOG(ERR, "Could not find matching flow\n");
1855
1856         if (filter->filter_type == HWRM_CFA_EM_FILTER)
1857                 ret = bnxt_hwrm_clear_em_filter(bp, filter);
1858         if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1859                 ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1860         ret = bnxt_hwrm_clear_l2_filter(bp, filter);
1861
1862 done:
1863         if (!ret) {
1864                 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
1865                 bnxt_free_filter(bp, filter);
1866                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1867                 rte_free(flow);
1868
1869                 /* If this was the last flow associated with this vnic,
1870                  * switch the queue back to RSS pool.
1871                  */
1872                 if (vnic && STAILQ_EMPTY(&vnic->flow_list)) {
1873                         rte_free(vnic->fw_grp_ids);
1874                         if (vnic->rx_queue_cnt > 1)
1875                                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1876
1877                         bnxt_hwrm_vnic_free(bp, vnic);
1878                         vnic->rx_queue_cnt = 0;
1879                         bp->nr_vnics--;
1880                 }
1881         } else {
1882                 rte_flow_error_set(error, -ret,
1883                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1884                                    "Failed to destroy flow.");
1885         }
1886
1887         bnxt_release_flow_lock(bp);
1888         return ret;
1889 }
1890
1891 static int
1892 bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1893 {
1894         struct bnxt *bp = dev->data->dev_private;
1895         struct bnxt_vnic_info *vnic;
1896         struct rte_flow *flow;
1897         unsigned int i;
1898         int ret = 0;
1899
1900         bnxt_acquire_flow_lock(bp);
1901         for (i = 0; i < bp->max_vnics; i++) {
1902                 vnic = &bp->vnic_info[i];
1903                 if (vnic->fw_vnic_id == INVALID_VNIC_ID)
1904                         continue;
1905
		/* Always take the list head: each pass frees the current
		 * flow, so STAILQ_FOREACH would touch freed memory.
		 */
		while (!STAILQ_EMPTY(&vnic->flow_list)) {
			struct bnxt_filter_info *filter;

			flow = STAILQ_FIRST(&vnic->flow_list);
			filter = flow->filter;

1909                         if (filter->filter_type ==
1910                             HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
1911                             filter->enables == filter->tunnel_type) {
1912                                 ret =
1913                                 bnxt_handle_tunnel_redirect_destroy(bp,
1914                                                                     filter,
1915                                                                     error);
				if (ret) {
					bnxt_release_flow_lock(bp);
					return ret;
				}
				goto done;
1922                         }
1923
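			/* The else below pairs with the ntuple check, so an
			 * EM filter on VNIC 0 also falls through to the L2
			 * clear.
			 */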
1924                         if (filter->filter_type == HWRM_CFA_EM_FILTER)
1925                                 ret = bnxt_hwrm_clear_em_filter(bp, filter);
1926                         if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1927                                 ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1928                         else if (!i)
1929                                 ret = bnxt_hwrm_clear_l2_filter(bp, filter);
1930
1931                         if (ret) {
1932                                 rte_flow_error_set
1933                                         (error,
1934                                          -ret,
1935                                          RTE_FLOW_ERROR_TYPE_HANDLE,
1936                                          NULL,
1937                                          "Failed to flush flow in HW.");
1938                                 bnxt_release_flow_lock(bp);
1939                                 return -rte_errno;
1940                         }
1941 done:
1942                         bnxt_free_filter(bp, filter);
1943                         STAILQ_REMOVE(&vnic->flow_list, flow,
1944                                       rte_flow, next);
1945                         rte_free(flow);
1946                 }
1947         }
1948
1949         bnxt_release_flow_lock(bp);
1950         return ret;
1951 }
1952
1953 const struct rte_flow_ops bnxt_flow_ops = {
1954         .validate = bnxt_flow_validate,
1955         .create = bnxt_flow_create,
1956         .destroy = bnxt_flow_destroy,
1957         .flush = bnxt_flow_flush,
1958 };