net/bnxt: allow group ID 0 for RSS action
[dpdk.git] / drivers / net / bnxt / bnxt_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Broadcom
3  * All rights reserved.
4  */
5
6 #include <sys/queue.h>
7
8 #include <rte_log.h>
9 #include <rte_malloc.h>
10 #include <rte_flow.h>
11 #include <rte_flow_driver.h>
12 #include <rte_tailq.h>
13
14 #include "bnxt.h"
15 #include "bnxt_filter.h"
16 #include "bnxt_hwrm.h"
17 #include "bnxt_ring.h"
18 #include "bnxt_rxq.h"
19 #include "bnxt_vnic.h"
20 #include "hsi_struct_def_dpdk.h"
21
22 static int
23 bnxt_flow_args_validate(const struct rte_flow_attr *attr,
24                         const struct rte_flow_item pattern[],
25                         const struct rte_flow_action actions[],
26                         struct rte_flow_error *error)
27 {
28         if (!pattern) {
29                 rte_flow_error_set(error,
30                                    EINVAL,
31                                    RTE_FLOW_ERROR_TYPE_ITEM_NUM,
32                                    NULL,
33                                    "NULL pattern.");
34                 return -rte_errno;
35         }
36
37         if (!actions) {
38                 rte_flow_error_set(error,
39                                    EINVAL,
40                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
41                                    NULL,
42                                    "NULL action.");
43                 return -rte_errno;
44         }
45
46         if (!attr) {
47                 rte_flow_error_set(error,
48                                    EINVAL,
49                                    RTE_FLOW_ERROR_TYPE_ATTR,
50                                    NULL,
51                                    "NULL attribute.");
52                 return -rte_errno;
53         }
54
55         return 0;
56 }
57
58 static const struct rte_flow_item *
59 bnxt_flow_non_void_item(const struct rte_flow_item *cur)
60 {
61         while (1) {
62                 if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
63                         return cur;
64                 cur++;
65         }
66 }
67
68 static const struct rte_flow_action *
69 bnxt_flow_non_void_action(const struct rte_flow_action *cur)
70 {
71         while (1) {
72                 if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
73                         return cur;
74                 cur++;
75         }
76 }
77
78 static int
79 bnxt_filter_type_check(const struct rte_flow_item pattern[],
80                        struct rte_flow_error *error)
81 {
82         const struct rte_flow_item *item =
83                 bnxt_flow_non_void_item(pattern);
84         int use_ntuple = 1;
85         bool has_vlan = 0;
86
87         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
88                 switch (item->type) {
89                 case RTE_FLOW_ITEM_TYPE_ANY:
90                 case RTE_FLOW_ITEM_TYPE_ETH:
91                         use_ntuple = 0;
92                         break;
93                 case RTE_FLOW_ITEM_TYPE_VLAN:
94                         use_ntuple = 0;
95                         has_vlan = 1;
96                         break;
97                 case RTE_FLOW_ITEM_TYPE_IPV4:
98                 case RTE_FLOW_ITEM_TYPE_IPV6:
99                 case RTE_FLOW_ITEM_TYPE_TCP:
100                 case RTE_FLOW_ITEM_TYPE_UDP:
101                         /* FALLTHROUGH */
102                         /* need ntuple match, reset exact match */
103                         use_ntuple |= 1;
104                         break;
105                 default:
106                         PMD_DRV_LOG(DEBUG, "Unknown Flow type\n");
107                         use_ntuple |= 0;
108                 }
109                 item++;
110         }
111
112         if (has_vlan && use_ntuple) {
113                 PMD_DRV_LOG(ERR,
114                             "VLAN flow cannot use NTUPLE filter\n");
115                 rte_flow_error_set(error, EINVAL,
116                                    RTE_FLOW_ERROR_TYPE_ITEM,
117                                    item,
118                                    "Cannot use VLAN with NTUPLE");
119                 return -rte_errno;
120         }
121
122         return use_ntuple;
123 }
124
125 static int
126 bnxt_validate_and_parse_flow_type(struct bnxt *bp,
127                                   const struct rte_flow_attr *attr,
128                                   const struct rte_flow_item pattern[],
129                                   struct rte_flow_error *error,
130                                   struct bnxt_filter_info *filter)
131 {
132         const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
133         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
134         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
135         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
136         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
137         const struct rte_flow_item_udp *udp_spec, *udp_mask;
138         const struct rte_flow_item_eth *eth_spec, *eth_mask;
139         const struct rte_ether_addr *dst, *src;
140         const struct rte_flow_item_nvgre *nvgre_spec;
141         const struct rte_flow_item_nvgre *nvgre_mask;
142         const struct rte_flow_item_gre *gre_spec;
143         const struct rte_flow_item_gre *gre_mask;
144         const struct rte_flow_item_vxlan *vxlan_spec;
145         const struct rte_flow_item_vxlan *vxlan_mask;
146         uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
147         uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
148         const struct rte_flow_item_vf *vf_spec;
149         uint32_t tenant_id_be = 0, valid_flags = 0;
150         bool vni_masked = 0;
151         bool tni_masked = 0;
152         uint32_t en_ethertype;
153         uint8_t inner = 0;
154         uint32_t vf = 0;
155         uint32_t en = 0;
156         int use_ntuple;
157         int dflt_vnic;
158
159         use_ntuple = bnxt_filter_type_check(pattern, error);
160         if (use_ntuple < 0)
161                 return use_ntuple;
162         PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
163
164         filter->filter_type = use_ntuple ?
165                 HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_L2_FILTER;
166         en_ethertype = use_ntuple ?
167                 NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
168                 EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
169
170         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
171                 if (item->last) {
172                         /* last or range is NOT supported as match criteria */
173                         rte_flow_error_set(error, EINVAL,
174                                            RTE_FLOW_ERROR_TYPE_ITEM,
175                                            item,
176                                            "No support for range");
177                         return -rte_errno;
178                 }
179
180                 switch (item->type) {
181                 case RTE_FLOW_ITEM_TYPE_ANY:
182                         inner =
183                         ((const struct rte_flow_item_any *)item->spec)->num > 3;
184                         if (inner)
185                                 PMD_DRV_LOG(DEBUG, "Parse inner header\n");
186                         break;
187                 case RTE_FLOW_ITEM_TYPE_ETH:
188                         if (!item->spec || !item->mask)
189                                 break;
190
191                         eth_spec = item->spec;
192                         eth_mask = item->mask;
193
194                         /* Source MAC address mask cannot be partially set.
195                          * Should be All 0's or all 1's.
196                          * Destination MAC address mask must not be partially
197                          * set. Should be all 1's or all 0's.
198                          */
199                         if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
200                              !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
201                             (!rte_is_zero_ether_addr(&eth_mask->dst) &&
202                              !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
203                                 rte_flow_error_set(error,
204                                                    EINVAL,
205                                                    RTE_FLOW_ERROR_TYPE_ITEM,
206                                                    item,
207                                                    "MAC_addr mask not valid");
208                                 return -rte_errno;
209                         }
210
211                         /* Mask is not allowed. Only exact matches are */
212                         if (eth_mask->type &&
213                             eth_mask->type != RTE_BE16(0xffff)) {
214                                 rte_flow_error_set(error, EINVAL,
215                                                    RTE_FLOW_ERROR_TYPE_ITEM,
216                                                    item,
217                                                    "ethertype mask not valid");
218                                 return -rte_errno;
219                         }
220
221                         if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
222                                 dst = &eth_spec->dst;
223                                 if (!rte_is_valid_assigned_ether_addr(dst)) {
224                                         rte_flow_error_set(error,
225                                                            EINVAL,
226                                                            RTE_FLOW_ERROR_TYPE_ITEM,
227                                                            item,
228                                                            "DMAC is invalid");
229                                         PMD_DRV_LOG(ERR,
230                                                     "DMAC is invalid!\n");
231                                         return -rte_errno;
232                                 }
233                                 rte_memcpy(filter->dst_macaddr,
234                                            &eth_spec->dst, RTE_ETHER_ADDR_LEN);
235                                 en |= use_ntuple ?
236                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
237                                         EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
238                                 valid_flags |= inner ?
239                                         BNXT_FLOW_L2_INNER_DST_VALID_FLAG :
240                                         BNXT_FLOW_L2_DST_VALID_FLAG;
241                                 filter->priority = attr->priority;
242                                 PMD_DRV_LOG(DEBUG,
243                                             "Creating a priority flow\n");
244                         }
245                         if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
246                                 src = &eth_spec->src;
247                                 if (!rte_is_valid_assigned_ether_addr(src)) {
248                                         rte_flow_error_set(error,
249                                                            EINVAL,
250                                                            RTE_FLOW_ERROR_TYPE_ITEM,
251                                                            item,
252                                                            "SMAC is invalid");
253                                         PMD_DRV_LOG(ERR,
254                                                     "SMAC is invalid!\n");
255                                         return -rte_errno;
256                                 }
257                                 rte_memcpy(filter->src_macaddr,
258                                            &eth_spec->src, RTE_ETHER_ADDR_LEN);
259                                 en |= use_ntuple ?
260                                         NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
261                                         EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
262                                 valid_flags |= inner ?
263                                         BNXT_FLOW_L2_INNER_SRC_VALID_FLAG :
264                                         BNXT_FLOW_L2_SRC_VALID_FLAG;
265                         } /*
266                            * else {
267                            *  PMD_DRV_LOG(ERR, "Handle this condition\n");
268                            * }
269                            */
270                         if (eth_mask->type) {
271                                 filter->ethertype =
272                                         rte_be_to_cpu_16(eth_spec->type);
273                                 en |= en_ethertype;
274                         }
275                         if (inner)
276                                 valid_flags |= BNXT_FLOW_PARSE_INNER_FLAG;
277
278                         break;
279                 case RTE_FLOW_ITEM_TYPE_VLAN:
280                         vlan_spec = item->spec;
281                         vlan_mask = item->mask;
282                         if (en & en_ethertype) {
283                                 rte_flow_error_set(error, EINVAL,
284                                                    RTE_FLOW_ERROR_TYPE_ITEM,
285                                                    item,
286                                                    "VLAN TPID matching is not"
287                                                    " supported");
288                                 return -rte_errno;
289                         }
290                         if (vlan_mask->tci &&
291                             vlan_mask->tci == RTE_BE16(0x0fff)) {
292                                 /* Only the VLAN ID can be matched. */
293                                 filter->l2_ovlan =
294                                         rte_be_to_cpu_16(vlan_spec->tci &
295                                                          RTE_BE16(0x0fff));
296                                 en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
297                         } else {
298                                 rte_flow_error_set(error,
299                                                    EINVAL,
300                                                    RTE_FLOW_ERROR_TYPE_ITEM,
301                                                    item,
302                                                    "VLAN mask is invalid");
303                                 return -rte_errno;
304                         }
305                         if (vlan_mask->inner_type &&
306                             vlan_mask->inner_type != RTE_BE16(0xffff)) {
307                                 rte_flow_error_set(error, EINVAL,
308                                                    RTE_FLOW_ERROR_TYPE_ITEM,
309                                                    item,
310                                                    "inner ethertype mask not"
311                                                    " valid");
312                                 return -rte_errno;
313                         }
314                         if (vlan_mask->inner_type) {
315                                 filter->ethertype =
316                                         rte_be_to_cpu_16(vlan_spec->inner_type);
317                                 en |= en_ethertype;
318                         }
319
320                         break;
321                 case RTE_FLOW_ITEM_TYPE_IPV4:
322                         /* If mask is not involved, we could use EM filters. */
323                         ipv4_spec = item->spec;
324                         ipv4_mask = item->mask;
325
326                         if (!item->spec || !item->mask)
327                                 break;
328
329                         /* Only IP DST and SRC fields are maskable. */
330                         if (ipv4_mask->hdr.version_ihl ||
331                             ipv4_mask->hdr.type_of_service ||
332                             ipv4_mask->hdr.total_length ||
333                             ipv4_mask->hdr.packet_id ||
334                             ipv4_mask->hdr.fragment_offset ||
335                             ipv4_mask->hdr.time_to_live ||
336                             ipv4_mask->hdr.next_proto_id ||
337                             ipv4_mask->hdr.hdr_checksum) {
338                                 rte_flow_error_set(error,
339                                                    EINVAL,
340                                                    RTE_FLOW_ERROR_TYPE_ITEM,
341                                                    item,
342                                                    "Invalid IPv4 mask.");
343                                 return -rte_errno;
344                         }
345
346                         filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
347                         filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
348
349                         if (use_ntuple)
350                                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
351                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
352                         else
353                                 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
354                                         EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
355
356                         if (ipv4_mask->hdr.src_addr) {
357                                 filter->src_ipaddr_mask[0] =
358                                         ipv4_mask->hdr.src_addr;
359                                 en |= !use_ntuple ? 0 :
360                                      NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
361                         }
362
363                         if (ipv4_mask->hdr.dst_addr) {
364                                 filter->dst_ipaddr_mask[0] =
365                                         ipv4_mask->hdr.dst_addr;
366                                 en |= !use_ntuple ? 0 :
367                                      NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
368                         }
369
370                         filter->ip_addr_type = use_ntuple ?
371                          HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
372                          HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
373
374                         if (ipv4_spec->hdr.next_proto_id) {
375                                 filter->ip_protocol =
376                                         ipv4_spec->hdr.next_proto_id;
377                                 if (use_ntuple)
378                                         en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
379                                 else
380                                         en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
381                         }
382                         break;
383                 case RTE_FLOW_ITEM_TYPE_IPV6:
384                         ipv6_spec = item->spec;
385                         ipv6_mask = item->mask;
386
387                         if (!item->spec || !item->mask)
388                                 break;
389
390                         /* Only IP DST and SRC fields are maskable. */
391                         if (ipv6_mask->hdr.vtc_flow ||
392                             ipv6_mask->hdr.payload_len ||
393                             ipv6_mask->hdr.proto ||
394                             ipv6_mask->hdr.hop_limits) {
395                                 rte_flow_error_set(error,
396                                                    EINVAL,
397                                                    RTE_FLOW_ERROR_TYPE_ITEM,
398                                                    item,
399                                                    "Invalid IPv6 mask.");
400                                 return -rte_errno;
401                         }
402
403                         if (use_ntuple)
404                                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
405                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
406                         else
407                                 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
408                                         EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
409
410                         rte_memcpy(filter->src_ipaddr,
411                                    ipv6_spec->hdr.src_addr, 16);
412                         rte_memcpy(filter->dst_ipaddr,
413                                    ipv6_spec->hdr.dst_addr, 16);
414
415                         if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
416                                                    16)) {
417                                 rte_memcpy(filter->src_ipaddr_mask,
418                                            ipv6_mask->hdr.src_addr, 16);
419                                 en |= !use_ntuple ? 0 :
420                                     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
421                         }
422
423                         if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
424                                                    16)) {
425                                 rte_memcpy(filter->dst_ipaddr_mask,
426                                            ipv6_mask->hdr.dst_addr, 16);
427                                 en |= !use_ntuple ? 0 :
428                                      NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
429                         }
430
431                         filter->ip_addr_type = use_ntuple ?
432                                 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
433                                 EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
434                         break;
435                 case RTE_FLOW_ITEM_TYPE_TCP:
436                         tcp_spec = item->spec;
437                         tcp_mask = item->mask;
438
439                         if (!item->spec || !item->mask)
440                                 break;
441
442                         /* Check TCP mask. Only DST & SRC ports are maskable */
443                         if (tcp_mask->hdr.sent_seq ||
444                             tcp_mask->hdr.recv_ack ||
445                             tcp_mask->hdr.data_off ||
446                             tcp_mask->hdr.tcp_flags ||
447                             tcp_mask->hdr.rx_win ||
448                             tcp_mask->hdr.cksum ||
449                             tcp_mask->hdr.tcp_urp) {
450                                 rte_flow_error_set(error,
451                                                    EINVAL,
452                                                    RTE_FLOW_ERROR_TYPE_ITEM,
453                                                    item,
454                                                    "Invalid TCP mask");
455                                 return -rte_errno;
456                         }
457
458                         filter->src_port = tcp_spec->hdr.src_port;
459                         filter->dst_port = tcp_spec->hdr.dst_port;
460
461                         if (use_ntuple)
462                                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
463                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
464                         else
465                                 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
466                                         EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
467
468                         if (tcp_mask->hdr.dst_port) {
469                                 filter->dst_port_mask = tcp_mask->hdr.dst_port;
470                                 en |= !use_ntuple ? 0 :
471                                   NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
472                         }
473
474                         if (tcp_mask->hdr.src_port) {
475                                 filter->src_port_mask = tcp_mask->hdr.src_port;
476                                 en |= !use_ntuple ? 0 :
477                                   NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
478                         }
479                         break;
480                 case RTE_FLOW_ITEM_TYPE_UDP:
481                         udp_spec = item->spec;
482                         udp_mask = item->mask;
483
484                         if (!item->spec || !item->mask)
485                                 break;
486
487                         if (udp_mask->hdr.dgram_len ||
488                             udp_mask->hdr.dgram_cksum) {
489                                 rte_flow_error_set(error,
490                                                    EINVAL,
491                                                    RTE_FLOW_ERROR_TYPE_ITEM,
492                                                    item,
493                                                    "Invalid UDP mask");
494                                 return -rte_errno;
495                         }
496
497                         filter->src_port = udp_spec->hdr.src_port;
498                         filter->dst_port = udp_spec->hdr.dst_port;
499
500                         if (use_ntuple)
501                                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
502                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
503                         else
504                                 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
505                                         EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
506
507                         if (udp_mask->hdr.dst_port) {
508                                 filter->dst_port_mask = udp_mask->hdr.dst_port;
509                                 en |= !use_ntuple ? 0 :
510                                   NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
511                         }
512
513                         if (udp_mask->hdr.src_port) {
514                                 filter->src_port_mask = udp_mask->hdr.src_port;
515                                 en |= !use_ntuple ? 0 :
516                                   NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
517                         }
518                         break;
519                 case RTE_FLOW_ITEM_TYPE_VXLAN:
520                         vxlan_spec = item->spec;
521                         vxlan_mask = item->mask;
522                         /* Check if VXLAN item is used to describe protocol.
523                          * If yes, both spec and mask should be NULL.
524                          * If no, both spec and mask shouldn't be NULL.
525                          */
526                         if ((!vxlan_spec && vxlan_mask) ||
527                             (vxlan_spec && !vxlan_mask)) {
528                                 rte_flow_error_set(error,
529                                                    EINVAL,
530                                                    RTE_FLOW_ERROR_TYPE_ITEM,
531                                                    item,
532                                                    "Invalid VXLAN item");
533                                 return -rte_errno;
534                         }
535
536                         if (!vxlan_spec && !vxlan_mask) {
537                                 filter->tunnel_type =
538                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
539                                 break;
540                         }
541
542                         if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
543                             vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
544                             vxlan_spec->flags != 0x8) {
545                                 rte_flow_error_set(error,
546                                                    EINVAL,
547                                                    RTE_FLOW_ERROR_TYPE_ITEM,
548                                                    item,
549                                                    "Invalid VXLAN item");
550                                 return -rte_errno;
551                         }
552
553                         /* Check if VNI is masked. */
554                         if (vxlan_spec && vxlan_mask) {
555                                 vni_masked =
556                                         !!memcmp(vxlan_mask->vni, vni_mask,
557                                                  RTE_DIM(vni_mask));
558                                 if (vni_masked) {
559                                         rte_flow_error_set
560                                                 (error,
561                                                  EINVAL,
562                                                  RTE_FLOW_ERROR_TYPE_ITEM,
563                                                  item,
564                                                  "Invalid VNI mask");
565                                         return -rte_errno;
566                                 }
567
568                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
569                                            vxlan_spec->vni, 3);
570                                 filter->vni =
571                                         rte_be_to_cpu_32(tenant_id_be);
572                                 filter->tunnel_type =
573                                  CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
574                         }
575                         break;
576                 case RTE_FLOW_ITEM_TYPE_NVGRE:
577                         nvgre_spec = item->spec;
578                         nvgre_mask = item->mask;
579                         /* Check if NVGRE item is used to describe protocol.
580                          * If yes, both spec and mask should be NULL.
581                          * If no, both spec and mask shouldn't be NULL.
582                          */
583                         if ((!nvgre_spec && nvgre_mask) ||
584                             (nvgre_spec && !nvgre_mask)) {
585                                 rte_flow_error_set(error,
586                                                    EINVAL,
587                                                    RTE_FLOW_ERROR_TYPE_ITEM,
588                                                    item,
589                                                    "Invalid NVGRE item");
590                                 return -rte_errno;
591                         }
592
593                         if (!nvgre_spec && !nvgre_mask) {
594                                 filter->tunnel_type =
595                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
596                                 break;
597                         }
598
599                         if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
600                             nvgre_spec->protocol != 0x6558) {
601                                 rte_flow_error_set(error,
602                                                    EINVAL,
603                                                    RTE_FLOW_ERROR_TYPE_ITEM,
604                                                    item,
605                                                    "Invalid NVGRE item");
606                                 return -rte_errno;
607                         }
608
609                         if (nvgre_spec && nvgre_mask) {
610                                 tni_masked =
611                                         !!memcmp(nvgre_mask->tni, tni_mask,
612                                                  RTE_DIM(tni_mask));
613                                 if (tni_masked) {
614                                         rte_flow_error_set
615                                                 (error,
616                                                  EINVAL,
617                                                  RTE_FLOW_ERROR_TYPE_ITEM,
618                                                  item,
619                                                  "Invalid TNI mask");
620                                         return -rte_errno;
621                                 }
622                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
623                                            nvgre_spec->tni, 3);
624                                 filter->vni =
625                                         rte_be_to_cpu_32(tenant_id_be);
626                                 filter->tunnel_type =
627                                  CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
628                         }
629                         break;
630
631                 case RTE_FLOW_ITEM_TYPE_GRE:
632                         gre_spec = (const struct rte_flow_item_gre *)item->spec;
633                         gre_mask = (const struct rte_flow_item_gre *)item->mask;
634
635                         /*
636                          *Check if GRE item is used to describe protocol.
637                          * If yes, both spec and mask should be NULL.
638                          * If no, both spec and mask shouldn't be NULL.
639                          */
640                         if (!!gre_spec ^ !!gre_mask) {
641                                 rte_flow_error_set(error, EINVAL,
642                                                    RTE_FLOW_ERROR_TYPE_ITEM,
643                                                    item,
644                                                    "Invalid GRE item");
645                                 return -rte_errno;
646                         }
647
648                         if (!gre_spec && !gre_mask) {
649                                 filter->tunnel_type =
650                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE;
651                                 break;
652                         }
653                         break;
654
655                 case RTE_FLOW_ITEM_TYPE_VF:
656                         vf_spec = item->spec;
657                         vf = vf_spec->id;
658                         if (!BNXT_PF(bp)) {
659                                 rte_flow_error_set(error,
660                                                    EINVAL,
661                                                    RTE_FLOW_ERROR_TYPE_ITEM,
662                                                    item,
663                                                    "Configuring on a VF!");
664                                 return -rte_errno;
665                         }
666
667                         if (vf >= bp->pdev->max_vfs) {
668                                 rte_flow_error_set(error,
669                                                    EINVAL,
670                                                    RTE_FLOW_ERROR_TYPE_ITEM,
671                                                    item,
672                                                    "Incorrect VF id!");
673                                 return -rte_errno;
674                         }
675
676                         if (!attr->transfer) {
677                                 rte_flow_error_set(error,
678                                                    ENOTSUP,
679                                                    RTE_FLOW_ERROR_TYPE_ITEM,
680                                                    item,
681                                                    "Matching VF traffic without"
682                                                    " affecting it (transfer attribute)"
683                                                    " is unsupported");
684                                 return -rte_errno;
685                         }
686
687                         filter->mirror_vnic_id =
688                         dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
689                         if (dflt_vnic < 0) {
690                                 /* This simply indicates there's no driver
691                                  * loaded. This is not an error.
692                                  */
693                                 rte_flow_error_set
694                                         (error,
695                                          EINVAL,
696                                          RTE_FLOW_ERROR_TYPE_ITEM,
697                                          item,
698                                          "Unable to get default VNIC for VF");
699                                 return -rte_errno;
700                         }
701
702                         filter->mirror_vnic_id = dflt_vnic;
703                         en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
704                         break;
705                 default:
706                         break;
707                 }
708                 item++;
709         }
710         filter->enables = en;
711         filter->valid_flags = valid_flags;
712
713         return 0;
714 }
715
716 /* Parse attributes */
717 static int
718 bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
719                      struct rte_flow_error *error)
720 {
721         /* Must be input direction */
722         if (!attr->ingress) {
723                 rte_flow_error_set(error,
724                                    EINVAL,
725                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
726                                    attr,
727                                    "Only support ingress.");
728                 return -rte_errno;
729         }
730
731         /* Not supported */
732         if (attr->egress) {
733                 rte_flow_error_set(error,
734                                    EINVAL,
735                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
736                                    attr,
737                                    "No support for egress.");
738                 return -rte_errno;
739         }
740
741         return 0;
742 }
743
744 static struct bnxt_filter_info *
745 bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
746 {
747         struct bnxt_filter_info *mf, *f0;
748         struct bnxt_vnic_info *vnic0;
749         int i;
750
751         vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
752         f0 = STAILQ_FIRST(&vnic0->filter);
753
754         /* This flow has same DST MAC as the port/l2 filter. */
755         if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
756                 return f0;
757
758         for (i = bp->max_vnics - 1; i >= 0; i--) {
759                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
760
761                 if (vnic->fw_vnic_id == INVALID_VNIC_ID)
762                         continue;
763
764                 STAILQ_FOREACH(mf, &vnic->filter, next) {
765
766                         if (mf->matching_l2_fltr_ptr)
767                                 continue;
768
769                         if (mf->ethertype == nf->ethertype &&
770                             mf->l2_ovlan == nf->l2_ovlan &&
771                             mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
772                             mf->l2_ivlan == nf->l2_ivlan &&
773                             mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
774                             !memcmp(mf->src_macaddr, nf->src_macaddr,
775                                     RTE_ETHER_ADDR_LEN) &&
776                             !memcmp(mf->dst_macaddr, nf->dst_macaddr,
777                                     RTE_ETHER_ADDR_LEN))
778                                 return mf;
779                 }
780         }
781         return NULL;
782 }
783
784 static struct bnxt_filter_info *
785 bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
786                       struct bnxt_vnic_info *vnic)
787 {
788         struct bnxt_filter_info *filter1;
789         int rc;
790
791         /* Alloc new L2 filter.
792          * This flow needs MAC filter which does not match any existing
793          * L2 filters.
794          */
795         filter1 = bnxt_get_unused_filter(bp);
796         if (filter1 == NULL)
797                 return NULL;
798
799         memcpy(filter1, nf, sizeof(*filter1));
800
801         filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE;
802         filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
803         if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
804             nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) {
805                 filter1->flags |=
806                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
807                 PMD_DRV_LOG(DEBUG, "Create Outer filter\n");
808         }
809
810         if (nf->filter_type == HWRM_CFA_L2_FILTER &&
811             (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
812              nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) {
813                 PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n");
814                 filter1->flags |=
815                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID;
816                 memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN);
817         } else {
818                 PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
819                 memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
820         }
821
822         if (nf->priority &&
823             (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG ||
824              nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
825                 /* Tell the FW where to place the filter in the table. */
826                 if (nf->priority > 65535) {
827                         filter1->pri_hint =
828                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER;
829                         /* This will place the filter in TCAM */
830                         filter1->l2_filter_id_hint = (uint64_t)-1;
831                 }
832         }
833
834         if (nf->valid_flags & (BNXT_FLOW_L2_DST_VALID_FLAG |
835                                BNXT_FLOW_L2_SRC_VALID_FLAG |
836                                BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
837                                BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
838                 filter1->enables =
839                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
840                         L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
841                 memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
842         }
843
844         if (nf->valid_flags & BNXT_FLOW_L2_DROP_FLAG) {
845                 filter1->flags |=
846                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP;
847                 if (nf->ethertype == RTE_ETHER_TYPE_IPV4) {
848                         /* Num VLANs for drop filter will/should be 0.
849                          * If the req is memset to 0, then the count will
850                          * be automatically set to 0.
851                          */
852                         if (nf->valid_flags & BNXT_FLOW_PARSE_INNER_FLAG) {
853                                 filter1->enables |=
854                                         L2_FILTER_ALLOC_INPUT_EN_T_NUM_VLANS;
855                         } else {
856                                 filter1->enables |=
857                                         L2_FILTER_ALLOC_INPUT_EN_NUM_VLANS;
858                                 filter1->flags |=
859                                 HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
860                         }
861                 }
862         }
863
864         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
865                                      filter1);
866         if (rc) {
867                 bnxt_free_filter(bp, filter1);
868                 return NULL;
869         }
870         return filter1;
871 }
872
873 struct bnxt_filter_info *
874 bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
875                    struct bnxt_vnic_info *vnic)
876 {
877         struct bnxt_filter_info *l2_filter = NULL;
878
879         l2_filter = bnxt_find_matching_l2_filter(bp, nf);
880         if (l2_filter) {
881                 l2_filter->l2_ref_cnt++;
882         } else {
883                 l2_filter = bnxt_create_l2_filter(bp, nf, vnic);
884                 if (l2_filter) {
885                         STAILQ_INSERT_TAIL(&vnic->filter, l2_filter, next);
886                         l2_filter->vnic = vnic;
887                 }
888         }
889         nf->matching_l2_fltr_ptr = l2_filter;
890
891         return l2_filter;
892 }
893
894 static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
895 {
896         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
897         uint64_t rx_offloads = dev_conf->rxmode.offloads;
898         int rc;
899
900         rc = bnxt_vnic_grp_alloc(bp, vnic);
901         if (rc)
902                 goto ret;
903
904         rc = bnxt_hwrm_vnic_alloc(bp, vnic);
905         if (rc) {
906                 PMD_DRV_LOG(ERR, "HWRM vnic alloc failure rc: %x\n", rc);
907                 goto ret;
908         }
909         bp->nr_vnics++;
910
911         /* RSS context is required only when there is more than one RSS ring */
912         if (vnic->rx_queue_cnt > 1) {
913                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0 /* ctx_idx 0 */);
914                 if (rc) {
915                         PMD_DRV_LOG(ERR,
916                                     "HWRM vnic ctx alloc failure: %x\n", rc);
917                         goto ret;
918                 }
919         } else {
920                 PMD_DRV_LOG(DEBUG, "No RSS context required\n");
921         }
922
923         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
924                 vnic->vlan_strip = true;
925         else
926                 vnic->vlan_strip = false;
927
928         rc = bnxt_hwrm_vnic_cfg(bp, vnic);
929         if (rc)
930                 goto ret;
931
932         bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
933
934 ret:
935         return rc;
936 }
937
938 static int match_vnic_rss_cfg(struct bnxt *bp,
939                               struct bnxt_vnic_info *vnic,
940                               const struct rte_flow_action_rss *rss)
941 {
942         unsigned int match = 0, i;
943
944         if (vnic->rx_queue_cnt != rss->queue_num)
945                 return -EINVAL;
946
947         for (i = 0; i < rss->queue_num; i++) {
948                 if (!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt &&
949                     !bp->rx_queues[rss->queue[i]]->rx_started)
950                         return -EINVAL;
951         }
952
953         for (i = 0; i < vnic->rx_queue_cnt; i++) {
954                 int j;
955
956                 for (j = 0; j < vnic->rx_queue_cnt; j++) {
957                         if (bp->grp_info[rss->queue[i]].fw_grp_id ==
958                             vnic->fw_grp_ids[j])
959                                 match++;
960                 }
961         }
962
963         if (match != vnic->rx_queue_cnt) {
964                 PMD_DRV_LOG(ERR,
965                             "VNIC queue count %d vs queues matched %d\n",
966                             match, vnic->rx_queue_cnt);
967                 return -EINVAL;
968         }
969
970         return 0;
971 }
972
973 static void
974 bnxt_update_filter_flags_en(struct bnxt_filter_info *filter,
975                             struct bnxt_filter_info *filter1,
976                             int use_ntuple)
977 {
978         if (!use_ntuple &&
979             !(filter->valid_flags &
980               ~(BNXT_FLOW_L2_DST_VALID_FLAG |
981                 BNXT_FLOW_L2_SRC_VALID_FLAG |
982                 BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
983                 BNXT_FLOW_L2_INNER_DST_VALID_FLAG |
984                 BNXT_FLOW_L2_DROP_FLAG |
985                 BNXT_FLOW_PARSE_INNER_FLAG))) {
986                 filter->flags = filter1->flags;
987                 filter->enables = filter1->enables;
988                 filter->filter_type = HWRM_CFA_L2_FILTER;
989                 memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN);
990                 memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
991                 filter->pri_hint = filter1->pri_hint;
992                 filter->l2_filter_id_hint = filter1->l2_filter_id_hint;
993         }
994         filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
995         filter->l2_ref_cnt = filter1->l2_ref_cnt;
996         filter->flow_id = filter1->flow_id;
997         PMD_DRV_LOG(DEBUG,
998                 "l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n",
999                 filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt);
1000 }
1001
1002 static int
1003 bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
1004                              const struct rte_flow_item pattern[],
1005                              const struct rte_flow_action actions[],
1006                              const struct rte_flow_attr *attr,
1007                              struct rte_flow_error *error,
1008                              struct bnxt_filter_info *filter)
1009 {
1010         const struct rte_flow_action *act =
1011                 bnxt_flow_non_void_action(actions);
1012         struct bnxt *bp = dev->data->dev_private;
1013         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1014         struct bnxt_vnic_info *vnic = NULL, *vnic0 = NULL;
1015         const struct rte_flow_action_queue *act_q;
1016         const struct rte_flow_action_vf *act_vf;
1017         struct bnxt_filter_info *filter1 = NULL;
1018         const struct rte_flow_action_rss *rss;
1019         struct bnxt_rx_queue *rxq = NULL;
1020         int dflt_vnic, vnic_id;
1021         unsigned int rss_idx;
1022         uint32_t vf = 0, i;
1023         int rc, use_ntuple;
1024
1025         rc =
1026         bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter);
1027         if (rc != 0)
1028                 goto ret;
1029
1030         rc = bnxt_flow_parse_attr(attr, error);
1031         if (rc != 0)
1032                 goto ret;
1033
1034         /* Since we support ingress attribute only - right now. */
1035         if (filter->filter_type == HWRM_CFA_EM_FILTER)
1036                 filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
1037
1038         use_ntuple = bnxt_filter_type_check(pattern, error);
1039
1040 start:
1041         switch (act->type) {
1042         case RTE_FLOW_ACTION_TYPE_QUEUE:
1043                 /* Allow this flow. Redirect to a VNIC. */
1044                 act_q = (const struct rte_flow_action_queue *)act->conf;
1045                 if (!act_q->index || act_q->index >= bp->rx_nr_rings) {
1046                         rte_flow_error_set(error,
1047                                            EINVAL,
1048                                            RTE_FLOW_ERROR_TYPE_ACTION,
1049                                            act,
1050                                            "Invalid queue ID.");
1051                         rc = -rte_errno;
1052                         goto ret;
1053                 }
1054                 PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
1055
1056                 vnic_id = attr->group;
1057                 if (!vnic_id) {
1058                         PMD_DRV_LOG(DEBUG, "Group id is 0\n");
1059                         vnic_id = act_q->index;
1060                 }
1061
1062                 BNXT_VALID_VNIC_OR_RET(bp, vnic_id);
1063
1064                 vnic = &bp->vnic_info[vnic_id];
1065                 if (vnic->rx_queue_cnt) {
1066                         if (vnic->start_grp_id != act_q->index) {
1067                                 PMD_DRV_LOG(ERR,
1068                                             "VNIC already in use\n");
1069                                 rte_flow_error_set(error,
1070                                                    EINVAL,
1071                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1072                                                    act,
1073                                                    "VNIC already in use");
1074                                 rc = -rte_errno;
1075                                 goto ret;
1076                         }
1077                         goto use_vnic;
1078                 }
1079
1080                 rxq = bp->rx_queues[act_q->index];
1081
1082                 if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
1083                     vnic->fw_vnic_id != INVALID_HW_RING_ID)
1084                         goto use_vnic;
1085
1086                 if (!rxq) {
1087                         PMD_DRV_LOG(ERR,
1088                                     "Queue invalid or used with other VNIC\n");
1089                         rte_flow_error_set(error,
1090                                            EINVAL,
1091                                            RTE_FLOW_ERROR_TYPE_ACTION,
1092                                            act,
1093                                            "Queue invalid queue or in use");
1094                         rc = -rte_errno;
1095                         goto ret;
1096                 }
1097
1098                 rxq->vnic = vnic;
1099                 rxq->rx_started = 1;
1100                 vnic->rx_queue_cnt++;
1101                 vnic->start_grp_id = act_q->index;
1102                 vnic->end_grp_id = act_q->index;
1103                 vnic->func_default = 0; //This is not a default VNIC.
1104
1105                 PMD_DRV_LOG(DEBUG, "VNIC found\n");
1106
1107                 rc = bnxt_vnic_prep(bp, vnic);
1108                 if (rc)  {
1109                         rte_flow_error_set(error,
1110                                            EINVAL,
1111                                            RTE_FLOW_ERROR_TYPE_ACTION,
1112                                            act,
1113                                            "VNIC prep fail");
1114                         rc = -rte_errno;
1115                         goto ret;
1116                 }
1117
1118                 PMD_DRV_LOG(DEBUG,
1119                             "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
1120                             act_q->index, vnic, vnic->fw_grp_ids);
1121
1122 use_vnic:
1123                 vnic->ff_pool_idx = vnic_id;
1124                 PMD_DRV_LOG(DEBUG,
1125                             "Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
1126                 filter->dst_id = vnic->fw_vnic_id;
1127
1128                 /* For ntuple filter, create the L2 filter with default VNIC.
1129                  * The user specified redirect queue will be set while creating
1130                  * the ntuple filter in hardware.
1131                  */
1132                 vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
1133                 if (use_ntuple)
1134                         filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1135                 else
1136                         filter1 = bnxt_get_l2_filter(bp, filter, vnic);
1137                 if (filter1 == NULL) {
1138                         rte_flow_error_set(error,
1139                                            ENOSPC,
1140                                            RTE_FLOW_ERROR_TYPE_ACTION,
1141                                            act,
1142                                            "Filter not available");
1143                         rc = -rte_errno;
1144                         goto ret;
1145                 }
1146
1147                 PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n",
1148                             filter, filter1, filter1->l2_ref_cnt);
1149                 bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
1150                 break;
1151         case RTE_FLOW_ACTION_TYPE_DROP:
1152                 vnic0 = &bp->vnic_info[0];
1153                 filter->dst_id = vnic0->fw_vnic_id;
1154                 filter->valid_flags |= BNXT_FLOW_L2_DROP_FLAG;
1155                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1156                 if (filter1 == NULL) {
1157                         rte_flow_error_set(error,
1158                                            ENOSPC,
1159                                            RTE_FLOW_ERROR_TYPE_ACTION,
1160                                            act,
1161                                            "Filter not available");
1162                         rc = -rte_errno;
1163                         goto ret;
1164                 }
1165
1166                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1167                         filter->flags =
1168                                 HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
1169                 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1170                         filter->flags =
1171                                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
1172
1173                 bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
1174                 break;
1175         case RTE_FLOW_ACTION_TYPE_COUNT:
1176                 vnic0 = &bp->vnic_info[0];
1177                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1178                 if (filter1 == NULL) {
1179                         rte_flow_error_set(error,
1180                                            ENOSPC,
1181                                            RTE_FLOW_ERROR_TYPE_ACTION,
1182                                            act,
1183                                            "New filter not available");
1184                         rc = -rte_errno;
1185                         goto ret;
1186                 }
1187
1188                 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1189                 filter->flow_id = filter1->flow_id;
1190                 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
1191                 break;
1192         case RTE_FLOW_ACTION_TYPE_VF:
1193                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1194                 vf = act_vf->id;
1195
1196                 if (filter->tunnel_type ==
1197                     CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
1198                     filter->tunnel_type ==
1199                     CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) {
1200                         /* If issued on a VF, ensure id is 0 and is trusted */
1201                         if (BNXT_VF(bp)) {
1202                                 if (!BNXT_VF_IS_TRUSTED(bp) || vf) {
1203                                         rte_flow_error_set(error, EINVAL,
1204                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1205                                                 act,
1206                                                 "Incorrect VF");
1207                                         rc = -rte_errno;
1208                                         goto ret;
1209                                 }
1210                         }
1211
1212                         filter->enables |= filter->tunnel_type;
1213                         filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER;
1214                         goto done;
1215                 }
1216
1217                 if (vf >= bp->pdev->max_vfs) {
1218                         rte_flow_error_set(error,
1219                                            EINVAL,
1220                                            RTE_FLOW_ERROR_TYPE_ACTION,
1221                                            act,
1222                                            "Incorrect VF id!");
1223                         rc = -rte_errno;
1224                         goto ret;
1225                 }
1226
1227                 filter->mirror_vnic_id =
1228                 dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
1229                 if (dflt_vnic < 0) {
1230                         /* This simply indicates there's no driver loaded.
1231                          * This is not an error.
1232                          */
1233                         rte_flow_error_set(error,
1234                                            EINVAL,
1235                                            RTE_FLOW_ERROR_TYPE_ACTION,
1236                                            act,
1237                                            "Unable to get default VNIC for VF");
1238                         rc = -rte_errno;
1239                         goto ret;
1240                 }
1241
1242                 filter->mirror_vnic_id = dflt_vnic;
1243                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
1244
1245                 vnic0 = &bp->vnic_info[0];
1246                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1247                 if (filter1 == NULL) {
1248                         rte_flow_error_set(error,
1249                                            ENOSPC,
1250                                            RTE_FLOW_ERROR_TYPE_ACTION,
1251                                            act,
1252                                            "New filter not available");
1253                         rc = -rte_errno;
1254                         goto ret;
1255                 }
1256
1257                 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1258                 filter->flow_id = filter1->flow_id;
1259                 break;
1260         case RTE_FLOW_ACTION_TYPE_RSS:
1261                 rss = (const struct rte_flow_action_rss *)act->conf;
1262
1263                 vnic_id = attr->group;
1264
1265                 BNXT_VALID_VNIC_OR_RET(bp, vnic_id);
1266
1267                 vnic = &bp->vnic_info[vnic_id];
1268
1269                 /* Check if requested RSS config matches RSS config of VNIC
1270                  * only if it is not a fresh VNIC configuration.
1271                  * Otherwise the existing VNIC configuration can be used.
1272                  */
1273                 if (vnic->rx_queue_cnt) {
1274                         rc = match_vnic_rss_cfg(bp, vnic, rss);
1275                         if (rc) {
1276                                 PMD_DRV_LOG(ERR,
1277                                             "VNIC and RSS config mismatch\n");
1278                                 rte_flow_error_set(error,
1279                                                    EINVAL,
1280                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1281                                                    act,
1282                                                    "VNIC and RSS cfg mismatch");
1283                                 rc = -rte_errno;
1284                                 goto ret;
1285                         }
1286                         goto vnic_found;
1287                 }
1288
1289                 for (i = 0; i < rss->queue_num; i++) {
1290                         PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n",
1291                                     rss->queue[i]);
1292
1293                         if (!rss->queue[i] ||
1294                             rss->queue[i] >= bp->rx_nr_rings ||
1295                             !bp->rx_queues[rss->queue[i]]) {
1296                                 rte_flow_error_set(error,
1297                                                    EINVAL,
1298                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1299                                                    act,
1300                                                    "Invalid queue ID for RSS");
1301                                 rc = -rte_errno;
1302                                 goto ret;
1303                         }
1304                         rxq = bp->rx_queues[rss->queue[i]];
1305
1306                         if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] !=
1307                             INVALID_HW_RING_ID) {
1308                                 PMD_DRV_LOG(ERR,
1309                                             "queue active with other VNIC\n");
1310                                 rte_flow_error_set(error,
1311                                                    EINVAL,
1312                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1313                                                    act,
1314                                                    "Invalid queue ID for RSS");
1315                                 rc = -rte_errno;
1316                                 goto ret;
1317                         }
1318
1319                         rxq->vnic = vnic;
1320                         rxq->rx_started = 1;
1321                         vnic->rx_queue_cnt++;
1322                 }
1323
1324                 vnic->start_grp_id = rss->queue[0];
1325                 vnic->end_grp_id = rss->queue[rss->queue_num - 1];
1326                 vnic->func_default = 0; //This is not a default VNIC.
1327
1328                 rc = bnxt_vnic_prep(bp, vnic);
1329                 if (rc) {
1330                         rte_flow_error_set(error,
1331                                            EINVAL,
1332                                            RTE_FLOW_ERROR_TYPE_ACTION,
1333                                            act,
1334                                            "VNIC prep fail");
1335                         rc = -rte_errno;
1336                         goto ret;
1337                 }
1338
1339                 PMD_DRV_LOG(DEBUG,
1340                             "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
1341                             vnic_id, vnic, vnic->fw_grp_ids);
1342
1343                 vnic->ff_pool_idx = vnic_id;
1344                 PMD_DRV_LOG(DEBUG,
1345                             "Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx);
1346
1347                 /* This can be done only after vnic_grp_alloc is done. */
1348                 for (i = 0; i < vnic->rx_queue_cnt; i++) {
1349                         vnic->fw_grp_ids[i] =
1350                                 bp->grp_info[rss->queue[i]].fw_grp_id;
1351                         /* Make sure vnic0 does not use these rings. */
1352                         bp->vnic_info[0].fw_grp_ids[rss->queue[i]] =
1353                                 INVALID_HW_RING_ID;
1354                 }
1355
1356                 for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ) {
1357                         for (i = 0; i < vnic->rx_queue_cnt; i++)
1358                                 vnic->rss_table[rss_idx++] =
1359                                         vnic->fw_grp_ids[i];
1360                 }
1361
1362                 /* Configure RSS only if the queue count is > 1 */
1363                 if (vnic->rx_queue_cnt > 1) {
1364                         vnic->hash_type =
1365                                 bnxt_rte_to_hwrm_hash_types(rss->types);
1366
1367                         if (!rss->key_len) {
1368                                 /* If hash key has not been specified,
1369                                  * use random hash key.
1370                                  */
1371                                 prandom_bytes(vnic->rss_hash_key,
1372                                               HW_HASH_KEY_SIZE);
1373                         } else {
1374                                 if (rss->key_len > HW_HASH_KEY_SIZE)
1375                                         memcpy(vnic->rss_hash_key,
1376                                                rss->key,
1377                                                HW_HASH_KEY_SIZE);
1378                                 else
1379                                         memcpy(vnic->rss_hash_key,
1380                                                rss->key,
1381                                                rss->key_len);
1382                         }
1383                         bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1384                 } else {
1385                         PMD_DRV_LOG(DEBUG, "No RSS config required\n");
1386                 }
1387
1388 vnic_found:
1389                 filter->dst_id = vnic->fw_vnic_id;
1390                 filter1 = bnxt_get_l2_filter(bp, filter, vnic);
1391                 if (filter1 == NULL) {
1392                         rte_flow_error_set(error,
1393                                            ENOSPC,
1394                                            RTE_FLOW_ERROR_TYPE_ACTION,
1395                                            act,
1396                                            "New filter not available");
1397                         rc = -rte_errno;
1398                         goto ret;
1399                 }
1400
1401                 PMD_DRV_LOG(DEBUG, "L2 filter created\n");
1402                 bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
1403                 break;
1404         case RTE_FLOW_ACTION_TYPE_MARK:
1405                 if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) {
1406                         PMD_DRV_LOG(DEBUG,
1407                                     "Disable vector processing for mark\n");
1408                         rte_flow_error_set(error,
1409                                            ENOTSUP,
1410                                            RTE_FLOW_ERROR_TYPE_ACTION,
1411                                            act,
1412                                            "Disable vector processing for mark");
1413                         rc = -rte_errno;
1414                         goto ret;
1415                 }
1416
1417                 if (bp->mark_table == NULL) {
1418                         rte_flow_error_set(error,
1419                                            ENOMEM,
1420                                            RTE_FLOW_ERROR_TYPE_ACTION,
1421                                            act,
1422                                            "Mark table not allocated.");
1423                         rc = -rte_errno;
1424                         goto ret;
1425                 }
1426
1427                 filter->valid_flags |= BNXT_FLOW_MARK_FLAG;
1428                 filter->mark = ((const struct rte_flow_action_mark *)
1429                                 act->conf)->id;
1430                 PMD_DRV_LOG(DEBUG, "Mark the flow %d\n", filter->mark);
1431                 break;
1432         default:
1433                 rte_flow_error_set(error,
1434                                    EINVAL,
1435                                    RTE_FLOW_ERROR_TYPE_ACTION,
1436                                    act,
1437                                    "Invalid action.");
1438                 rc = -rte_errno;
1439                 goto ret;
1440         }
1441
1442 done:
1443         act = bnxt_flow_non_void_action(++act);
1444         while (act->type != RTE_FLOW_ACTION_TYPE_END)
1445                 goto start;
1446
1447         return rc;
1448 ret:
1449
1450         if (filter1) {
1451                 bnxt_hwrm_clear_l2_filter(bp, filter1);
1452                 bnxt_free_filter(bp, filter1);
1453         }
1454
1455         if (rte_errno)  {
1456                 if (vnic && STAILQ_EMPTY(&vnic->filter))
1457                         vnic->rx_queue_cnt = 0;
1458
1459                 if (rxq && !vnic->rx_queue_cnt)
1460                         rxq->vnic = &bp->vnic_info[0];
1461         }
1462         return -rte_errno;
1463 }
1464
1465 static
1466 struct bnxt_vnic_info *find_matching_vnic(struct bnxt *bp,
1467                                           struct bnxt_filter_info *filter)
1468 {
1469         struct bnxt_vnic_info *vnic = NULL;
1470         unsigned int i;
1471
1472         for (i = 0; i < bp->max_vnics; i++) {
1473                 vnic = &bp->vnic_info[i];
1474                 if (vnic->fw_vnic_id != INVALID_VNIC_ID &&
1475                     filter->dst_id == vnic->fw_vnic_id) {
1476                         PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n",
1477                                     vnic->ff_pool_idx);
1478                         return vnic;
1479                 }
1480         }
1481         return NULL;
1482 }
1483
1484 static int
1485 bnxt_flow_validate(struct rte_eth_dev *dev,
1486                    const struct rte_flow_attr *attr,
1487                    const struct rte_flow_item pattern[],
1488                    const struct rte_flow_action actions[],
1489                    struct rte_flow_error *error)
1490 {
1491         struct bnxt *bp = dev->data->dev_private;
1492         struct bnxt_vnic_info *vnic = NULL;
1493         struct bnxt_filter_info *filter;
1494         int ret = 0;
1495
1496         bnxt_acquire_flow_lock(bp);
1497         ret = bnxt_flow_args_validate(attr, pattern, actions, error);
1498         if (ret != 0) {
1499                 bnxt_release_flow_lock(bp);
1500                 return ret;
1501         }
1502
1503         filter = bnxt_get_unused_filter(bp);
1504         if (filter == NULL) {
1505                 PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
1506                 bnxt_release_flow_lock(bp);
1507                 return -ENOMEM;
1508         }
1509
1510         ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
1511                                            error, filter);
1512         if (ret)
1513                 goto exit;
1514
1515         vnic = find_matching_vnic(bp, filter);
1516         if (vnic) {
1517                 if (STAILQ_EMPTY(&vnic->filter)) {
1518                         rte_free(vnic->fw_grp_ids);
1519                         bnxt_hwrm_vnic_ctx_free(bp, vnic);
1520                         bnxt_hwrm_vnic_free(bp, vnic);
1521                         vnic->rx_queue_cnt = 0;
1522                         PMD_DRV_LOG(DEBUG, "Free VNIC\n");
1523                 }
1524         }
1525
1526         if (filter->filter_type == HWRM_CFA_EM_FILTER)
1527                 bnxt_hwrm_clear_em_filter(bp, filter);
1528         else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1529                 bnxt_hwrm_clear_ntuple_filter(bp, filter);
1530         else
1531                 bnxt_hwrm_clear_l2_filter(bp, filter);
1532
1533 exit:
1534         /* No need to hold on to this filter if we are just validating flow */
1535         bnxt_free_filter(bp, filter);
1536         bnxt_release_flow_lock(bp);
1537
1538         return ret;
1539 }
1540
1541 static void
1542 bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter,
1543                    struct bnxt_filter_info *new_filter)
1544 {
1545         /* Clear the new L2 filter that was created in the previous step in
1546          * bnxt_validate_and_parse_flow. For L2 filters, we will use the new
1547          * filter which points to the new destination queue and so we clear
1548          * the previous L2 filter. For ntuple filters, we are going to reuse
1549          * the old L2 filter and create new NTUPLE filter with this new
1550          * destination queue subsequently during bnxt_flow_create. So we
1551          * decrement the ref cnt of the L2 filter that would've been bumped
1552          * up previously in bnxt_validate_and_parse_flow as the old n-tuple
1553          * filter that was referencing it will be deleted now.
1554          */
1555         bnxt_hwrm_clear_l2_filter(bp, old_filter);
1556         if (new_filter->filter_type == HWRM_CFA_L2_FILTER) {
1557                 bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter);
1558         } else {
1559                 if (new_filter->filter_type == HWRM_CFA_EM_FILTER)
1560                         bnxt_hwrm_clear_em_filter(bp, old_filter);
1561                 if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1562                         bnxt_hwrm_clear_ntuple_filter(bp, old_filter);
1563         }
1564 }
1565
1566 static int
1567 bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
1568 {
1569         struct bnxt_filter_info *mf;
1570         struct rte_flow *flow;
1571         int i;
1572
1573         for (i = bp->max_vnics - 1; i >= 0; i--) {
1574                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1575
1576                 if (vnic->fw_vnic_id == INVALID_VNIC_ID)
1577                         continue;
1578
1579                 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1580                         mf = flow->filter;
1581
1582                         if (mf->filter_type == nf->filter_type &&
1583                             mf->flags == nf->flags &&
1584                             mf->src_port == nf->src_port &&
1585                             mf->src_port_mask == nf->src_port_mask &&
1586                             mf->dst_port == nf->dst_port &&
1587                             mf->dst_port_mask == nf->dst_port_mask &&
1588                             mf->ip_protocol == nf->ip_protocol &&
1589                             mf->ip_addr_type == nf->ip_addr_type &&
1590                             mf->ethertype == nf->ethertype &&
1591                             mf->vni == nf->vni &&
1592                             mf->tunnel_type == nf->tunnel_type &&
1593                             mf->l2_ovlan == nf->l2_ovlan &&
1594                             mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
1595                             mf->l2_ivlan == nf->l2_ivlan &&
1596                             mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
1597                             !memcmp(mf->l2_addr, nf->l2_addr,
1598                                     RTE_ETHER_ADDR_LEN) &&
1599                             !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
1600                                     RTE_ETHER_ADDR_LEN) &&
1601                             !memcmp(mf->src_macaddr, nf->src_macaddr,
1602                                     RTE_ETHER_ADDR_LEN) &&
1603                             !memcmp(mf->dst_macaddr, nf->dst_macaddr,
1604                                     RTE_ETHER_ADDR_LEN) &&
1605                             !memcmp(mf->src_ipaddr, nf->src_ipaddr,
1606                                     sizeof(nf->src_ipaddr)) &&
1607                             !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
1608                                     sizeof(nf->src_ipaddr_mask)) &&
1609                             !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
1610                                     sizeof(nf->dst_ipaddr)) &&
1611                             !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
1612                                     sizeof(nf->dst_ipaddr_mask))) {
1613                                 if (mf->dst_id == nf->dst_id)
1614                                         return -EEXIST;
1615                                 /* Free the old filter, update flow
1616                                  * with new filter
1617                                  */
1618                                 bnxt_update_filter(bp, mf, nf);
1619                                 STAILQ_REMOVE(&vnic->filter, mf,
1620                                               bnxt_filter_info, next);
1621                                 STAILQ_INSERT_TAIL(&vnic->filter, nf, next);
1622                                 bnxt_free_filter(bp, mf);
1623                                 flow->filter = nf;
1624                                 return -EXDEV;
1625                         }
1626                 }
1627         }
1628         return 0;
1629 }
1630
1631 static struct rte_flow *
1632 bnxt_flow_create(struct rte_eth_dev *dev,
1633                  const struct rte_flow_attr *attr,
1634                  const struct rte_flow_item pattern[],
1635                  const struct rte_flow_action actions[],
1636                  struct rte_flow_error *error)
1637 {
1638         struct bnxt *bp = dev->data->dev_private;
1639         struct bnxt_vnic_info *vnic = NULL;
1640         struct bnxt_filter_info *filter;
1641         bool update_flow = false;
1642         struct rte_flow *flow;
1643         int ret = 0;
1644         uint32_t tun_type;
1645
1646         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1647                 rte_flow_error_set(error, EINVAL,
1648                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1649                                    "Failed to create flow, Not a Trusted VF!");
1650                 return NULL;
1651         }
1652
1653         if (!dev->data->dev_started) {
1654                 rte_flow_error_set(error,
1655                                    EINVAL,
1656                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1657                                    NULL,
1658                                    "Device must be started");
1659                 return NULL;
1660         }
1661
1662         flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
1663         if (!flow) {
1664                 rte_flow_error_set(error, ENOMEM,
1665                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1666                                    "Failed to allocate memory");
1667                 return flow;
1668         }
1669
1670         bnxt_acquire_flow_lock(bp);
1671         ret = bnxt_flow_args_validate(attr, pattern, actions, error);
1672         if (ret != 0) {
1673                 PMD_DRV_LOG(ERR, "Not a validate flow.\n");
1674                 goto free_flow;
1675         }
1676
1677         filter = bnxt_get_unused_filter(bp);
1678         if (filter == NULL) {
1679                 rte_flow_error_set(error, ENOSPC,
1680                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1681                                    "Not enough resources for a new flow");
1682                 goto free_flow;
1683         }
1684
1685         ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
1686                                            error, filter);
1687         if (ret != 0)
1688                 goto free_filter;
1689
1690         ret = bnxt_match_filter(bp, filter);
1691         if (ret == -EEXIST) {
1692                 PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
1693                 /* Clear the filter that was created as part of
1694                  * validate_and_parse_flow() above
1695                  */
1696                 bnxt_hwrm_clear_l2_filter(bp, filter);
1697                 goto free_filter;
1698         } else if (ret == -EXDEV) {
1699                 PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
1700                 PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
1701                 update_flow = true;
1702         }
1703
1704         /* If tunnel redirection to a VF/PF is specified then only tunnel_type
1705          * is set and enable is set to the tunnel type. Issue hwrm cmd directly
1706          * in such a case.
1707          */
1708         if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
1709             filter->enables == filter->tunnel_type) {
1710                 ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
1711                 if (ret) {
1712                         rte_flow_error_set(error, -ret,
1713                                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1714                                            "Unable to query tunnel to VF");
1715                         goto free_filter;
1716                 }
1717                 if (tun_type == (1U << filter->tunnel_type)) {
1718                         ret =
1719                         bnxt_hwrm_tunnel_redirect_free(bp,
1720                                                        filter->tunnel_type);
1721                         if (ret) {
1722                                 PMD_DRV_LOG(ERR,
1723                                             "Unable to free existing tunnel\n");
1724                                 rte_flow_error_set(error, -ret,
1725                                                    RTE_FLOW_ERROR_TYPE_HANDLE,
1726                                                    NULL,
1727                                                    "Unable to free preexisting "
1728                                                    "tunnel on VF");
1729                                 goto free_filter;
1730                         }
1731                 }
1732                 ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type);
1733                 if (ret) {
1734                         rte_flow_error_set(error, -ret,
1735                                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1736                                            "Unable to redirect tunnel to VF");
1737                         goto free_filter;
1738                 }
1739                 vnic = &bp->vnic_info[0];
1740                 goto done;
1741         }
1742
1743         if (filter->filter_type == HWRM_CFA_EM_FILTER) {
1744                 filter->enables |=
1745                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1746                 ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
1747         }
1748
1749         if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
1750                 filter->enables |=
1751                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1752                 ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
1753         }
1754
1755         vnic = find_matching_vnic(bp, filter);
1756 done:
1757         if (!ret || update_flow) {
1758                 flow->filter = filter;
1759                 flow->vnic = vnic;
1760                 if (update_flow) {
1761                         ret = -EXDEV;
1762                         goto free_flow;
1763                 }
1764
1765                 STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
1766                 PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
1767                 STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
1768                 if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
1769                         PMD_DRV_LOG(DEBUG,
1770                                     "Mark action: mark id 0x%x, flow id 0x%x\n",
1771                                     filter->mark, filter->flow_id);
1772
1773                         /* TCAM and EM should be 16-bit only.
1774                          * Other modes not supported.
1775                          */
1776                         bp->mark_table[filter->flow_id & BNXT_FLOW_ID_MASK] =
1777                                 filter->mark;
1778                 }
1779                 bnxt_release_flow_lock(bp);
1780                 return flow;
1781         }
1782
1783 free_filter:
1784         bnxt_free_filter(bp, filter);
1785 free_flow:
1786         if (ret == -EEXIST)
1787                 rte_flow_error_set(error, ret,
1788                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1789                                    "Matching Flow exists.");
1790         else if (ret == -EXDEV)
1791                 rte_flow_error_set(error, 0,
1792                                    RTE_FLOW_ERROR_TYPE_NONE, NULL,
1793                                    "Flow with pattern exists, updating destination queue");
1794         else if (!rte_errno)
1795                 rte_flow_error_set(error, -ret,
1796                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1797                                    "Failed to create flow.");
1798         rte_free(flow);
1799         flow = NULL;
1800         bnxt_release_flow_lock(bp);
1801         return flow;
1802 }
1803
1804 static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
1805                                                struct bnxt_filter_info *filter,
1806                                                struct rte_flow_error *error)
1807 {
1808         uint16_t tun_dst_fid;
1809         uint32_t tun_type;
1810         int ret = 0;
1811
1812         ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
1813         if (ret) {
1814                 rte_flow_error_set(error, -ret,
1815                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1816                                    "Unable to query tunnel to VF");
1817                 return ret;
1818         }
1819         if (tun_type == (1U << filter->tunnel_type)) {
1820                 ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
1821                                                      &tun_dst_fid);
1822                 if (ret) {
1823                         rte_flow_error_set(error, -ret,
1824                                            RTE_FLOW_ERROR_TYPE_HANDLE,
1825                                            NULL,
1826                                            "tunnel_redirect info cmd fail");
1827                         return ret;
1828                 }
1829                 PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
1830                             tun_dst_fid + bp->first_vf_id, bp->fw_fid);
1831
1832                 /* Tunnel doesn't belong to this VF, so don't send HWRM
1833                  * cmd, just delete the flow from driver
1834                  */
1835                 if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id))
1836                         PMD_DRV_LOG(ERR,
1837                                     "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
1838                 else
1839                         ret = bnxt_hwrm_tunnel_redirect_free(bp,
1840                                                         filter->tunnel_type);
1841         }
1842         return ret;
1843 }
1844
1845 static int
1846 _bnxt_flow_destroy(struct bnxt *bp,
1847                    struct rte_flow *flow,
1848                     struct rte_flow_error *error)
1849 {
1850         struct bnxt_filter_info *filter;
1851         struct bnxt_vnic_info *vnic;
1852         int ret = 0;
1853
1854         filter = flow->filter;
1855         vnic = flow->vnic;
1856
1857         if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
1858             filter->enables == filter->tunnel_type) {
1859                 ret = bnxt_handle_tunnel_redirect_destroy(bp, filter, error);
1860                 if (!ret)
1861                         goto done;
1862                 else
1863                         return ret;
1864         }
1865
1866         ret = bnxt_match_filter(bp, filter);
1867         if (ret == 0)
1868                 PMD_DRV_LOG(ERR, "Could not find matching flow\n");
1869
1870         if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
1871                 bp->mark_table[filter->flow_id & BNXT_FLOW_ID_MASK] = 0;
1872                 filter->flow_id = 0;
1873         }
1874
1875         if (filter->filter_type == HWRM_CFA_EM_FILTER)
1876                 ret = bnxt_hwrm_clear_em_filter(bp, filter);
1877         if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1878                 ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1879         ret = bnxt_hwrm_clear_l2_filter(bp, filter);
1880
1881 done:
1882         if (!ret) {
1883                 /* If it is a L2 drop filter, when the filter is created,
1884                  * the FW updates the BC/MC records.
1885                  * Once this filter is removed, issue the set_rx_mask command
1886                  * to reset the BC/MC records in the HW to the settings
1887                  * before the drop counter is created.
1888                  */
1889                 if (filter->valid_flags & BNXT_FLOW_L2_DROP_FLAG)
1890                         bnxt_set_rx_mask_no_vlan(bp, &bp->vnic_info[0]);
1891
1892                 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
1893                 bnxt_free_filter(bp, filter);
1894                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1895                 rte_free(flow);
1896
1897                 /* If this was the last flow associated with this vnic,
1898                  * switch the queue back to RSS pool.
1899                  */
1900                 if (vnic && !vnic->func_default &&
1901                     STAILQ_EMPTY(&vnic->flow_list)) {
1902                         rte_free(vnic->fw_grp_ids);
1903                         if (vnic->rx_queue_cnt > 1)
1904                                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1905
1906                         bnxt_hwrm_vnic_free(bp, vnic);
1907                         vnic->rx_queue_cnt = 0;
1908                 }
1909         } else {
1910                 rte_flow_error_set(error, -ret,
1911                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1912                                    "Failed to destroy flow.");
1913         }
1914
1915         return ret;
1916 }
1917
1918 static int
1919 bnxt_flow_destroy(struct rte_eth_dev *dev,
1920                   struct rte_flow *flow,
1921                   struct rte_flow_error *error)
1922 {
1923         struct bnxt *bp = dev->data->dev_private;
1924         int ret = 0;
1925
1926         bnxt_acquire_flow_lock(bp);
1927         if (!flow) {
1928                 rte_flow_error_set(error, EINVAL,
1929                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1930                                    "Invalid flow: failed to destroy flow.");
1931                 bnxt_release_flow_lock(bp);
1932                 return -EINVAL;
1933         }
1934
1935         if (!flow->filter) {
1936                 rte_flow_error_set(error, EINVAL,
1937                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1938                                    "Invalid flow: failed to destroy flow.");
1939                 bnxt_release_flow_lock(bp);
1940                 return -EINVAL;
1941         }
1942         ret = _bnxt_flow_destroy(bp, flow, error);
1943         bnxt_release_flow_lock(bp);
1944
1945         return ret;
1946 }
1947
1948 static int
1949 bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1950 {
1951         struct bnxt *bp = dev->data->dev_private;
1952         struct bnxt_vnic_info *vnic;
1953         struct rte_flow *flow;
1954         unsigned int i;
1955         int ret = 0;
1956
1957         bnxt_acquire_flow_lock(bp);
1958         for (i = 0; i < bp->max_vnics; i++) {
1959                 vnic = &bp->vnic_info[i];
1960                 if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID)
1961                         continue;
1962
1963                 while (!STAILQ_EMPTY(&vnic->flow_list)) {
1964                         flow = STAILQ_FIRST(&vnic->flow_list);
1965
1966                         if (!flow->filter)
1967                                 continue;
1968
1969                         ret = _bnxt_flow_destroy(bp, flow, error);
1970                         if (ret)
1971                                 break;
1972                 }
1973         }
1974         bnxt_release_flow_lock(bp);
1975
1976         return ret;
1977 }
1978
1979 const struct rte_flow_ops bnxt_flow_ops = {
1980         .validate = bnxt_flow_validate,
1981         .create = bnxt_flow_create,
1982         .destroy = bnxt_flow_destroy,
1983         .flush = bnxt_flow_flush,
1984 };