4e6b4d7fce85b800976a2da4e2c1da91d21446f7
[dpdk.git] / drivers / net / bnxt / bnxt_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Broadcom
3  * All rights reserved.
4  */
5
6 #include <sys/queue.h>
7
8 #include <rte_log.h>
9 #include <rte_malloc.h>
10 #include <rte_flow.h>
11 #include <rte_flow_driver.h>
12 #include <rte_tailq.h>
13
14 #include "bnxt.h"
15 #include "bnxt_filter.h"
16 #include "bnxt_hwrm.h"
17 #include "bnxt_ring.h"
18 #include "bnxt_rxq.h"
19 #include "bnxt_vnic.h"
20 #include "bnxt_util.h"
21 #include "hsi_struct_def_dpdk.h"
22
23 static int
24 bnxt_flow_args_validate(const struct rte_flow_attr *attr,
25                         const struct rte_flow_item pattern[],
26                         const struct rte_flow_action actions[],
27                         struct rte_flow_error *error)
28 {
29         if (!pattern) {
30                 rte_flow_error_set(error,
31                                    EINVAL,
32                                    RTE_FLOW_ERROR_TYPE_ITEM_NUM,
33                                    NULL,
34                                    "NULL pattern.");
35                 return -rte_errno;
36         }
37
38         if (!actions) {
39                 rte_flow_error_set(error,
40                                    EINVAL,
41                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
42                                    NULL,
43                                    "NULL action.");
44                 return -rte_errno;
45         }
46
47         if (!attr) {
48                 rte_flow_error_set(error,
49                                    EINVAL,
50                                    RTE_FLOW_ERROR_TYPE_ATTR,
51                                    NULL,
52                                    "NULL attribute.");
53                 return -rte_errno;
54         }
55
56         return 0;
57 }
58
59 static const struct rte_flow_item *
60 bnxt_flow_non_void_item(const struct rte_flow_item *cur)
61 {
62         while (1) {
63                 if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
64                         return cur;
65                 cur++;
66         }
67 }
68
69 static const struct rte_flow_action *
70 bnxt_flow_non_void_action(const struct rte_flow_action *cur)
71 {
72         while (1) {
73                 if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
74                         return cur;
75                 cur++;
76         }
77 }
78
79 static int
80 bnxt_filter_type_check(const struct rte_flow_item pattern[],
81                        struct rte_flow_error *error __rte_unused)
82 {
83         const struct rte_flow_item *item =
84                 bnxt_flow_non_void_item(pattern);
85         int use_ntuple = 1;
86         bool has_vlan = 0;
87
88         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
89                 switch (item->type) {
90                 case RTE_FLOW_ITEM_TYPE_ANY:
91                 case RTE_FLOW_ITEM_TYPE_ETH:
92                         use_ntuple = 0;
93                         break;
94                 case RTE_FLOW_ITEM_TYPE_VLAN:
95                         use_ntuple = 0;
96                         has_vlan = 1;
97                         break;
98                 case RTE_FLOW_ITEM_TYPE_IPV4:
99                 case RTE_FLOW_ITEM_TYPE_IPV6:
100                 case RTE_FLOW_ITEM_TYPE_TCP:
101                 case RTE_FLOW_ITEM_TYPE_UDP:
102                         /* FALLTHROUGH */
103                         /* need ntuple match, reset exact match */
104                         use_ntuple |= 1;
105                         break;
106                 default:
107                         PMD_DRV_LOG(DEBUG, "Unknown Flow type\n");
108                         use_ntuple |= 0;
109                 }
110                 item++;
111         }
112
113         if (has_vlan && use_ntuple) {
114                 PMD_DRV_LOG(ERR,
115                             "VLAN flow cannot use NTUPLE filter\n");
116                 rte_flow_error_set(error, EINVAL,
117                                    RTE_FLOW_ERROR_TYPE_ITEM,
118                                    item,
119                                    "Cannot use VLAN with NTUPLE");
120                 return -rte_errno;
121         }
122
123         return use_ntuple;
124 }
125
126 static int
127 bnxt_validate_and_parse_flow_type(struct bnxt *bp,
128                                   const struct rte_flow_attr *attr,
129                                   const struct rte_flow_item pattern[],
130                                   struct rte_flow_error *error,
131                                   struct bnxt_filter_info *filter)
132 {
133         const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
134         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
135         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
136         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
137         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
138         const struct rte_flow_item_udp *udp_spec, *udp_mask;
139         const struct rte_flow_item_eth *eth_spec, *eth_mask;
140         const struct rte_flow_item_nvgre *nvgre_spec;
141         const struct rte_flow_item_nvgre *nvgre_mask;
142         const struct rte_flow_item_gre *gre_spec;
143         const struct rte_flow_item_gre *gre_mask;
144         const struct rte_flow_item_vxlan *vxlan_spec;
145         const struct rte_flow_item_vxlan *vxlan_mask;
146         uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
147         uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
148         const struct rte_flow_item_vf *vf_spec;
149         uint32_t tenant_id_be = 0, valid_flags = 0;
150         bool vni_masked = 0;
151         bool tni_masked = 0;
152         uint32_t en_ethertype;
153         uint8_t inner = 0;
154         uint32_t vf = 0;
155         uint32_t en = 0;
156         int use_ntuple;
157         int dflt_vnic;
158
159         use_ntuple = bnxt_filter_type_check(pattern, error);
160         if (use_ntuple < 0)
161                 return use_ntuple;
162         PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
163
164         filter->filter_type = use_ntuple ?
165                 HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
166         en_ethertype = use_ntuple ?
167                 NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
168                 EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
169
170         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
171                 if (item->last) {
172                         /* last or range is NOT supported as match criteria */
173                         rte_flow_error_set(error, EINVAL,
174                                            RTE_FLOW_ERROR_TYPE_ITEM,
175                                            item,
176                                            "No support for range");
177                         return -rte_errno;
178                 }
179
180                 switch (item->type) {
181                 case RTE_FLOW_ITEM_TYPE_ANY:
182                         inner =
183                         ((const struct rte_flow_item_any *)item->spec)->num > 3;
184                         if (inner)
185                                 PMD_DRV_LOG(DEBUG, "Parse inner header\n");
186                         break;
187                 case RTE_FLOW_ITEM_TYPE_ETH:
188                         if (!item->spec || !item->mask)
189                                 break;
190
191                         eth_spec = item->spec;
192                         eth_mask = item->mask;
193
194                         /* Source MAC address mask cannot be partially set.
195                          * Should be All 0's or all 1's.
196                          * Destination MAC address mask must not be partially
197                          * set. Should be all 1's or all 0's.
198                          */
199                         if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
200                              !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
201                             (!rte_is_zero_ether_addr(&eth_mask->dst) &&
202                              !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
203                                 rte_flow_error_set(error,
204                                                    EINVAL,
205                                                    RTE_FLOW_ERROR_TYPE_ITEM,
206                                                    item,
207                                                    "MAC_addr mask not valid");
208                                 return -rte_errno;
209                         }
210
211                         /* Mask is not allowed. Only exact matches are */
212                         if (eth_mask->type &&
213                             eth_mask->type != RTE_BE16(0xffff)) {
214                                 rte_flow_error_set(error, EINVAL,
215                                                    RTE_FLOW_ERROR_TYPE_ITEM,
216                                                    item,
217                                                    "ethertype mask not valid");
218                                 return -rte_errno;
219                         }
220
221                         if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
222                                 if (!rte_is_unicast_ether_addr(&eth_spec->dst)) {
223                                         rte_flow_error_set(error,
224                                                            EINVAL,
225                                                            RTE_FLOW_ERROR_TYPE_ITEM,
226                                                            item,
227                                                            "DMAC is invalid");
228                                         return -rte_errno;
229                                 }
230                                 rte_memcpy(filter->dst_macaddr,
231                                            &eth_spec->dst, RTE_ETHER_ADDR_LEN);
232                                 en |= use_ntuple ?
233                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
234                                         EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
235                                 valid_flags |= inner ?
236                                         BNXT_FLOW_L2_INNER_DST_VALID_FLAG :
237                                         BNXT_FLOW_L2_DST_VALID_FLAG;
238                                 filter->priority = attr->priority;
239                                 PMD_DRV_LOG(DEBUG,
240                                             "Creating a priority flow\n");
241                         }
242
243                         if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
244                                 if (!rte_is_unicast_ether_addr(&eth_spec->src)) {
245                                         rte_flow_error_set(error,
246                                                            EINVAL,
247                                                            RTE_FLOW_ERROR_TYPE_ITEM,
248                                                            item,
249                                                            "SMAC is invalid");
250                                         return -rte_errno;
251                                 }
252                                 rte_memcpy(filter->src_macaddr,
253                                            &eth_spec->src, RTE_ETHER_ADDR_LEN);
254                                 en |= use_ntuple ?
255                                         NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
256                                         EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
257                                 valid_flags |= inner ?
258                                         BNXT_FLOW_L2_INNER_SRC_VALID_FLAG :
259                                         BNXT_FLOW_L2_SRC_VALID_FLAG;
260                         } /*
261                            * else {
262                            *  PMD_DRV_LOG(ERR, "Handle this condition\n");
263                            * }
264                            */
265                         if (eth_mask->type) {
266                                 filter->ethertype =
267                                         rte_be_to_cpu_16(eth_spec->type);
268                                 en |= en_ethertype;
269                         }
270
271                         break;
272                 case RTE_FLOW_ITEM_TYPE_VLAN:
273                         vlan_spec = item->spec;
274                         vlan_mask = item->mask;
275                         if (en & en_ethertype) {
276                                 rte_flow_error_set(error, EINVAL,
277                                                    RTE_FLOW_ERROR_TYPE_ITEM,
278                                                    item,
279                                                    "VLAN TPID matching is not"
280                                                    " supported");
281                                 return -rte_errno;
282                         }
283                         if (vlan_mask->tci &&
284                             vlan_mask->tci == RTE_BE16(0x0fff)) {
285                                 /* Only the VLAN ID can be matched. */
286                                 filter->l2_ovlan =
287                                         rte_be_to_cpu_16(vlan_spec->tci &
288                                                          RTE_BE16(0x0fff));
289                                 en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
290                         } else {
291                                 rte_flow_error_set(error,
292                                                    EINVAL,
293                                                    RTE_FLOW_ERROR_TYPE_ITEM,
294                                                    item,
295                                                    "VLAN mask is invalid");
296                                 return -rte_errno;
297                         }
298                         if (vlan_mask->inner_type &&
299                             vlan_mask->inner_type != RTE_BE16(0xffff)) {
300                                 rte_flow_error_set(error, EINVAL,
301                                                    RTE_FLOW_ERROR_TYPE_ITEM,
302                                                    item,
303                                                    "inner ethertype mask not"
304                                                    " valid");
305                                 return -rte_errno;
306                         }
307                         if (vlan_mask->inner_type) {
308                                 filter->ethertype =
309                                         rte_be_to_cpu_16(vlan_spec->inner_type);
310                                 en |= en_ethertype;
311                         }
312
313                         break;
314                 case RTE_FLOW_ITEM_TYPE_IPV4:
315                         /* If mask is not involved, we could use EM filters. */
316                         ipv4_spec = item->spec;
317                         ipv4_mask = item->mask;
318
319                         if (!item->spec || !item->mask)
320                                 break;
321
322                         /* Only IP DST and SRC fields are maskable. */
323                         if (ipv4_mask->hdr.version_ihl ||
324                             ipv4_mask->hdr.type_of_service ||
325                             ipv4_mask->hdr.total_length ||
326                             ipv4_mask->hdr.packet_id ||
327                             ipv4_mask->hdr.fragment_offset ||
328                             ipv4_mask->hdr.time_to_live ||
329                             ipv4_mask->hdr.next_proto_id ||
330                             ipv4_mask->hdr.hdr_checksum) {
331                                 rte_flow_error_set(error,
332                                                    EINVAL,
333                                                    RTE_FLOW_ERROR_TYPE_ITEM,
334                                                    item,
335                                                    "Invalid IPv4 mask.");
336                                 return -rte_errno;
337                         }
338
339                         filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
340                         filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
341
342                         if (use_ntuple)
343                                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
344                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
345                         else
346                                 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
347                                         EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
348
349                         if (ipv4_mask->hdr.src_addr) {
350                                 filter->src_ipaddr_mask[0] =
351                                         ipv4_mask->hdr.src_addr;
352                                 en |= !use_ntuple ? 0 :
353                                      NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
354                         }
355
356                         if (ipv4_mask->hdr.dst_addr) {
357                                 filter->dst_ipaddr_mask[0] =
358                                         ipv4_mask->hdr.dst_addr;
359                                 en |= !use_ntuple ? 0 :
360                                      NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
361                         }
362
363                         filter->ip_addr_type = use_ntuple ?
364                          HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
365                          HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
366
367                         if (ipv4_spec->hdr.next_proto_id) {
368                                 filter->ip_protocol =
369                                         ipv4_spec->hdr.next_proto_id;
370                                 if (use_ntuple)
371                                         en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
372                                 else
373                                         en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
374                         }
375                         break;
376                 case RTE_FLOW_ITEM_TYPE_IPV6:
377                         ipv6_spec = item->spec;
378                         ipv6_mask = item->mask;
379
380                         if (!item->spec || !item->mask)
381                                 break;
382
383                         /* Only IP DST and SRC fields are maskable. */
384                         if (ipv6_mask->hdr.vtc_flow ||
385                             ipv6_mask->hdr.payload_len ||
386                             ipv6_mask->hdr.proto ||
387                             ipv6_mask->hdr.hop_limits) {
388                                 rte_flow_error_set(error,
389                                                    EINVAL,
390                                                    RTE_FLOW_ERROR_TYPE_ITEM,
391                                                    item,
392                                                    "Invalid IPv6 mask.");
393                                 return -rte_errno;
394                         }
395
396                         if (use_ntuple)
397                                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
398                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
399                         else
400                                 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
401                                         EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
402
403                         rte_memcpy(filter->src_ipaddr,
404                                    ipv6_spec->hdr.src_addr, 16);
405                         rte_memcpy(filter->dst_ipaddr,
406                                    ipv6_spec->hdr.dst_addr, 16);
407
408                         if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
409                                                    16)) {
410                                 rte_memcpy(filter->src_ipaddr_mask,
411                                            ipv6_mask->hdr.src_addr, 16);
412                                 en |= !use_ntuple ? 0 :
413                                     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
414                         }
415
416                         if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
417                                                    16)) {
418                                 rte_memcpy(filter->dst_ipaddr_mask,
419                                            ipv6_mask->hdr.dst_addr, 16);
420                                 en |= !use_ntuple ? 0 :
421                                      NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
422                         }
423
424                         filter->ip_addr_type = use_ntuple ?
425                                 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
426                                 EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
427                         break;
428                 case RTE_FLOW_ITEM_TYPE_TCP:
429                         tcp_spec = item->spec;
430                         tcp_mask = item->mask;
431
432                         if (!item->spec || !item->mask)
433                                 break;
434
435                         /* Check TCP mask. Only DST & SRC ports are maskable */
436                         if (tcp_mask->hdr.sent_seq ||
437                             tcp_mask->hdr.recv_ack ||
438                             tcp_mask->hdr.data_off ||
439                             tcp_mask->hdr.tcp_flags ||
440                             tcp_mask->hdr.rx_win ||
441                             tcp_mask->hdr.cksum ||
442                             tcp_mask->hdr.tcp_urp) {
443                                 rte_flow_error_set(error,
444                                                    EINVAL,
445                                                    RTE_FLOW_ERROR_TYPE_ITEM,
446                                                    item,
447                                                    "Invalid TCP mask");
448                                 return -rte_errno;
449                         }
450
451                         filter->src_port = tcp_spec->hdr.src_port;
452                         filter->dst_port = tcp_spec->hdr.dst_port;
453
454                         if (use_ntuple)
455                                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
456                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
457                         else
458                                 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
459                                         EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
460
461                         if (tcp_mask->hdr.dst_port) {
462                                 filter->dst_port_mask = tcp_mask->hdr.dst_port;
463                                 en |= !use_ntuple ? 0 :
464                                   NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
465                         }
466
467                         if (tcp_mask->hdr.src_port) {
468                                 filter->src_port_mask = tcp_mask->hdr.src_port;
469                                 en |= !use_ntuple ? 0 :
470                                   NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
471                         }
472                         break;
473                 case RTE_FLOW_ITEM_TYPE_UDP:
474                         udp_spec = item->spec;
475                         udp_mask = item->mask;
476
477                         if (!item->spec || !item->mask)
478                                 break;
479
480                         if (udp_mask->hdr.dgram_len ||
481                             udp_mask->hdr.dgram_cksum) {
482                                 rte_flow_error_set(error,
483                                                    EINVAL,
484                                                    RTE_FLOW_ERROR_TYPE_ITEM,
485                                                    item,
486                                                    "Invalid UDP mask");
487                                 return -rte_errno;
488                         }
489
490                         filter->src_port = udp_spec->hdr.src_port;
491                         filter->dst_port = udp_spec->hdr.dst_port;
492
493                         if (use_ntuple)
494                                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
495                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
496                         else
497                                 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
498                                         EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
499
500                         if (udp_mask->hdr.dst_port) {
501                                 filter->dst_port_mask = udp_mask->hdr.dst_port;
502                                 en |= !use_ntuple ? 0 :
503                                   NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
504                         }
505
506                         if (udp_mask->hdr.src_port) {
507                                 filter->src_port_mask = udp_mask->hdr.src_port;
508                                 en |= !use_ntuple ? 0 :
509                                   NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
510                         }
511                         break;
512                 case RTE_FLOW_ITEM_TYPE_VXLAN:
513                         vxlan_spec = item->spec;
514                         vxlan_mask = item->mask;
515                         /* Check if VXLAN item is used to describe protocol.
516                          * If yes, both spec and mask should be NULL.
517                          * If no, both spec and mask shouldn't be NULL.
518                          */
519                         if ((!vxlan_spec && vxlan_mask) ||
520                             (vxlan_spec && !vxlan_mask)) {
521                                 rte_flow_error_set(error,
522                                                    EINVAL,
523                                                    RTE_FLOW_ERROR_TYPE_ITEM,
524                                                    item,
525                                                    "Invalid VXLAN item");
526                                 return -rte_errno;
527                         }
528
529                         if (!vxlan_spec && !vxlan_mask) {
530                                 filter->tunnel_type =
531                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
532                                 break;
533                         }
534
535                         if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
536                             vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
537                             vxlan_spec->flags != 0x8) {
538                                 rte_flow_error_set(error,
539                                                    EINVAL,
540                                                    RTE_FLOW_ERROR_TYPE_ITEM,
541                                                    item,
542                                                    "Invalid VXLAN item");
543                                 return -rte_errno;
544                         }
545
546                         /* Check if VNI is masked. */
547                         if (vxlan_spec && vxlan_mask) {
548                                 vni_masked =
549                                         !!memcmp(vxlan_mask->vni, vni_mask,
550                                                  RTE_DIM(vni_mask));
551                                 if (vni_masked) {
552                                         rte_flow_error_set
553                                                 (error,
554                                                  EINVAL,
555                                                  RTE_FLOW_ERROR_TYPE_ITEM,
556                                                  item,
557                                                  "Invalid VNI mask");
558                                         return -rte_errno;
559                                 }
560
561                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
562                                            vxlan_spec->vni, 3);
563                                 filter->vni =
564                                         rte_be_to_cpu_32(tenant_id_be);
565                                 filter->tunnel_type =
566                                  CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
567                         }
568                         break;
569                 case RTE_FLOW_ITEM_TYPE_NVGRE:
570                         nvgre_spec = item->spec;
571                         nvgre_mask = item->mask;
572                         /* Check if NVGRE item is used to describe protocol.
573                          * If yes, both spec and mask should be NULL.
574                          * If no, both spec and mask shouldn't be NULL.
575                          */
576                         if ((!nvgre_spec && nvgre_mask) ||
577                             (nvgre_spec && !nvgre_mask)) {
578                                 rte_flow_error_set(error,
579                                                    EINVAL,
580                                                    RTE_FLOW_ERROR_TYPE_ITEM,
581                                                    item,
582                                                    "Invalid NVGRE item");
583                                 return -rte_errno;
584                         }
585
586                         if (!nvgre_spec && !nvgre_mask) {
587                                 filter->tunnel_type =
588                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
589                                 break;
590                         }
591
592                         if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
593                             nvgre_spec->protocol != 0x6558) {
594                                 rte_flow_error_set(error,
595                                                    EINVAL,
596                                                    RTE_FLOW_ERROR_TYPE_ITEM,
597                                                    item,
598                                                    "Invalid NVGRE item");
599                                 return -rte_errno;
600                         }
601
602                         if (nvgre_spec && nvgre_mask) {
603                                 tni_masked =
604                                         !!memcmp(nvgre_mask->tni, tni_mask,
605                                                  RTE_DIM(tni_mask));
606                                 if (tni_masked) {
607                                         rte_flow_error_set
608                                                 (error,
609                                                  EINVAL,
610                                                  RTE_FLOW_ERROR_TYPE_ITEM,
611                                                  item,
612                                                  "Invalid TNI mask");
613                                         return -rte_errno;
614                                 }
615                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
616                                            nvgre_spec->tni, 3);
617                                 filter->vni =
618                                         rte_be_to_cpu_32(tenant_id_be);
619                                 filter->tunnel_type =
620                                  CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
621                         }
622                         break;
623
624                 case RTE_FLOW_ITEM_TYPE_GRE:
625                         gre_spec = (const struct rte_flow_item_gre *)item->spec;
626                         gre_mask = (const struct rte_flow_item_gre *)item->mask;
627
628                         /*
629                          * Check if GRE item is used to describe protocol.
630                          * If yes, both spec and mask should be NULL.
631                          * If no, both spec and mask shouldn't be NULL.
632                          */
633                         if (!!gre_spec ^ !!gre_mask) {
634                                 rte_flow_error_set(error, EINVAL,
635                                                    RTE_FLOW_ERROR_TYPE_ITEM,
636                                                    item,
637                                                    "Invalid GRE item");
638                                 return -rte_errno;
639                         }
640
641                         if (!gre_spec && !gre_mask) {
642                                 filter->tunnel_type =
643                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE;
644                                 break;
645                         }
646                         break;
647
648                 case RTE_FLOW_ITEM_TYPE_VF:
649                         vf_spec = item->spec;
650                         vf = vf_spec->id;
651                         if (!BNXT_PF(bp)) {
652                                 rte_flow_error_set(error,
653                                                    EINVAL,
654                                                    RTE_FLOW_ERROR_TYPE_ITEM,
655                                                    item,
656                                                    "Configuring on a VF!");
657                                 return -rte_errno;
658                         }
659
660                         if (vf >= bp->pdev->max_vfs) {
661                                 rte_flow_error_set(error,
662                                                    EINVAL,
663                                                    RTE_FLOW_ERROR_TYPE_ITEM,
664                                                    item,
665                                                    "Incorrect VF id!");
666                                 return -rte_errno;
667                         }
668
669                         if (!attr->transfer) {
670                                 rte_flow_error_set(error,
671                                                    ENOTSUP,
672                                                    RTE_FLOW_ERROR_TYPE_ITEM,
673                                                    item,
674                                                    "Matching VF traffic without"
675                                                    " affecting it (transfer attribute)"
676                                                    " is unsupported");
677                                 return -rte_errno;
678                         }
679
680                         filter->mirror_vnic_id =
681                         dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
682                         if (dflt_vnic < 0) {
683                                 /* This simply indicates there's no driver
684                                  * loaded. This is not an error.
685                                  */
686                                 rte_flow_error_set
687                                         (error,
688                                          EINVAL,
689                                          RTE_FLOW_ERROR_TYPE_ITEM,
690                                          item,
691                                          "Unable to get default VNIC for VF");
692                                 return -rte_errno;
693                         }
694
695                         filter->mirror_vnic_id = dflt_vnic;
696                         en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
697                         break;
698                 default:
699                         break;
700                 }
701                 item++;
702         }
703         filter->enables = en;
704         filter->valid_flags = valid_flags;
705
706         return 0;
707 }
708
709 /* Parse attributes */
710 static int
711 bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
712                      struct rte_flow_error *error)
713 {
714         /* Must be input direction */
715         if (!attr->ingress) {
716                 rte_flow_error_set(error,
717                                    EINVAL,
718                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
719                                    attr,
720                                    "Only support ingress.");
721                 return -rte_errno;
722         }
723
724         /* Not supported */
725         if (attr->egress) {
726                 rte_flow_error_set(error,
727                                    EINVAL,
728                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
729                                    attr,
730                                    "No support for egress.");
731                 return -rte_errno;
732         }
733
734         return 0;
735 }
736
737 static struct bnxt_filter_info *
738 bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
739 {
740         struct bnxt_filter_info *mf, *f0;
741         struct bnxt_vnic_info *vnic0;
742         struct rte_flow *flow;
743         int i;
744
745         vnic0 = &bp->vnic_info[0];
746         f0 = STAILQ_FIRST(&vnic0->filter);
747
748         /* This flow has same DST MAC as the port/l2 filter. */
749         if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
750                 return f0;
751
752         for (i = bp->max_vnics - 1; i >= 0; i--) {
753                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
754
755                 if (vnic->fw_vnic_id == INVALID_VNIC_ID)
756                         continue;
757
758                 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
759                         mf = flow->filter;
760
761                         if (mf->matching_l2_fltr_ptr)
762                                 continue;
763
764                         if (mf->ethertype == nf->ethertype &&
765                             mf->l2_ovlan == nf->l2_ovlan &&
766                             mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
767                             mf->l2_ivlan == nf->l2_ivlan &&
768                             mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
769                             !memcmp(mf->src_macaddr, nf->src_macaddr,
770                                     RTE_ETHER_ADDR_LEN) &&
771                             !memcmp(mf->dst_macaddr, nf->dst_macaddr,
772                                     RTE_ETHER_ADDR_LEN))
773                                 return mf;
774                 }
775         }
776         return NULL;
777 }
778
779 static struct bnxt_filter_info *
780 bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
781                       struct bnxt_vnic_info *vnic)
782 {
783         struct bnxt_filter_info *filter1;
784         int rc;
785
786         /* Alloc new L2 filter.
787          * This flow needs MAC filter which does not match any existing
788          * L2 filters.
789          */
790         filter1 = bnxt_get_unused_filter(bp);
791         if (filter1 == NULL)
792                 return NULL;
793
794         filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE;
795         filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
796         if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
797             nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) {
798                 filter1->flags |=
799                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
800                 PMD_DRV_LOG(DEBUG, "Create Outer filter\n");
801         }
802
803         if (nf->filter_type == HWRM_CFA_L2_FILTER &&
804             (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
805              nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) {
806                 PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n");
807                 filter1->flags |=
808                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID;
809                 memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN);
810         } else {
811                 PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
812                 memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
813         }
814
815         if (nf->priority &&
816             (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG ||
817              nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
818                 /* Tell the FW where to place the filter in the table. */
819                 if (nf->priority > 65535) {
820                         filter1->pri_hint =
821                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER;
822                         /* This will place the filter in TCAM */
823                         filter1->l2_filter_id_hint = (uint64_t)-1;
824                 }
825         }
826
827         filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
828                         L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
829         memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
830         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
831                                      filter1);
832         if (rc) {
833                 bnxt_free_filter(bp, filter1);
834                 return NULL;
835         }
836         filter1->l2_ref_cnt++;
837         return filter1;
838 }
839
840 struct bnxt_filter_info *
841 bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
842                    struct bnxt_vnic_info *vnic)
843 {
844         struct bnxt_filter_info *l2_filter = NULL;
845
846         l2_filter = bnxt_find_matching_l2_filter(bp, nf);
847         if (l2_filter) {
848                 l2_filter->l2_ref_cnt++;
849                 nf->matching_l2_fltr_ptr = l2_filter;
850         } else {
851                 l2_filter = bnxt_create_l2_filter(bp, nf, vnic);
852                 nf->matching_l2_fltr_ptr = NULL;
853         }
854
855         return l2_filter;
856 }
857
858 static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
859 {
860         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
861         uint64_t rx_offloads = dev_conf->rxmode.offloads;
862         int rc;
863
864         rc = bnxt_vnic_grp_alloc(bp, vnic);
865         if (rc)
866                 goto ret;
867
868         rc = bnxt_hwrm_vnic_alloc(bp, vnic);
869         if (rc) {
870                 PMD_DRV_LOG(ERR, "HWRM vnic alloc failure rc: %x\n", rc);
871                 goto ret;
872         }
873         bp->nr_vnics++;
874
875         /* RSS context is required only when there is more than one RSS ring */
876         if (vnic->rx_queue_cnt > 1) {
877                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0 /* ctx_idx 0 */);
878                 if (rc) {
879                         PMD_DRV_LOG(ERR,
880                                     "HWRM vnic ctx alloc failure: %x\n", rc);
881                         goto ret;
882                 }
883         } else {
884                 PMD_DRV_LOG(DEBUG, "No RSS context required\n");
885         }
886
887         if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
888                 vnic->vlan_strip = true;
889         else
890                 vnic->vlan_strip = false;
891
892         rc = bnxt_hwrm_vnic_cfg(bp, vnic);
893         if (rc)
894                 goto ret;
895
896         bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
897
898 ret:
899         return rc;
900 }
901
902 static int match_vnic_rss_cfg(struct bnxt *bp,
903                               struct bnxt_vnic_info *vnic,
904                               const struct rte_flow_action_rss *rss)
905 {
906         unsigned int match = 0, i;
907
908         if (vnic->rx_queue_cnt != rss->queue_num)
909                 return -EINVAL;
910
911         for (i = 0; i < rss->queue_num; i++) {
912                 if (!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt &&
913                     !bp->rx_queues[rss->queue[i]]->rx_started)
914                         return -EINVAL;
915         }
916
917         for (i = 0; i < vnic->rx_queue_cnt; i++) {
918                 int j;
919
920                 for (j = 0; j < vnic->rx_queue_cnt; j++) {
921                         if (bp->grp_info[rss->queue[i]].fw_grp_id ==
922                             vnic->fw_grp_ids[j])
923                                 match++;
924                 }
925         }
926
927         if (match != vnic->rx_queue_cnt) {
928                 PMD_DRV_LOG(ERR,
929                             "VNIC queue count %d vs queues matched %d\n",
930                             match, vnic->rx_queue_cnt);
931                 return -EINVAL;
932         }
933
934         return 0;
935 }
936
937 static void
938 bnxt_update_filter_flags_en(struct bnxt_filter_info *filter,
939                             struct bnxt_filter_info *filter1,
940                             int use_ntuple)
941 {
942         if (!use_ntuple &&
943             !(filter->valid_flags &
944               ~(BNXT_FLOW_L2_DST_VALID_FLAG |
945                 BNXT_FLOW_L2_SRC_VALID_FLAG |
946                 BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
947                 BNXT_FLOW_L2_INNER_DST_VALID_FLAG))) {
948                 filter->flags = filter1->flags;
949                 filter->enables = filter1->enables;
950                 filter->filter_type = HWRM_CFA_L2_FILTER;
951                 memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN);
952                 memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
953                 filter->pri_hint = filter1->pri_hint;
954                 filter->l2_filter_id_hint = filter1->l2_filter_id_hint;
955         }
956         filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
957         filter->l2_ref_cnt = filter1->l2_ref_cnt;
958         PMD_DRV_LOG(DEBUG,
959                 "l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n",
960                 filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt);
961 }
962
963 static int
964 bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
965                              const struct rte_flow_item pattern[],
966                              const struct rte_flow_action actions[],
967                              const struct rte_flow_attr *attr,
968                              struct rte_flow_error *error,
969                              struct bnxt_filter_info *filter)
970 {
971         const struct rte_flow_action *act =
972                 bnxt_flow_non_void_action(actions);
973         struct bnxt *bp = dev->data->dev_private;
974         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
975         const struct rte_flow_action_queue *act_q;
976         const struct rte_flow_action_vf *act_vf;
977         struct bnxt_filter_info *filter1 = NULL;
978         const struct rte_flow_action_rss *rss;
979         struct bnxt_vnic_info *vnic, *vnic0;
980         struct bnxt_rx_queue *rxq = NULL;
981         int dflt_vnic, vnic_id;
982         unsigned int rss_idx;
983         uint32_t vf = 0, i;
984         int rc, use_ntuple;
985
986         rc =
987         bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter);
988         if (rc != 0)
989                 goto ret;
990
991         rc = bnxt_flow_parse_attr(attr, error);
992         if (rc != 0)
993                 goto ret;
994
995         /* Since we support ingress attribute only - right now. */
996         if (filter->filter_type == HWRM_CFA_EM_FILTER)
997                 filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
998
999         use_ntuple = bnxt_filter_type_check(pattern, error);
1000         switch (act->type) {
1001         case RTE_FLOW_ACTION_TYPE_QUEUE:
1002                 /* Allow this flow. Redirect to a VNIC. */
1003                 act_q = (const struct rte_flow_action_queue *)act->conf;
1004                 if (!act_q->index || act_q->index >= bp->rx_nr_rings) {
1005                         rte_flow_error_set(error,
1006                                            EINVAL,
1007                                            RTE_FLOW_ERROR_TYPE_ACTION,
1008                                            act,
1009                                            "Invalid queue ID.");
1010                         rc = -rte_errno;
1011                         goto ret;
1012                 }
1013                 PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
1014
1015                 vnic_id = attr->group;
1016                 if (!vnic_id) {
1017                         PMD_DRV_LOG(DEBUG, "Group id is 0\n");
1018                         vnic_id = act_q->index;
1019                 }
1020
1021                 vnic = &bp->vnic_info[vnic_id];
1022                 if (vnic == NULL) {
1023                         rte_flow_error_set(error,
1024                                            EINVAL,
1025                                            RTE_FLOW_ERROR_TYPE_ACTION,
1026                                            act,
1027                                            "No matching VNIC found.");
1028                         rc = -rte_errno;
1029                         goto ret;
1030                 }
1031                 if (vnic->rx_queue_cnt) {
1032                         if (vnic->start_grp_id != act_q->index) {
1033                                 PMD_DRV_LOG(ERR,
1034                                             "VNIC already in use\n");
1035                                 rte_flow_error_set(error,
1036                                                    EINVAL,
1037                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1038                                                    act,
1039                                                    "VNIC already in use");
1040                                 rc = -rte_errno;
1041                                 goto ret;
1042                         }
1043                         goto use_vnic;
1044                 }
1045
1046                 rxq = bp->rx_queues[act_q->index];
1047
1048                 if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
1049                     vnic->fw_vnic_id != INVALID_HW_RING_ID)
1050                         goto use_vnic;
1051
1052                 //if (!rxq ||
1053                     //bp->vnic_info[0].fw_grp_ids[act_q->index] !=
1054                     //INVALID_HW_RING_ID ||
1055                     //!rxq->rx_deferred_start) {
1056                 if (!rxq ||
1057                     bp->vnic_info[0].fw_grp_ids[act_q->index] !=
1058                     INVALID_HW_RING_ID) {
1059                         PMD_DRV_LOG(ERR,
1060                                     "Queue invalid or used with other VNIC\n");
1061                         rte_flow_error_set(error,
1062                                            EINVAL,
1063                                            RTE_FLOW_ERROR_TYPE_ACTION,
1064                                            act,
1065                                            "Queue invalid queue or in use");
1066                         rc = -rte_errno;
1067                         goto ret;
1068                 }
1069
1070                 rxq->vnic = vnic;
1071                 rxq->rx_started = 1;
1072                 vnic->rx_queue_cnt++;
1073                 vnic->start_grp_id = act_q->index;
1074                 vnic->end_grp_id = act_q->index;
1075                 vnic->func_default = 0; //This is not a default VNIC.
1076
1077                 PMD_DRV_LOG(DEBUG, "VNIC found\n");
1078
1079                 rc = bnxt_vnic_prep(bp, vnic);
1080                 if (rc)
1081                         goto ret;
1082
1083                 PMD_DRV_LOG(DEBUG,
1084                             "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
1085                             act_q->index, vnic, vnic->fw_grp_ids);
1086
1087 use_vnic:
1088                 vnic->ff_pool_idx = vnic_id;
1089                 PMD_DRV_LOG(DEBUG,
1090                             "Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
1091                 filter->dst_id = vnic->fw_vnic_id;
1092                 filter1 = bnxt_get_l2_filter(bp, filter, vnic);
1093                 if (filter1 == NULL) {
1094                         rc = -ENOSPC;
1095                         goto ret;
1096                 }
1097
1098                 PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n",
1099                             filter, filter1, filter1->l2_ref_cnt);
1100                 bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
1101                 break;
1102         case RTE_FLOW_ACTION_TYPE_DROP:
1103                 vnic0 = &bp->vnic_info[0];
1104                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1105                 if (filter1 == NULL) {
1106                         rc = -ENOSPC;
1107                         goto ret;
1108                 }
1109
1110                 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1111                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1112                         filter->flags =
1113                                 HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
1114                 else
1115                         filter->flags =
1116                                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
1117                 break;
1118         case RTE_FLOW_ACTION_TYPE_COUNT:
1119                 vnic0 = &bp->vnic_info[0];
1120                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1121                 if (filter1 == NULL) {
1122                         rc = -ENOSPC;
1123                         goto ret;
1124                 }
1125
1126                 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1127                 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
1128                 break;
1129         case RTE_FLOW_ACTION_TYPE_VF:
1130                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1131                 vf = act_vf->id;
1132
1133                 if (filter->tunnel_type ==
1134                     CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
1135                     filter->tunnel_type ==
1136                     CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) {
1137                         /* If issued on a VF, ensure id is 0 and is trusted */
1138                         if (BNXT_VF(bp)) {
1139                                 if (!BNXT_VF_IS_TRUSTED(bp) || vf) {
1140                                         rte_flow_error_set(error, EINVAL,
1141                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1142                                                 act,
1143                                                 "Incorrect VF");
1144                                         rc = -rte_errno;
1145                                         goto ret;
1146                                 }
1147                         }
1148
1149                         filter->enables |= filter->tunnel_type;
1150                         filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER;
1151                         goto done;
1152                 }
1153
1154                 if (vf >= bp->pdev->max_vfs) {
1155                         rte_flow_error_set(error,
1156                                            EINVAL,
1157                                            RTE_FLOW_ERROR_TYPE_ACTION,
1158                                            act,
1159                                            "Incorrect VF id!");
1160                         rc = -rte_errno;
1161                         goto ret;
1162                 }
1163
1164                 filter->mirror_vnic_id =
1165                 dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
1166                 if (dflt_vnic < 0) {
1167                         /* This simply indicates there's no driver loaded.
1168                          * This is not an error.
1169                          */
1170                         rte_flow_error_set(error,
1171                                            EINVAL,
1172                                            RTE_FLOW_ERROR_TYPE_ACTION,
1173                                            act,
1174                                            "Unable to get default VNIC for VF");
1175                         rc = -rte_errno;
1176                         goto ret;
1177                 }
1178
1179                 filter->mirror_vnic_id = dflt_vnic;
1180                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
1181
1182                 vnic0 = &bp->vnic_info[0];
1183                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
1184                 if (filter1 == NULL) {
1185                         rc = -ENOSPC;
1186                         goto ret;
1187                 }
1188
1189                 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
1190                 break;
1191         case RTE_FLOW_ACTION_TYPE_RSS:
1192                 rss = (const struct rte_flow_action_rss *)act->conf;
1193
1194                 vnic_id = attr->group;
1195                 if (!vnic_id) {
1196                         PMD_DRV_LOG(ERR, "Group id cannot be 0\n");
1197                         rte_flow_error_set(error,
1198                                            EINVAL,
1199                                            RTE_FLOW_ERROR_TYPE_ATTR,
1200                                            NULL,
1201                                            "Group id cannot be 0");
1202                         rc = -rte_errno;
1203                         goto ret;
1204                 }
1205
1206                 vnic = &bp->vnic_info[vnic_id];
1207                 if (vnic == NULL) {
1208                         rte_flow_error_set(error,
1209                                            EINVAL,
1210                                            RTE_FLOW_ERROR_TYPE_ACTION,
1211                                            act,
1212                                            "No matching VNIC for RSS group.");
1213                         rc = -rte_errno;
1214                         goto ret;
1215                 }
1216                 PMD_DRV_LOG(DEBUG, "VNIC found\n");
1217
1218                 /* Check if requested RSS config matches RSS config of VNIC
1219                  * only if it is not a fresh VNIC configuration.
1220                  * Otherwise the existing VNIC configuration can be used.
1221                  */
1222                 if (vnic->rx_queue_cnt) {
1223                         rc = match_vnic_rss_cfg(bp, vnic, rss);
1224                         if (rc) {
1225                                 PMD_DRV_LOG(ERR,
1226                                             "VNIC and RSS config mismatch\n");
1227                                 rte_flow_error_set(error,
1228                                                    EINVAL,
1229                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1230                                                    act,
1231                                                    "VNIC and RSS cfg mismatch");
1232                                 rc = -rte_errno;
1233                                 goto ret;
1234                         }
1235                         goto vnic_found;
1236                 }
1237
1238                 for (i = 0; i < rss->queue_num; i++) {
1239                         PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n",
1240                                     rss->queue[i]);
1241
1242                         if (!rss->queue[i] ||
1243                             rss->queue[i] >= bp->rx_nr_rings ||
1244                             !bp->rx_queues[rss->queue[i]]) {
1245                                 rte_flow_error_set(error,
1246                                                    EINVAL,
1247                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1248                                                    act,
1249                                                    "Invalid queue ID for RSS");
1250                                 rc = -rte_errno;
1251                                 goto ret;
1252                         }
1253                         rxq = bp->rx_queues[rss->queue[i]];
1254
1255                         //if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] !=
1256                             //INVALID_HW_RING_ID ||
1257                             //!rxq->rx_deferred_start) {
1258                         if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] !=
1259                             INVALID_HW_RING_ID) {
1260                                 PMD_DRV_LOG(ERR,
1261                                             "queue active with other VNIC\n");
1262                                 rte_flow_error_set(error,
1263                                                    EINVAL,
1264                                                    RTE_FLOW_ERROR_TYPE_ACTION,
1265                                                    act,
1266                                                    "Invalid queue ID for RSS");
1267                                 rc = -rte_errno;
1268                                 goto ret;
1269                         }
1270
1271                         rxq->vnic = vnic;
1272                         rxq->rx_started = 1;
1273                         vnic->rx_queue_cnt++;
1274                 }
1275
1276                 vnic->start_grp_id = rss->queue[0];
1277                 vnic->end_grp_id = rss->queue[rss->queue_num - 1];
1278                 vnic->func_default = 0; //This is not a default VNIC.
1279
1280                 rc = bnxt_vnic_prep(bp, vnic);
1281                 if (rc)
1282                         goto ret;
1283
1284                 PMD_DRV_LOG(DEBUG,
1285                             "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
1286                             vnic_id, vnic, vnic->fw_grp_ids);
1287
1288                 vnic->ff_pool_idx = vnic_id;
1289                 PMD_DRV_LOG(DEBUG,
1290                             "Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx);
1291
1292                 /* This can be done only after vnic_grp_alloc is done. */
1293                 for (i = 0; i < vnic->rx_queue_cnt; i++) {
1294                         vnic->fw_grp_ids[i] =
1295                                 bp->grp_info[rss->queue[i]].fw_grp_id;
1296                         /* Make sure vnic0 does not use these rings. */
1297                         bp->vnic_info[0].fw_grp_ids[rss->queue[i]] =
1298                                 INVALID_HW_RING_ID;
1299                 }
1300
1301                 for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ) {
1302                         for (i = 0; i < vnic->rx_queue_cnt; i++)
1303                                 vnic->rss_table[rss_idx++] =
1304                                         vnic->fw_grp_ids[i];
1305                 }
1306
1307                 /* Configure RSS only if the queue count is > 1 */
1308                 if (vnic->rx_queue_cnt > 1) {
1309                         vnic->hash_type =
1310                                 bnxt_rte_to_hwrm_hash_types(rss->types);
1311
1312                         if (!rss->key_len) {
1313                                 /* If hash key has not been specified,
1314                                  * use random hash key.
1315                                  */
1316                                 prandom_bytes(vnic->rss_hash_key,
1317                                               HW_HASH_KEY_SIZE);
1318                         } else {
1319                                 if (rss->key_len > HW_HASH_KEY_SIZE)
1320                                         memcpy(vnic->rss_hash_key,
1321                                                rss->key,
1322                                                HW_HASH_KEY_SIZE);
1323                                 else
1324                                         memcpy(vnic->rss_hash_key,
1325                                                rss->key,
1326                                                rss->key_len);
1327                         }
1328                         bnxt_hwrm_vnic_rss_cfg(bp, vnic);
1329                 } else {
1330                         PMD_DRV_LOG(DEBUG, "No RSS config required\n");
1331                 }
1332
1333 vnic_found:
1334                 filter->dst_id = vnic->fw_vnic_id;
1335                 filter1 = bnxt_get_l2_filter(bp, filter, vnic);
1336                 if (filter1 == NULL) {
1337                         rc = -ENOSPC;
1338                         goto ret;
1339                 }
1340
1341                 PMD_DRV_LOG(DEBUG, "L2 filter created\n");
1342                 bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
1343                 break;
1344         default:
1345                 rte_flow_error_set(error,
1346                                    EINVAL,
1347                                    RTE_FLOW_ERROR_TYPE_ACTION,
1348                                    act,
1349                                    "Invalid action.");
1350                 rc = -rte_errno;
1351                 goto ret;
1352         }
1353
1354         if (filter1 && !filter->matching_l2_fltr_ptr) {
1355                 bnxt_free_filter(bp, filter1);
1356                 filter1->fw_l2_filter_id = -1;
1357         }
1358
1359 done:
1360         act = bnxt_flow_non_void_action(++act);
1361         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1362                 rte_flow_error_set(error,
1363                                    EINVAL,
1364                                    RTE_FLOW_ERROR_TYPE_ACTION,
1365                                    act,
1366                                    "Invalid action.");
1367                 rc = -rte_errno;
1368                 goto ret;
1369         }
1370 ret:
1371         return rc;
1372 }
1373
1374 static
1375 struct bnxt_vnic_info *find_matching_vnic(struct bnxt *bp,
1376                                           struct bnxt_filter_info *filter)
1377 {
1378         struct bnxt_vnic_info *vnic = NULL;
1379         unsigned int i;
1380
1381         for (i = 0; i < bp->max_vnics; i++) {
1382                 vnic = &bp->vnic_info[i];
1383                 if (vnic->fw_vnic_id != INVALID_VNIC_ID &&
1384                     filter->dst_id == vnic->fw_vnic_id) {
1385                         PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n",
1386                                     vnic->ff_pool_idx);
1387                         return vnic;
1388                 }
1389         }
1390         return NULL;
1391 }
1392
/* rte_flow .validate callback: parse the flow into a temporary filter to
 * verify it, then tear down every resource the parse step may have
 * created (VNIC, HW filters, software filter).  Nothing is left
 * programmed in hardware on return.
 */
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_filter_info *filter;
	int ret = 0;

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0)
		return ret;

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		return -ENOMEM;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);

	/* The parse step may have allocated a VNIC for a queue/RSS action;
	 * if it holds no other filters, release it again — this is only a
	 * validation, so the VNIC must not stay allocated.
	 * NOTE(review): cleanup runs even when parsing failed above;
	 * presumably the parse step leaves filter->dst_id consistent in
	 * that case — confirm.
	 */
	vnic = find_matching_vnic(bp, filter);
	if (vnic) {
		if (STAILQ_EMPTY(&vnic->filter)) {
			rte_free(vnic->fw_grp_ids);
			bnxt_hwrm_vnic_ctx_free(bp, vnic);
			bnxt_hwrm_vnic_free(bp, vnic);
			vnic->rx_queue_cnt = 0;
			bp->nr_vnics--;
			PMD_DRV_LOG(DEBUG, "Free VNIC\n");
		}
	}

	/* Undo any HW filter the parse step programmed. */
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		bnxt_hwrm_clear_em_filter(bp, filter);
	else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		bnxt_hwrm_clear_ntuple_filter(bp, filter);
	else
		bnxt_hwrm_clear_l2_filter(bp, filter);

	/* No need to hold on to this filter if we are just validating flow */
	filter->fw_l2_filter_id = UINT64_MAX;
	bnxt_free_filter(bp, filter);

	return ret;
}
1443
1444 static void
1445 bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter,
1446                    struct bnxt_filter_info *new_filter)
1447 {
1448         /* Clear the new L2 filter that was created in the previous step in
1449          * bnxt_validate_and_parse_flow. For L2 filters, we will use the new
1450          * filter which points to the new destination queue and so we clear
1451          * the previous L2 filter. For ntuple filters, we are going to reuse
1452          * the old L2 filter and create new NTUPLE filter with this new
1453          * destination queue subsequently during bnxt_flow_create.
1454          */
1455         if (new_filter->filter_type == HWRM_CFA_L2_FILTER) {
1456                 bnxt_hwrm_clear_l2_filter(bp, old_filter);
1457                 bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter);
1458         } else {
1459                 if (new_filter->filter_type == HWRM_CFA_EM_FILTER)
1460                         bnxt_hwrm_clear_em_filter(bp, old_filter);
1461                 if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1462                         bnxt_hwrm_clear_ntuple_filter(bp, old_filter);
1463         }
1464 }
1465
/* Search every active VNIC's flow list for a filter whose match criteria
 * are identical to @nf.
 *
 * Returns:
 *   -EEXIST  an identical filter with the SAME destination already exists
 *            (true duplicate — caller must reject the new flow);
 *   -EXDEV   an identical pattern exists but with a DIFFERENT destination;
 *            as a side effect the old filter is torn down (via
 *            bnxt_update_filter), removed from the VNIC list, freed, and
 *            the existing flow is re-pointed at @nf;
 *   0        no matching filter found.
 */
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf;
	struct rte_flow *flow;
	int i;

	/* Iterate VNICs highest-index first; skip unallocated slots. */
	for (i = bp->max_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;

			/* Exact comparison of every match field: type, flags,
			 * L4 ports/masks, IP protocol and address type,
			 * ethertype, tunnel (vni/type), VLAN tags/masks,
			 * L2 MACs/masks and L3 addresses/masks.
			 */
			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask))) {
				if (mf->dst_id == nf->dst_id)
					return -EEXIST;
				/* Free the old filter, update flow
				 * with new filter
				 */
				bnxt_update_filter(bp, mf, nf);
				STAILQ_REMOVE(&vnic->filter, mf,
					      bnxt_filter_info, next);
				STAILQ_INSERT_TAIL(&vnic->filter, nf, next);
				bnxt_free_filter(bp, mf);
				flow->filter = nf;
				return -EXDEV;
			}
		}
	}
	return 0;
}
1530
1531 static struct rte_flow *
1532 bnxt_flow_create(struct rte_eth_dev *dev,
1533                  const struct rte_flow_attr *attr,
1534                  const struct rte_flow_item pattern[],
1535                  const struct rte_flow_action actions[],
1536                  struct rte_flow_error *error)
1537 {
1538         struct bnxt *bp = dev->data->dev_private;
1539         struct bnxt_vnic_info *vnic = NULL;
1540         struct bnxt_filter_info *filter;
1541         bool update_flow = false;
1542         struct rte_flow *flow;
1543         int ret = 0;
1544         uint32_t tun_type;
1545
1546         if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1547                 rte_flow_error_set(error, EINVAL,
1548                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1549                                    "Failed to create flow, Not a Trusted VF!");
1550                 return NULL;
1551         }
1552
1553         flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
1554         if (!flow) {
1555                 rte_flow_error_set(error, ENOMEM,
1556                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1557                                    "Failed to allocate memory");
1558                 return flow;
1559         }
1560
1561         ret = bnxt_flow_args_validate(attr, pattern, actions, error);
1562         if (ret != 0) {
1563                 PMD_DRV_LOG(ERR, "Not a validate flow.\n");
1564                 goto free_flow;
1565         }
1566
1567         filter = bnxt_get_unused_filter(bp);
1568         if (filter == NULL) {
1569                 PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
1570                 goto free_flow;
1571         }
1572
1573         ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
1574                                            error, filter);
1575         if (ret != 0)
1576                 goto free_filter;
1577
1578         ret = bnxt_match_filter(bp, filter);
1579         if (ret == -EEXIST) {
1580                 PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
1581                 /* Clear the filter that was created as part of
1582                  * validate_and_parse_flow() above
1583                  */
1584                 bnxt_hwrm_clear_l2_filter(bp, filter);
1585                 goto free_filter;
1586         } else if (ret == -EXDEV) {
1587                 PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
1588                 PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
1589                 update_flow = true;
1590         }
1591
1592         /* If tunnel redirection to a VF/PF is specified then only tunnel_type
1593          * is set and enable is set to the tunnel type. Issue hwrm cmd directly
1594          * in such a case.
1595          */
1596         if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
1597             filter->enables == filter->tunnel_type) {
1598                 ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
1599                 if (ret) {
1600                         rte_flow_error_set(error, -ret,
1601                                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1602                                            "Unable to query tunnel to VF");
1603                         goto free_filter;
1604                 }
1605                 if (tun_type == (1U << filter->tunnel_type)) {
1606                         ret =
1607                         bnxt_hwrm_tunnel_redirect_free(bp,
1608                                                        filter->tunnel_type);
1609                         if (ret) {
1610                                 PMD_DRV_LOG(ERR,
1611                                             "Unable to free existing tunnel\n");
1612                                 rte_flow_error_set(error, -ret,
1613                                                    RTE_FLOW_ERROR_TYPE_HANDLE,
1614                                                    NULL,
1615                                                    "Unable to free preexisting "
1616                                                    "tunnel on VF");
1617                                 goto free_filter;
1618                         }
1619                 }
1620                 ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type);
1621                 if (ret) {
1622                         rte_flow_error_set(error, -ret,
1623                                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1624                                            "Unable to redirect tunnel to VF");
1625                         goto free_filter;
1626                 }
1627                 vnic = &bp->vnic_info[0];
1628                 goto done;
1629         }
1630
1631         if (filter->filter_type == HWRM_CFA_EM_FILTER) {
1632                 filter->enables |=
1633                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1634                 ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
1635         }
1636
1637         if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
1638                 filter->enables |=
1639                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1640                 ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
1641         }
1642
1643         vnic = find_matching_vnic(bp, filter);
1644 done:
1645         if (!ret || update_flow) {
1646                 flow->filter = filter;
1647                 flow->vnic = vnic;
1648                 /* VNIC is set only in case of queue or RSS action */
1649                 if (vnic) {
1650                         /*
1651                          * RxQ0 is not used for flow filters.
1652                          */
1653
1654                         if (update_flow) {
1655                                 ret = -EXDEV;
1656                                 goto free_flow;
1657                         }
1658                         STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
1659                 }
1660                 PMD_DRV_LOG(ERR, "Successfully created flow.\n");
1661                 STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
1662                 return flow;
1663         }
1664         if (!ret) {
1665                 flow->filter = filter;
1666                 flow->vnic = vnic;
1667                 if (update_flow) {
1668                         ret = -EXDEV;
1669                         goto free_flow;
1670                 }
1671                 PMD_DRV_LOG(ERR, "Successfully created flow.\n");
1672                 STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
1673                 return flow;
1674         }
1675 free_filter:
1676         bnxt_free_filter(bp, filter);
1677 free_flow:
1678         if (ret == -EEXIST)
1679                 rte_flow_error_set(error, ret,
1680                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1681                                    "Matching Flow exists.");
1682         else if (ret == -EXDEV)
1683                 rte_flow_error_set(error, 0,
1684                                    RTE_FLOW_ERROR_TYPE_NONE, NULL,
1685                                    "Flow with pattern exists, updating destination queue");
1686         else
1687                 rte_flow_error_set(error, -ret,
1688                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1689                                    "Failed to create flow.");
1690         rte_free(flow);
1691         flow = NULL;
1692         return flow;
1693 }
1694
1695 static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
1696                                                struct bnxt_filter_info *filter,
1697                                                struct rte_flow_error *error)
1698 {
1699         uint16_t tun_dst_fid;
1700         uint32_t tun_type;
1701         int ret = 0;
1702
1703         ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
1704         if (ret) {
1705                 rte_flow_error_set(error, -ret,
1706                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1707                                    "Unable to query tunnel to VF");
1708                 return ret;
1709         }
1710         if (tun_type == (1U << filter->tunnel_type)) {
1711                 ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
1712                                                      &tun_dst_fid);
1713                 if (ret) {
1714                         rte_flow_error_set(error, -ret,
1715                                            RTE_FLOW_ERROR_TYPE_HANDLE,
1716                                            NULL,
1717                                            "tunnel_redirect info cmd fail");
1718                         return ret;
1719                 }
1720                 PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
1721                             tun_dst_fid + bp->first_vf_id, bp->fw_fid);
1722
1723                 /* Tunnel doesn't belong to this VF, so don't send HWRM
1724                  * cmd, just delete the flow from driver
1725                  */
1726                 if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id))
1727                         PMD_DRV_LOG(ERR,
1728                                     "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
1729                 else
1730                         ret = bnxt_hwrm_tunnel_redirect_free(bp,
1731                                                         filter->tunnel_type);
1732         }
1733         return ret;
1734 }
1735
/* rte_flow .destroy callback: clear the flow's HW filter(s), detach the
 * filter and flow from their VNIC lists, free them, and release the VNIC
 * itself when its flow list becomes empty.
 */
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_filter_info *filter = flow->filter;
	struct bnxt_vnic_info *vnic = flow->vnic;
	int ret = 0;

	if (!filter) {
		ret = -EINVAL;
		goto done;
	}

	/* Tunnel-redirect flows have no HW filter: handle via dedicated
	 * HWRM teardown, then (on success) fall into the common list
	 * cleanup below.
	 */
	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_handle_tunnel_redirect_destroy(bp,
							  filter,
							  error);
		if (!ret)
			goto done;
		else
			return ret;
	}

	/* Expected to find the flow; a zero return here means the driver's
	 * bookkeeping is inconsistent, but teardown proceeds anyway.
	 */
	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		PMD_DRV_LOG(ERR, "Could not find matching flow\n");

	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
	/* The L2 filter is cleared unconditionally; its result is the one
	 * that decides success below.
	 */
	ret = bnxt_hwrm_clear_l2_filter(bp, filter);

done:
	if (!ret) {
		/* NOTE(review): vnic is only NULL-checked for the final
		 * empty-list release; the two STAILQ_REMOVEs below assume
		 * vnic != NULL — confirm flow->vnic is always set on
		 * successfully created flows.
		 */
		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
		bnxt_free_filter(bp, filter);
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);

		/* If this was the last flow associated with this vnic,
		 * switch the queue back to RSS pool.
		 */
		if (vnic && STAILQ_EMPTY(&vnic->flow_list)) {
			rte_free(vnic->fw_grp_ids);
			if (vnic->rx_queue_cnt > 1)
				bnxt_hwrm_vnic_ctx_free(bp, vnic);

			bnxt_hwrm_vnic_free(bp, vnic);
			vnic->rx_queue_cnt = 0;
			bp->nr_vnics--;
		}
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}
1799
/* rte_flow .flush callback: destroy every flow on every allocated VNIC.
 * Stops and reports on the first HW clear failure.
 */
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			struct bnxt_filter_info *filter = flow->filter;

			/* Tunnel-redirect flows are torn down via HWRM; on
			 * success, jump straight to the common list cleanup
			 * (the `done` label inside this loop body).
			 */
			if (filter->filter_type ==
			    HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
			    filter->enables == filter->tunnel_type) {
				ret =
				bnxt_handle_tunnel_redirect_destroy(bp,
								    filter,
								    error);
				if (!ret)
					goto done;
				else
					return ret;
			}

			/* NOTE(review): the EM check is not chained with an
			 * `else`, so for an EM filter the `else if (!i)`
			 * branch may also run (on vnic 0) and overwrite ret —
			 * confirm this combination is intended.
			 */
			if (filter->filter_type == HWRM_CFA_EM_FILTER)
				ret = bnxt_hwrm_clear_em_filter(bp, filter);
			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
				ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
			else if (!i)
				/* L2 filters are only cleared for vnic 0. */
				ret = bnxt_hwrm_clear_l2_filter(bp, filter);

			if (ret) {
				rte_flow_error_set
					(error,
					 -ret,
					 RTE_FLOW_ERROR_TYPE_HANDLE,
					 NULL,
					 "Failed to flush flow in HW.");
				return -rte_errno;
			}
done:
			/* Release the software filter and flow objects. */
			bnxt_free_filter(bp, filter);
			STAILQ_REMOVE(&vnic->flow_list, flow,
				      rte_flow, next);
			rte_free(flow);
		}
	}

	return ret;
}
1856
/* rte_flow driver ops exported by the bnxt PMD. */
const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};