net/bnxt: support redirecting tunnel packets to VF
drivers/net/bnxt/bnxt_flow.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_vnic.h"
#include "bnxt_util.h"
#include "hsi_struct_def_dpdk.h"

static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL,
                                   "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL,
                                   "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL,
                                   "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}
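
/*
 * Note: rte_flow_error_set() records the failure details and sets
 * rte_errno, so the checks above return -rte_errno to hand a negative
 * errno value back through the rte_flow API.
 */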

static const struct rte_flow_item *
bnxt_flow_non_void_item(const struct rte_flow_item *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return cur;
                cur++;
        }
}

static const struct rte_flow_action *
bnxt_flow_non_void_action(const struct rte_flow_action *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return cur;
                cur++;
        }
}
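
/*
 * Both helpers rely on the rte_flow requirement that pattern and action
 * arrays are terminated by an END entry; VOID entries are placeholders
 * and are simply skipped, so no explicit bounds check is needed.
 */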

static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
                       struct rte_flow_error *error)
{
        const struct rte_flow_item *item =
                bnxt_flow_non_void_item(pattern);
        int use_ntuple = 1;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        use_ntuple = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        use_ntuple = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
                case RTE_FLOW_ITEM_TYPE_TCP:
                case RTE_FLOW_ITEM_TYPE_UDP:
                        /* FALLTHROUGH */
                        /* need ntuple match, reset exact match */
                        if (!use_ntuple) {
                                PMD_DRV_LOG(ERR,
                                        "VLAN flow cannot use NTUPLE filter\n");
                                rte_flow_error_set
                                        (error,
                                         EINVAL,
                                         RTE_FLOW_ERROR_TYPE_ITEM,
                                         item,
                                         "Cannot use VLAN with NTUPLE");
                                return -rte_errno;
                        }
                        use_ntuple |= 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_ANY:
                        use_ntuple = 0;
                        break;
                default:
                        PMD_DRV_LOG(DEBUG, "Unknown Flow type\n");
                        use_ntuple |= 0;
                }
                item++;
        }
        return use_ntuple;
}
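
/*
 * Return semantics: 1 selects an HWRM ntuple filter, 0 an exact-match (EM)
 * filter, negative on error.  For example, "eth / ipv4 / udp" stays on the
 * ntuple path, while a pattern containing a VLAN or ANY item forces EM.
 */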

static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                                  const struct rte_flow_attr *attr,
                                  const struct rte_flow_item pattern[],
                                  struct rte_flow_error *error,
                                  struct bnxt_filter_info *filter)
{
        const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;
        const struct rte_flow_item_gre *gre_spec;
        const struct rte_flow_item_gre *gre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;
        uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
        uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
        const struct rte_flow_item_vf *vf_spec;
        uint32_t tenant_id_be = 0;
        bool vni_masked = 0;
        bool tni_masked = 0;
        uint32_t vf = 0;
        int use_ntuple;
        uint32_t en = 0;
        uint32_t en_ethertype;
        int dflt_vnic, rc = 0;

        use_ntuple = bnxt_filter_type_check(pattern, error);
        PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
        if (use_ntuple < 0)
                return use_ntuple;

        if (use_ntuple && (bp->eth_dev->data->dev_conf.rxmode.mq_mode &
            ETH_MQ_RX_RSS)) {
                PMD_DRV_LOG(ERR, "Cannot create ntuple flow on RSS queues\n");
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Cannot create flow on RSS queues");
                rc = -rte_errno;
                return rc;
        }

        filter->filter_type = use_ntuple ?
                HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
        en_ethertype = use_ntuple ?
                NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
                EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (item->last) {
                        /* last or range is NOT supported as match criteria */
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "No support for range");
                        return -rte_errno;
                }

                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        if (!item->spec || !item->mask)
                                break;

                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        /* Source and destination MAC address masks must not
                         * be partially set: each must be either all 0's or
                         * all 1's.
                         */
                        if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
                             !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
                            (!rte_is_zero_ether_addr(&eth_mask->dst) &&
                             !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "MAC_addr mask not valid");
                                return -rte_errno;
                        }

                        /* A partial ethertype mask is not allowed; only
                         * exact matches are.
                         */
                        if (eth_mask->type &&
                            eth_mask->type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "ethertype mask not valid");
                                return -rte_errno;
                        }

                        if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                                rte_memcpy(filter->dst_macaddr,
                                           &eth_spec->dst, 6);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
                        }

                        if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
                                rte_memcpy(filter->src_macaddr,
                                           &eth_spec->src, 6);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
                        }

                        if (eth_mask->type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(eth_spec->type);
                                en |= en_ethertype;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec = item->spec;
                        vlan_mask = item->mask;
                        if (en & en_ethertype) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN TPID matching is not"
                                                   " supported");
                                return -rte_errno;
                        }
                        if (vlan_mask->tci &&
                            vlan_mask->tci == RTE_BE16(0x0fff)) {
                                /* Only the VLAN ID can be matched. */
                                filter->l2_ovlan =
                                        rte_be_to_cpu_16(vlan_spec->tci &
                                                         RTE_BE16(0x0fff));
                                en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
                        } else {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN mask is invalid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type &&
                            vlan_mask->inner_type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "inner ethertype mask not"
                                                   " valid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(vlan_spec->inner_type);
                                en |= en_ethertype;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        /* If mask is not involved, we could use EM filters. */
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.type_of_service ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.time_to_live ||
                            ipv4_mask->hdr.next_proto_id ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
                        filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

                        if (ipv4_mask->hdr.src_addr) {
                                filter->src_ipaddr_mask[0] =
                                        ipv4_mask->hdr.src_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }

                        if (ipv4_mask->hdr.dst_addr) {
                                filter->dst_ipaddr_mask[0] =
                                        ipv4_mask->hdr.dst_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }

                        filter->ip_addr_type = use_ntuple ?
                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;

                        if (ipv4_spec->hdr.next_proto_id) {
                                filter->ip_protocol =
                                        ipv4_spec->hdr.next_proto_id;
                                if (use_ntuple)
                                        en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
                                else
                                        en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv6_mask->hdr.vtc_flow ||
                            ipv6_mask->hdr.payload_len ||
                            ipv6_mask->hdr.proto ||
                            ipv6_mask->hdr.hop_limits) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

                        rte_memcpy(filter->src_ipaddr,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->dst_ipaddr,
                                   ipv6_spec->hdr.dst_addr, 16);

                        if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
                                                   16)) {
                                rte_memcpy(filter->src_ipaddr_mask,
                                           ipv6_mask->hdr.src_addr, 16);
                                en |= !use_ntuple ? 0 :
                                    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }

                        if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
                                                   16)) {
                                rte_memcpy(filter->dst_ipaddr_mask,
                                           ipv6_mask->hdr.dst_addr, 16);
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }

                        filter->ip_addr_type = use_ntuple ?
                                NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
                                EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        /* Check TCP mask. Only DST & SRC ports are maskable */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        filter->src_port = tcp_spec->hdr.src_port;
                        filter->dst_port = tcp_spec->hdr.dst_port;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (tcp_mask->hdr.dst_port) {
                                filter->dst_port_mask = tcp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }

                        if (tcp_mask->hdr.src_port) {
                                filter->src_port_mask = tcp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        filter->src_port = udp_spec->hdr.src_port;
                        filter->dst_port = udp_spec->hdr.dst_port;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (udp_mask->hdr.dst_port) {
                                filter->dst_port_mask = udp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }

                        if (udp_mask->hdr.src_port) {
                                filter->src_port_mask = udp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        if (!vxlan_spec && !vxlan_mask) {
                                filter->tunnel_type =
                                CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                                break;
                        }

                        if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
                            vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
                            vxlan_spec->flags != 0x8) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        /* Check if VNI is masked. */
                        if (vxlan_spec && vxlan_mask) {
                                vni_masked =
                                        !!memcmp(vxlan_mask->vni, vni_mask,
                                                 RTE_DIM(vni_mask));
                                if (vni_masked) {
                                        rte_flow_error_set
                                                (error,
                                                 EINVAL,
                                                 RTE_FLOW_ERROR_TYPE_ITEM,
                                                 item,
                                                 "Invalid VNI mask");
                                        return -rte_errno;
                                }

                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           vxlan_spec->vni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (!nvgre_spec && !nvgre_mask) {
                                filter->tunnel_type =
                                CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                                break;
                        }

                        if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
                            nvgre_spec->protocol != 0x6558) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (nvgre_spec && nvgre_mask) {
                                tni_masked =
                                        !!memcmp(nvgre_mask->tni, tni_mask,
                                                 RTE_DIM(tni_mask));
                                if (tni_masked) {
                                        rte_flow_error_set
                                                (error,
                                                 EINVAL,
                                                 RTE_FLOW_ERROR_TYPE_ITEM,
                                                 item,
                                                 "Invalid TNI mask");
                                        return -rte_errno;
                                }
                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           nvgre_spec->tni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_GRE:
                        gre_spec = item->spec;
                        gre_mask = item->mask;

                        /*
                         * Check if GRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if (!!gre_spec ^ !!gre_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid GRE item");
                                return -rte_errno;
                        }

                        if (!gre_spec && !gre_mask) {
                                filter->tunnel_type =
                                CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE;
                                break;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = item->spec;
                        vf = vf_spec->id;
                        if (!BNXT_PF(bp)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Configuring on a VF!");
                                return -rte_errno;
                        }

                        if (vf >= bp->pdev->max_vfs) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Incorrect VF id!");
                                return -rte_errno;
                        }

                        if (!attr->transfer) {
                                rte_flow_error_set(error,
                                                   ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Matching VF traffic without"
                                                   " affecting it (transfer attribute)"
                                                   " is unsupported");
                                return -rte_errno;
                        }

                        filter->mirror_vnic_id =
                        dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                        if (dflt_vnic < 0) {
                                /* This simply indicates there's no driver
                                 * loaded. This is not an error.
                                 */
                                rte_flow_error_set
                                        (error,
                                         EINVAL,
                                         RTE_FLOW_ERROR_TYPE_ITEM,
                                         item,
                                         "Unable to get default VNIC for VF");
                                return -rte_errno;
                        }

                        filter->mirror_vnic_id = dflt_vnic;
                        en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
                        break;
                default:
                        break;
                }
                item++;
        }
        filter->enables = en;

        return 0;
}
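
/*
 * Illustrative patterns accepted by the parser above, in testpmd flow
 * syntax (a sketch, not an exhaustive list):
 *
 *   pattern eth / ipv4 dst is 1.2.3.4 / udp dst is 4789 / end  (ntuple)
 *   pattern eth / vlan vid is 10 / end                         (exact match)
 *   pattern vxlan / end                       (tunnel type only, no
 *                                              spec/mask supplied)
 */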

/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr,
                                   "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr,
                                   "No support for egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr,
                                   "No support for priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr,
                                   "No support for group.");
                return -rte_errno;
        }

        return 0;
}
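
/*
 * Consequently only default-group, priority-0 ingress rules are accepted;
 * e.g. a testpmd "flow create 0 egress ..." command would be rejected here
 * with the messages above.
 */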

struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
                   struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter1, *f0;
        struct bnxt_vnic_info *vnic0;
        int rc;

        vnic0 = &bp->vnic_info[0];
        f0 = STAILQ_FIRST(&vnic0->filter);

        /* This flow has same DST MAC as the port/l2 filter. */
        if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
                return f0;

        /* This flow needs DST MAC which is not same as port/l2 */
        PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
        filter1 = bnxt_get_unused_filter(bp);
        if (filter1 == NULL)
                return NULL;

        filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
        memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
        memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
                                     filter1);
        if (rc) {
                bnxt_free_filter(bp, filter1);
                return NULL;
        }
        return filter1;
}
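
/*
 * In short: flows whose destination MAC matches the port's default L2
 * filter share that filter; otherwise a new L2 filter is allocated and
 * programmed via HWRM for the requested VNIC.  Callers then reference the
 * returned filter's fw_l2_filter_id from their EM/ntuple filters.
 */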

static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             const struct rte_flow_attr *attr,
                             struct rte_flow_error *error,
                             struct bnxt_filter_info *filter)
{
        const struct rte_flow_action *act =
                bnxt_flow_non_void_action(actions);
        struct bnxt *bp = dev->data->dev_private;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_vf *act_vf;
        struct bnxt_vnic_info *vnic, *vnic0;
        struct bnxt_filter_info *filter1;
        uint32_t vf = 0;
        int dflt_vnic;
        int rc;

        rc = bnxt_validate_and_parse_flow_type(bp, attr, pattern, error,
                                               filter);
        if (rc != 0)
                goto ret;

        rc = bnxt_flow_parse_attr(attr, error);
        if (rc != 0)
                goto ret;

        /* We support the ingress attribute only, at present. */
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                /* Allow this flow. Redirect to a VNIC. */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                if (act_q->index >= bp->rx_nr_rings) {
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Invalid queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

                vnic0 = &bp->vnic_info[0];
                vnic = &bp->vnic_info[act_q->index];
                if (vnic == NULL) {
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "No matching VNIC for queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }

                filter->dst_id = vnic->fw_vnic_id;
                filter1 = bnxt_get_l2_filter(bp, filter, vnic);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }

                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                PMD_DRV_LOG(DEBUG, "VNIC found\n");
                break;
        case RTE_FLOW_ACTION_TYPE_DROP:
                vnic0 = &bp->vnic_info[0];
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }

                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        filter->flags =
                                HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
                else
                        filter->flags =
                                HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
                break;
        case RTE_FLOW_ACTION_TYPE_COUNT:
                vnic0 = &bp->vnic_info[0];
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }

                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
                break;
        case RTE_FLOW_ACTION_TYPE_VF:
                act_vf = (const struct rte_flow_action_vf *)act->conf;
                vf = act_vf->id;

                if (filter->tunnel_type ==
                    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
                    filter->tunnel_type ==
                    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) {
                        /* If issued on a VF, ensure id is 0 and is trusted */
                        if (BNXT_VF(bp)) {
                                if (!BNXT_VF_IS_TRUSTED(bp) || vf) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                act,
                                                "Incorrect VF");
                                        rc = -rte_errno;
                                        goto ret;
                                }
                        }

                        filter->enables |= filter->tunnel_type;
                        filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER;
                        goto done;
                }

                if (vf >= bp->pdev->max_vfs) {
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Incorrect VF id!");
                        rc = -rte_errno;
                        goto ret;
                }

                filter->mirror_vnic_id =
                dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                if (dflt_vnic < 0) {
                        /* This simply indicates there's no driver loaded.
                         * This is not an error.
                         */
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Unable to get default VNIC for VF");
                        rc = -rte_errno;
                        goto ret;
                }

                filter->mirror_vnic_id = dflt_vnic;
                filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

                vnic0 = &bp->vnic_info[0];
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }

                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                break;

        default:
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }

        if (filter1) {
                bnxt_free_filter(bp, filter1);
                filter1->fw_l2_filter_id = -1;
        }
done:
        act = bnxt_flow_non_void_action(++act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }
ret:
        return rc;
}
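
/*
 * Action summary for the switch above (testpmd syntax, illustrative):
 *
 *   actions queue index 2 / end  - steer to the VNIC backing Rx queue 2
 *   actions drop / end           - program a drop filter
 *   actions count / end          - program a meter/count filter
 *   actions vf id 1 / end        - redirect to VF 1's default VNIC, or set
 *                                  up a tunnel redirect filter when the
 *                                  pattern named only a tunnel type
 */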

static int
bnxt_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct bnxt *bp = dev->data->dev_private;
        struct bnxt_filter_info *filter;
        int ret = 0;

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0)
                return ret;

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                return -ENOMEM;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        /* No need to hold on to this filter if we are just validating flow */
        filter->fw_l2_filter_id = UINT64_MAX;
        bnxt_free_filter(bp, filter);

        return ret;
}
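
/*
 * This backs the rte_flow validate operation: it runs the same parsing as
 * flow creation but releases the scratch filter instead of programming it,
 * so validation leaves the hardware filter tables untouched.
 */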

static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
        struct bnxt_filter_info *mf;
        struct rte_flow *flow;
        int i;

        for (i = bp->nr_vnics - 1; i >= 0; i--) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                STAILQ_FOREACH(flow, &vnic->flow_list, next) {
                        mf = flow->filter;

                        if (mf->filter_type == nf->filter_type &&
                            mf->flags == nf->flags &&
                            mf->src_port == nf->src_port &&
                            mf->src_port_mask == nf->src_port_mask &&
                            mf->dst_port == nf->dst_port &&
                            mf->dst_port_mask == nf->dst_port_mask &&
                            mf->ip_protocol == nf->ip_protocol &&
                            mf->ip_addr_type == nf->ip_addr_type &&
                            mf->ethertype == nf->ethertype &&
                            mf->vni == nf->vni &&
                            mf->tunnel_type == nf->tunnel_type &&
                            mf->l2_ovlan == nf->l2_ovlan &&
                            mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
                            mf->l2_ivlan == nf->l2_ivlan &&
                            mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
                            !memcmp(mf->l2_addr, nf->l2_addr,
                                    RTE_ETHER_ADDR_LEN) &&
                            !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
                                    RTE_ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_macaddr, nf->src_macaddr,
                                    RTE_ETHER_ADDR_LEN) &&
                            !memcmp(mf->dst_macaddr, nf->dst_macaddr,
                                    RTE_ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_ipaddr, nf->src_ipaddr,
                                    sizeof(nf->src_ipaddr)) &&
                            !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
                                    sizeof(nf->src_ipaddr_mask)) &&
                            !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
                                    sizeof(nf->dst_ipaddr)) &&
                            !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
                                    sizeof(nf->dst_ipaddr_mask))) {
                                if (mf->dst_id == nf->dst_id)
                                        return -EEXIST;
                                /*
                                 * Same Flow, Different queue
                                 * Clear the old ntuple filter
                                 * Reuse the matching L2 filter
                                 * ID for the new filter
                                 */
                                nf->fw_l2_filter_id = mf->fw_l2_filter_id;
                                if (nf->filter_type == HWRM_CFA_EM_FILTER)
                                        bnxt_hwrm_clear_em_filter(bp, mf);
                                if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                        bnxt_hwrm_clear_ntuple_filter(bp, mf);
                                /* Free the old filter, update flow
                                 * with new filter
                                 */
                                bnxt_free_filter(bp, mf);
                                flow->filter = nf;
                                return -EXDEV;
                        }
                }
        }
        return 0;
}
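
/*
 * Return values: 0 when no existing flow matches; -EEXIST when an identical
 * flow (same match, same destination) already exists; -EXDEV when only the
 * destination differs, in which case the old HW filter has already been
 * cleared and the flow updated to point at nf.
 */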

static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct bnxt *bp = dev->data->dev_private;
        struct bnxt_filter_info *filter;
        struct bnxt_vnic_info *vnic = NULL;
        bool update_flow = false;
        struct rte_flow *flow;
        unsigned int i;
        int ret = 0;
        uint32_t tun_type;

        flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return flow;
        }

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "Not a valid flow.\n");
                goto free_flow;
        }

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                goto free_flow;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        if (ret != 0)
                goto free_filter;

        ret = bnxt_match_filter(bp, filter);
        if (ret == -EEXIST) {
                PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
                /* Clear the filter that was created as part of
                 * validate_and_parse_flow() above
                 */
                bnxt_hwrm_clear_l2_filter(bp, filter);
                goto free_filter;
        } else if (ret == -EXDEV) {
                PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
                PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
                update_flow = true;
        }

        /* If tunnel redirection to a VF/PF is specified then only tunnel_type
         * is set and enables is set to the tunnel type. Issue hwrm cmd
         * directly in such a case.
         */
        if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
            filter->enables == filter->tunnel_type) {
                ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
                if (ret) {
                        rte_flow_error_set(error, -ret,
                                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                           "Unable to query tunnel to VF");
                        goto free_filter;
                }
                if (tun_type == (1U << filter->tunnel_type)) {
                        ret =
                        bnxt_hwrm_tunnel_redirect_free(bp,
                                                       filter->tunnel_type);
                        if (ret) {
                                PMD_DRV_LOG(ERR,
                                            "Unable to free existing tunnel\n");
                                rte_flow_error_set(error, -ret,
                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
                                                   NULL,
                                                   "Unable to free preexisting "
                                                   "tunnel on VF");
                                goto free_filter;
                        }
                }
                ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type);
                if (ret) {
                        rte_flow_error_set(error, -ret,
                                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                           "Unable to redirect tunnel to VF");
                        goto free_filter;
                }
                vnic = &bp->vnic_info[0];
                goto done;
        }

	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
		filter->enables |=
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
	}

	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
		filter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
	}

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (filter->dst_id == vnic->fw_vnic_id)
			break;
	}
done:
	if (!ret) {
		flow->filter = filter;
		flow->vnic = vnic;
		if (update_flow) {
			ret = -EXDEV;
			goto free_flow;
		}
		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
		return flow;
	}
free_filter:
	bnxt_free_filter(bp, filter);
free_flow:
	if (ret == -EEXIST)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Matching flow exists.");
	else if (ret == -EXDEV)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Flow with pattern exists, updating destination queue");
	else
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");
	rte_free(flow);
	return NULL;
}
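
/*
 * Illustrative, application-side sketch (not referenced by the driver):
 * one plausible way to request that all VXLAN tunnel traffic be
 * redirected to a VF, which exercises the HWRM_CFA_TUNNEL_REDIRECT_FILTER
 * path above. The helper name is hypothetical and error handling is
 * trimmed; the exact pattern/action combination the driver accepts is
 * decided by validate_and_parse_flow().
 */
static struct rte_flow *
example_redirect_vxlan_to_vf(uint16_t port_id, uint32_t vf_id,
			     struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_vf vf = { .id = vf_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}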

static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
					       struct bnxt_filter_info *filter,
					       struct rte_flow_error *error)
{
	uint16_t tun_dst_fid;
	uint32_t tun_type;
	int ret = 0;

	ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Unable to query tunnel to VF");
		return ret;
	}
	if (tun_type == (1U << filter->tunnel_type)) {
		ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
						     &tun_dst_fid);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   NULL,
					   "tunnel_redirect info cmd fail");
			return ret;
		}
		PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
			    tun_dst_fid + bp->first_vf_id, bp->fw_fid);

		/* Tunnel doesn't belong to this VF, so don't send HWRM
		 * cmd, just delete the flow from driver
		 */
		if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id))
			PMD_DRV_LOG(ERR,
				    "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
		else
			ret = bnxt_hwrm_tunnel_redirect_free(bp,
							filter->tunnel_type);
	}
	return ret;
}
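
/*
 * Worked example for the ownership test above, with hypothetical
 * values: if the firmware reports the tunnel's destination FID as 2
 * and first_vf_id is 0x80, the owning function's FID is 0x82; only a
 * VF whose fw_fid is 0x82 frees the redirection, any other function
 * just drops its local flow. A minimal sketch of the same check (not
 * referenced by the driver):
 */
static inline int
example_owns_tunnel_redirect(uint16_t fw_fid, uint16_t first_vf_id,
			     uint16_t tun_dst_fid)
{
	return fw_fid == (uint16_t)(tun_dst_fid + first_vf_id);
}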

static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_filter_info *filter = flow->filter;
	struct bnxt_vnic_info *vnic = flow->vnic;
	int ret = 0;

	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_handle_tunnel_redirect_destroy(bp,
							  filter,
							  error);
		if (!ret)
			goto done;
		else
			return ret;
	}

	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		PMD_DRV_LOG(ERR, "Could not find matching flow\n");
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
	else
		ret = bnxt_hwrm_clear_l2_filter(bp, filter);

done:
	if (!ret) {
		bnxt_free_filter(bp, filter);
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}
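
/*
 * Illustrative, application-side sketch: destroying a single handle
 * returned by rte_flow_create(); the call reaches bnxt_flow_destroy()
 * above through the generic rte_flow layer. Hypothetical helper, not
 * referenced by the driver; error message handling trimmed.
 */
static int
example_destroy_flow(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_error error;

	return rte_flow_destroy(port_id, flow, &error);
}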

static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		/* Take flows off the head one at a time; STAILQ_FOREACH
		 * must not be used here because the loop body frees the
		 * current element before advancing.
		 */
		while (!STAILQ_EMPTY(&vnic->flow_list)) {
			struct bnxt_filter_info *filter;

			flow = STAILQ_FIRST(&vnic->flow_list);
			filter = flow->filter;

			if (filter->filter_type ==
			    HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
			    filter->enables == filter->tunnel_type) {
				ret = bnxt_handle_tunnel_redirect_destroy(bp,
									  filter,
									  error);
				if (!ret)
					goto done;
				else
					return ret;
			}

			if (filter->filter_type == HWRM_CFA_EM_FILTER)
				ret = bnxt_hwrm_clear_em_filter(bp, filter);
			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
				ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

			if (ret) {
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Failed to flush flow in HW.");
				return -rte_errno;
			}
done:
			bnxt_free_filter(bp, filter);
			STAILQ_REMOVE(&vnic->flow_list, flow,
				      rte_flow, next);
			rte_free(flow);
		}
	}

	return ret;
}
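
/*
 * Illustrative, application-side sketch: dropping every flow on a port
 * in one call, e.g. before closing the device; this reaches
 * bnxt_flow_flush() above. Hypothetical helper, not referenced by the
 * driver.
 */
static int
example_flush_port(uint16_t port_id)
{
	struct rte_flow_error error;

	return rte_flow_flush(port_id, &error);
}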

const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};
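
/*
 * Illustrative sketch of how a PMD of this era typically exposes the
 * ops table above: the ethdev filter_ctrl callback hands out a pointer
 * to it for RTE_ETH_FILTER_GENERIC. In bnxt the real hook lives in
 * bnxt_ethdev.c; the function below is a hypothetical stand-in shown
 * only to connect the pieces.
 */
static int
example_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
		    enum rte_filter_type filter_type,
		    enum rte_filter_op filter_op, void *arg)
{
	if (filter_type != RTE_ETH_FILTER_GENERIC)
		return -ENOTSUP;
	if (filter_op != RTE_ETH_FILTER_GET)
		return -EINVAL;
	*(const struct rte_flow_ops **)arg = &bnxt_flow_ops;
	return 0;
}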