net/bnxt: use UINT64_MAX to initialize filter ids
[dpdk.git] / drivers / net / bnxt / bnxt_filter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * Filter Functions
 */

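/*
 * Allocate a filter from the free filter pool and initialize it as an
 * L2 MAC filter matching the port's primary MAC address.
 */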
struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;

	/* Find the 1st unused filter from the free_filter_list pool. */
	filter = STAILQ_FIRST(&bp->free_filter_list);
	if (!filter) {
		PMD_DRV_LOG(ERR, "No more free filter resources\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

	/* Default to an L2 MAC address filter. */
	filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
	memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
	       ETHER_ADDR_LEN);
	memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
	return filter;
}

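/*
 * Allocate a zeroed filter for a VF and append it to that VF's filter
 * list. The firmware L2 filter id starts out invalid (UINT64_MAX).
 */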
struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
{
	struct bnxt_filter_info *filter;

	filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
	if (!filter) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
			vf);
		return NULL;
	}

	filter->fw_l2_filter_id = UINT64_MAX;
	STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
	return filter;
}

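/*
 * Mark every entry of the pre-allocated filter array as unused by
 * setting all firmware filter ids to UINT64_MAX, and queue the entries
 * on the free list.
 */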
void bnxt_init_filters(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;
	int i, max_filters;

	max_filters = bp->max_l2_ctx;
	STAILQ_INIT(&bp->free_filter_list);
	for (i = 0; i < max_filters; i++) {
		filter = &bp->filter_info[i];
		filter->fw_l2_filter_id = UINT64_MAX;
		filter->fw_em_filter_id = UINT64_MAX;
		filter->fw_ntuple_filter_id = UINT64_MAX;
		STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
	}
}

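/*
 * Return all VNIC filters to the free list and clear the HWRM L2
 * filters that were created on behalf of VFs.
 */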
void bnxt_free_all_filters(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	int i;

	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);
				STAILQ_REMOVE(&vnic->filter, filter,
					      bnxt_filter_info, next);
				STAILQ_INSERT_TAIL(&bp->free_filter_list,
						   filter, next);
				filter = temp_filter;
			}
			STAILQ_INIT(&vnic->filter);
		}
	}

	for (i = 0; i < bp->pf.max_vfs; i++) {
		STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
			bnxt_hwrm_clear_l2_filter(bp, filter);
		}
	}
}

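/*
 * Release the filter array. Any filter still holding a valid firmware
 * L2 filter id is cleared in HWRM before the memory is freed.
 */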
void bnxt_free_filter_mem(struct bnxt *bp)
{
	struct bnxt_filter_info *filter, *temp_filter;
	uint16_t max_filters, i;
	int rc = 0;

	if (bp->filter_info == NULL)
		return;

	/* Ensure that all filters are freed */
	max_filters = bp->max_l2_ctx;
	for (i = 0; i < max_filters; i++) {
		filter = &bp->filter_info[i];
		if (filter->fw_l2_filter_id != UINT64_MAX) {
			PMD_DRV_LOG(ERR, "HWRM filter was not freed\n");
			/* Call HWRM to try to free the filter again */
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
			if (rc)
				PMD_DRV_LOG(ERR,
				       "HWRM filter cannot be freed rc = %d\n",
					rc);
		}
		filter->fw_l2_filter_id = UINT64_MAX;
	}
	STAILQ_INIT(&bp->free_filter_list);

	rte_free(bp->filter_info);
	bp->filter_info = NULL;

	/* Unlink each VF filter before freeing it so the list is never
	 * walked through freed memory.
	 */
	for (i = 0; i < bp->pf.max_vfs; i++) {
		filter = STAILQ_FIRST(&bp->pf.vf_info[i].filter);
		while (filter) {
			temp_filter = STAILQ_NEXT(filter, next);
			STAILQ_REMOVE(&bp->pf.vf_info[i].filter, filter,
				      bnxt_filter_info, next);
			rte_free(filter);
			filter = temp_filter;
		}
	}
}

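/*
 * Allocate the array backing the filter pool; one entry per L2 context
 * supported by the device.
 */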
int bnxt_alloc_filter_mem(struct bnxt *bp)
{
	struct bnxt_filter_info *filter_mem;
	uint16_t max_filters;

	max_filters = bp->max_l2_ctx;
	/* Allocate memory for the filter pool */
	filter_mem = rte_zmalloc("bnxt_filter_info",
				 max_filters * sizeof(struct bnxt_filter_info),
				 0);
	if (filter_mem == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters\n",
			max_filters);
		return -ENOMEM;
	}
	bp->filter_info = filter_mem;
	return 0;
}

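/*
 * Pop an uninitialized filter from the free filter pool; the caller is
 * responsible for filling it in and returning it via bnxt_free_filter().
 */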
struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;

	/* Find the 1st unused filter from the free_filter_list pool. */
	filter = STAILQ_FIRST(&bp->free_filter_list);
	if (!filter) {
		PMD_DRV_LOG(ERR, "No more free filter resources\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

	return filter;
}

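/* Return a filter to the free filter pool. */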
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
	STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}

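/*
 * Reject rte_flow requests that arrive without a pattern, an action
 * list, or attributes.
 */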
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	return 0;
}

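/*
 * Skip any VOID items/actions; pattern and action arrays are expected
 * to be terminated by an END entry, which bounds these loops.
 */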
static const struct rte_flow_item *
nxt_non_void_pattern(const struct rte_flow_item *cur)
{
	while (cur->type == RTE_FLOW_ITEM_TYPE_VOID)
		cur++;
	return cur;
}

static const struct rte_flow_action *
nxt_non_void_action(const struct rte_flow_action *cur)
{
	while (cur->type == RTE_FLOW_ACTION_TYPE_VOID)
		cur++;
	return cur;
}

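/* Return 1 if the first len bytes are all zero, 0 otherwise. */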
int check_zero_bytes(const uint8_t *bytes, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (bytes[i] != 0x00)
			return 0;
	return 1;
}

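/*
 * Decide whether the pattern can be programmed as an ntuple filter
 * (returns 1) or must use an exact-match filter (returns 0). A VLAN
 * item forces exact match; combining it with L3/L4 items is rejected.
 */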
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
		       struct rte_flow_error *error)
{
	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
	int use_ntuple = 1;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			use_ntuple = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			use_ntuple = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_TCP:
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* These items need an ntuple match; a preceding
			 * VLAN item has already forced exact match.
			 */
			if (!use_ntuple) {
				PMD_DRV_LOG(ERR,
					"VLAN flow cannot use NTUPLE filter\n");
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Cannot use VLAN with NTUPLE");
				return -rte_errno;
			}
			use_ntuple |= 1;
			break;
		default:
			PMD_DRV_LOG(ERR, "Unknown Flow type\n");
			use_ntuple |= 1;
		}
		item++;
	}
	return use_ntuple;
}

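/*
 * Walk the pattern and translate each item into the bnxt filter
 * representation, accumulating the corresponding HWRM "enables" bits.
 */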
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
				  const struct rte_flow_item pattern[],
				  struct rte_flow_error *error,
				  struct bnxt_filter_info *filter)
{
	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	const struct rte_flow_item_vf *vf_spec;
	uint32_t tenant_id_be = 0;
	bool vni_masked = 0;
	bool tni_masked = 0;
	uint32_t vf = 0;
	int use_ntuple;
	uint32_t en = 0;
	int dflt_vnic;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
	if (use_ntuple < 0)
		return use_ntuple;

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->last) {
			/* last or range is NOT supported as match criteria */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "No support for range");
			return -rte_errno;
		}
		if (!item->spec || !item->mask) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "spec/mask is NULL");
			return -rte_errno;
		}
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			/* Source MAC address mask cannot be partially set.
			 * Should be all 0's or all 1's.
			 * Destination MAC address mask must not be partially
			 * set. Should be all 1's or all 0's.
			 */
			if ((!is_zero_ether_addr(&eth_mask->src) &&
			     !is_broadcast_ether_addr(&eth_mask->src)) ||
			    (!is_zero_ether_addr(&eth_mask->dst) &&
			     !is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "MAC_addr mask not valid");
				return -rte_errno;
			}

			/* Mask is not allowed. Only exact matches are */
			if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "ethertype mask not valid");
				return -rte_errno;
			}

			if (is_broadcast_ether_addr(&eth_mask->dst)) {
				rte_memcpy(filter->dst_macaddr,
					   &eth_spec->dst, ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
			}
			if (is_broadcast_ether_addr(&eth_mask->src)) {
				rte_memcpy(filter->src_macaddr,
					   &eth_spec->src, ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
			} /*
			   * else {
			   *  RTE_LOG(ERR, PMD, "Handle this condition\n");
			   * }
			   */
			if (eth_spec->type) {
				filter->ethertype =
					rte_be_to_cpu_16(eth_spec->type);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
					EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			if (vlan_mask->tci && !vlan_mask->tpid) {
				/* Only the VLAN ID can be matched. Convert
				 * the big-endian TCI to host order before
				 * masking out the PCP/DEI bits.
				 */
				filter->l2_ovlan =
					rte_be_to_cpu_16(vlan_spec->tci) &
					0xFFF;
				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
			} else {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN mask is invalid");
				return -rte_errno;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* If mask is not involved, we could use EM filters. */
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;
			/* Only IP DST and SRC fields are maskable. */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.type_of_service ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.time_to_live ||
			    ipv4_mask->hdr.next_proto_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid IPv4 mask.");
				return -rte_errno;
			}
			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			if (ipv4_mask->hdr.src_addr) {
				filter->src_ipaddr_mask[0] =
					ipv4_mask->hdr.src_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}
			if (ipv4_mask->hdr.dst_addr) {
				filter->dst_ipaddr_mask[0] =
					ipv4_mask->hdr.dst_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}
			filter->ip_addr_type = use_ntuple ?
			 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
			 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
			if (ipv4_spec->hdr.next_proto_id) {
				filter->ip_protocol =
					ipv4_spec->hdr.next_proto_id;
				if (use_ntuple)
					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
				else
					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv6_mask->hdr.vtc_flow ||
			    ipv6_mask->hdr.payload_len ||
			    ipv6_mask->hdr.proto ||
			    ipv6_mask->hdr.hop_limits) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid IPv6 mask.");
				return -rte_errno;
			}

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			rte_memcpy(filter->src_ipaddr,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->dst_ipaddr,
				   ipv6_spec->hdr.dst_addr, 16);
			if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
				rte_memcpy(filter->src_ipaddr_mask,
					   ipv6_mask->hdr.src_addr, 16);
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}
			if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
				rte_memcpy(filter->dst_ipaddr_mask,
					   ipv6_mask->hdr.dst_addr, 16);
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}
			filter->ip_addr_type = use_ntuple ?
				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			/* Check TCP mask. Only DST & SRC ports are maskable */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid TCP mask");
				return -rte_errno;
			}
			filter->src_port = tcp_spec->hdr.src_port;
			filter->dst_port = tcp_spec->hdr.dst_port;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
			if (tcp_mask->hdr.dst_port) {
				filter->dst_port_mask = tcp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}
			if (tcp_mask->hdr.src_port) {
				filter->src_port_mask = tcp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid UDP mask");
				return -rte_errno;
			}

			filter->src_port = udp_spec->hdr.src_port;
			filter->dst_port = udp_spec->hdr.dst_port;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (udp_mask->hdr.dst_port) {
				filter->dst_port_mask = udp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}
			if (udp_mask->hdr.src_port) {
				filter->src_port_mask = udp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Guard against the protocol-only case where both
			 * spec and mask are NULL.
			 */
			if (vxlan_spec &&
			    (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
			     vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
			     vxlan_spec->flags != 0x8)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (vni_masked) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VNI mask");
					return -rte_errno;
				}

				/* The 24-bit VNI occupies the three low-order
				 * bytes of the big-endian tenant id.
				 */
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid NVGRE item");
				return -rte_errno;
			}

			/* Guard against the protocol-only case; the header
			 * fields are big-endian on the wire.
			 */
			if (nvgre_spec &&
			    (nvgre_spec->c_k_s_rsvd0_ver !=
			     rte_cpu_to_be_16(0x2000) ||
			     nvgre_spec->protocol !=
			     rte_cpu_to_be_16(0x6558))) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec && nvgre_mask) {
				tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (tni_masked) {
					rte_flow_error_set(error, EINVAL,
						       RTE_FLOW_ERROR_TYPE_ITEM,
						       item,
						       "Invalid TNI mask");
					return -rte_errno;
				}
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = item->spec;
			vf = vf_spec->id;
			if (!BNXT_PF(bp)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Configuring on a VF!");
				return -rte_errno;
			}

			if (vf >= bp->pdev->max_vfs) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Incorrect VF id!");
				return -rte_errno;
			}

			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
			if (dflt_vnic < 0) {
				/* This simply indicates that no driver is
				 * loaded on the VF, so the default VNIC
				 * cannot be resolved and the flow cannot
				 * be applied.
				 */
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Unable to get default VNIC for VF");
				return -rte_errno;
			}
			filter->mirror_vnic_id = dflt_vnic;
			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
			break;
		default:
			break;
		}
		item++;
	}
	filter->enables = en;

	return 0;
}


/*
 * Parse flow attributes: only ingress flows without priority or group
 * are supported.
 */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "No support for egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "No support for priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   attr, "No support for group.");
		return -rte_errno;
	}

	return 0;
}

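/*
 * Find or create the L2 filter that a flow's destination MAC resolves
 * to. If the destination MAC matches the port's own L2 filter, that
 * filter is reused; otherwise a new HWRM L2 filter is created.
 */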
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		   struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter1, *f0;
	struct bnxt_vnic_info *vnic0;
	int rc;

	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	f0 = STAILQ_FIRST(&vnic0->filter);

	/* This flow has the same DST MAC as the port/L2 filter. */
	if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
		return f0;

	/* This flow needs a DST MAC that differs from the port/L2 filter. */
	PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
	filter1 = bnxt_get_unused_filter(bp);
	if (filter1 == NULL)
		return NULL;
	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
	memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
	memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
				     filter1);
	if (rc) {
		bnxt_free_filter(bp, filter1);
		return NULL;
	}
	return filter1;
}

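/*
 * Validate the attributes, parse the pattern into *filter, and apply
 * the single terminal action (queue, drop, count, or VF redirect).
 */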
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error,
			     struct bnxt_filter_info *filter)
{
	const struct rte_flow_action *act = nxt_non_void_action(actions);
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	struct bnxt_vnic_info *vnic, *vnic0;
	struct bnxt_filter_info *filter1;
	uint32_t vf = 0;
	int dflt_vnic;
	int rc;

	if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Cannot create flow on RSS queues");
		rc = -rte_errno;
		goto ret;
	}

	rc = bnxt_validate_and_parse_flow_type(bp, pattern, error, filter);
	if (rc != 0)
		goto ret;

	rc = bnxt_flow_parse_attr(attr, error);
	if (rc != 0)
		goto ret;
	/* Only the ingress attribute is supported right now. */
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		/* Allow this flow. Redirect to a VNIC. */
		act_q = (const struct rte_flow_action_queue *)act->conf;
		if (act_q->index >= bp->rx_nr_rings) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Invalid queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
		if (vnic == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "No matching VNIC for queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		PMD_DRV_LOG(DEBUG, "VNIC found\n");
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			filter->flags =
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
		else
			filter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		vf = act_vf->id;
		if (!BNXT_PF(bp)) {
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "Configuring on a VF!");
			rc = -rte_errno;
			goto ret;
		}

		if (vf >= bp->pdev->max_vfs) {
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "Incorrect VF id!");
			rc = -rte_errno;
			goto ret;
		}

		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
		if (dflt_vnic < 0) {
			/* This simply indicates that no driver is loaded
			 * on the VF, so the default VNIC cannot be
			 * resolved and the flow cannot be applied.
			 */
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "Unable to get default VNIC for VF");
			rc = -rte_errno;
			goto ret;
		}
		filter->mirror_vnic_id = dflt_vnic;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		break;

	default:
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

	act = nxt_non_void_action(++act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}
ret:
	return rc;
}

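/*
 * rte_flow validate callback: parse the flow into a scratch filter to
 * check it, then return the filter to the pool.
 */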
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter;
	int ret = 0;

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0)
		return ret;

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		return -ENOMEM;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	/* No need to hold on to this filter if we are just validating flow */
	filter->fw_l2_filter_id = UINT64_MAX;
	bnxt_free_filter(bp, filter);

	return ret;
}

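/*
 * Check whether nf duplicates a flow already programmed on any VNIC.
 * Returns -EEXIST for an exact duplicate, -EXDEV when only the
 * destination differs (the existing flow is updated to use nf), and 0
 * when no match is found.
 */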
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf;
	struct rte_flow *flow;
	int i;

	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;

			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask))) {
				if (mf->dst_id == nf->dst_id)
					return -EEXIST;
				/* Same flow, different queue:
				 * clear the old ntuple filter.
				 */
				if (nf->filter_type == HWRM_CFA_EM_FILTER)
					bnxt_hwrm_clear_em_filter(bp, mf);
				if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
					bnxt_hwrm_clear_ntuple_filter(bp, mf);
				/* Free the old filter and update the flow
				 * with the new filter.
				 */
				bnxt_free_filter(bp, mf);
				flow->filter = nf;
				return -EXDEV;
			}
		}
	}
	return 0;
}

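/*
 * rte_flow create callback: validate and parse the flow, program the
 * EM or ntuple filter in HWRM, and attach the flow to its VNIC.
 */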
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter;
	struct bnxt_vnic_info *vnic = NULL;
	bool update_flow = false;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Not a valid flow.\n");
		goto free_flow;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		ret = -ENOSPC;
		goto free_flow;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	if (ret != 0)
		goto free_filter;

	ret = bnxt_match_filter(bp, filter);
	if (ret == -EEXIST) {
		PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
		/* Clear the filter that was created as part of
		 * validate_and_parse_flow() above
		 */
		bnxt_hwrm_clear_l2_filter(bp, filter);
		goto free_filter;
	} else if (ret == -EXDEV) {
		PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
		PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
		update_flow = true;
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
		filter->enables |=
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
	}
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
		filter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
	}

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (filter->dst_id == vnic->fw_vnic_id)
			break;
	}

	if (!ret) {
		flow->filter = filter;
		flow->vnic = vnic;
		if (update_flow) {
			ret = -EXDEV;
			goto free_flow;
		}
		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
		return flow;
	}
free_filter:
	bnxt_free_filter(bp, filter);
free_flow:
	if (ret == -EEXIST)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Matching Flow exists.");
	else if (ret == -EXDEV)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Flow with pattern exists, updating destination queue");
	else
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");
	rte_free(flow);
	flow = NULL;
	return flow;
}

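/*
 * rte_flow destroy callback: clear the HWRM filter behind the flow and
 * unlink the flow from its VNIC.
 */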
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter = flow->filter;
	struct bnxt_vnic_info *vnic = flow->vnic;
	int ret = 0;

	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		PMD_DRV_LOG(ERR, "Could not find matching flow\n");
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	/* An EM filter also falls through to the else branch below to
	 * clear the underlying L2 filter.
	 */
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
	else
		ret = bnxt_hwrm_clear_l2_filter(bp, filter);
	if (!ret) {
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}

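/*
 * rte_flow flush callback: clear every flow on every VNIC, stopping at
 * the first HWRM failure.
 */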
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		/* Pop flows off the head so the list is never walked
		 * past a freed entry.
		 */
		while ((flow = STAILQ_FIRST(&vnic->flow_list)) != NULL) {
			struct bnxt_filter_info *filter = flow->filter;

			if (filter->filter_type == HWRM_CFA_EM_FILTER)
				ret = bnxt_hwrm_clear_em_filter(bp, filter);
			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
				ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

			if (ret) {
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Failed to flush flow in HW.");
				return -rte_errno;
			}

			STAILQ_REMOVE(&vnic->flow_list, flow,
				      rte_flow, next);
			rte_free(flow);
		}
	}

	return ret;
}

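/*
 * Flow ops table registered with the rte_flow framework. A minimal
 * application-side usage sketch (assuming a configured port; the names
 * port_id, attr, pattern, and actions are placeholders):
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (flow)
 *		rte_flow_destroy(port_id, flow, &err);
 */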
const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};