net/bnxt: fix matching of flow API item masks
dpdk.git: drivers/net/bnxt/bnxt_filter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * Filter Functions
 */

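/*
 * Take a filter from the pre-allocated free_filter_list pool and
 * initialize it as an RX L2 MAC filter for the port's own MAC address
 * with an all-ones address mask. Returns NULL when the pool is empty.
 */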
struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;

        /* Find the 1st unused filter from the free_filter_list pool. */
        filter = STAILQ_FIRST(&bp->free_filter_list);
        if (!filter) {
                PMD_DRV_LOG(ERR, "No more free filter resources\n");
                return NULL;
        }
        STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

        /* Default to an L2 MAC address filter. */
        filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
        memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
               ETHER_ADDR_LEN);
        memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
        return filter;
}

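/*
 * Allocate a zeroed filter for a VF and add it to that VF's filter list.
 * VF filters are allocated individually, unlike the PF filter pool.
 */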
struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
{
        struct bnxt_filter_info *filter;

        filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
        if (!filter) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
                        vf);
                return NULL;
        }

        filter->fw_l2_filter_id = UINT64_MAX;
        STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
        return filter;
}

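/*
 * Reset every entry of the filter pool to "no firmware filter allocated"
 * (all firmware filter IDs set to UINT64_MAX) and rebuild free_filter_list.
 */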
void bnxt_init_filters(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;
        int i, max_filters;

        max_filters = bp->max_l2_ctx;
        STAILQ_INIT(&bp->free_filter_list);
        for (i = 0; i < max_filters; i++) {
                filter = &bp->filter_info[i];
                filter->fw_l2_filter_id = UINT64_MAX;
                filter->fw_em_filter_id = UINT64_MAX;
                filter->fw_ntuple_filter_id = UINT64_MAX;
                STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
        }
}

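/*
 * Return every filter attached to any VNIC back to the free pool and
 * clear the L2 filters programmed on behalf of VFs. Only the VF L2
 * filters are cleared in firmware here; VNIC filters are merely moved
 * back onto free_filter_list.
 */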
void bnxt_free_all_filters(struct bnxt *bp)
{
        struct bnxt_vnic_info *vnic;
        struct bnxt_filter_info *filter, *temp_filter;
        int i;

        for (i = 0; i < MAX_FF_POOLS; i++) {
                STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
                        filter = STAILQ_FIRST(&vnic->filter);
                        while (filter) {
                                temp_filter = STAILQ_NEXT(filter, next);
                                STAILQ_REMOVE(&vnic->filter, filter,
                                              bnxt_filter_info, next);
                                STAILQ_INSERT_TAIL(&bp->free_filter_list,
                                                   filter, next);
                                filter = temp_filter;
                        }
                        STAILQ_INIT(&vnic->filter);
                }
        }

        for (i = 0; i < bp->pf.max_vfs; i++) {
                STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
                        bnxt_hwrm_clear_l2_filter(bp, filter);
                }
        }
}

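/*
 * Release the filter pool memory. Any filter still holding a firmware
 * L2 filter ID at this point indicates a leak; try to clear it in
 * firmware before freeing. VF filters are freed individually.
 */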
void bnxt_free_filter_mem(struct bnxt *bp)
{
        struct bnxt_filter_info *filter, *temp_filter;
        uint16_t max_filters, i;
        int rc = 0;

        if (bp->filter_info == NULL)
                return;

        /* Ensure that all filters are freed */
        max_filters = bp->max_l2_ctx;
        for (i = 0; i < max_filters; i++) {
                filter = &bp->filter_info[i];
                if (filter->fw_l2_filter_id != UINT64_MAX) {
                        PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n");
                        /* Call HWRM to try to free filter again */
                        rc = bnxt_hwrm_clear_l2_filter(bp, filter);
                        if (rc)
                                PMD_DRV_LOG(ERR,
                                       "HWRM filter cannot be freed rc = %d\n",
                                        rc);
                }
                filter->fw_l2_filter_id = UINT64_MAX;
        }
        STAILQ_INIT(&bp->free_filter_list);

        rte_free(bp->filter_info);
        bp->filter_info = NULL;

        for (i = 0; i < bp->pf.max_vfs; i++) {
                /* Unlink each VF filter before freeing it; iterating with
                 * STAILQ_FOREACH while freeing nodes would read freed memory.
                 */
                filter = STAILQ_FIRST(&bp->pf.vf_info[i].filter);
                while (filter) {
                        temp_filter = STAILQ_NEXT(filter, next);
                        STAILQ_REMOVE(&bp->pf.vf_info[i].filter, filter,
                                      bnxt_filter_info, next);
                        rte_free(filter);
                        filter = temp_filter;
                }
        }
}

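/*
 * Allocate the array backing the filter pool, sized by the maximum
 * number of L2 contexts the device supports.
 */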
int bnxt_alloc_filter_mem(struct bnxt *bp)
{
        struct bnxt_filter_info *filter_mem;
        uint16_t max_filters;

        max_filters = bp->max_l2_ctx;
        /* Allocate memory for the filter pool */
        filter_mem = rte_zmalloc("bnxt_filter_info",
                                 max_filters * sizeof(struct bnxt_filter_info),
                                 0);
        if (filter_mem == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters\n",
                        max_filters);
                return -ENOMEM;
        }
        bp->filter_info = filter_mem;
        return 0;
}

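/*
 * Pop the first unused filter from free_filter_list without initializing
 * it; callers set the filter type and match fields themselves.
 */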
struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;

        /* Find the 1st unused filter from the free_filter_list pool. */
        filter = STAILQ_FIRST(&bp->free_filter_list);
        if (!filter) {
                PMD_DRV_LOG(ERR, "No more free filter resources\n");
                return NULL;
        }
        STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

        return filter;
}

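/*
 * Return a filter to the free pool; this helper does not touch any
 * firmware state.
 */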
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
        STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}

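/*
 * Basic sanity checks on a flow request: the pattern, action list and
 * attributes must all be non-NULL before parsing begins.
 */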
static int
bnxt_flow_agrs_validate(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}

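/*
 * Skip leading VOID items/actions. Both helpers rely on the caller's
 * array being terminated with an END entry, which is never VOID, so
 * the loops always terminate.
 */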
static const struct rte_flow_item *
nxt_non_void_pattern(const struct rte_flow_item *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return cur;
                cur++;
        }
}

static const struct rte_flow_action *
nxt_non_void_action(const struct rte_flow_action *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return cur;
                cur++;
        }
}

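/* Return 1 if the given byte range is all zeroes, 0 otherwise. */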
int check_zero_bytes(const uint8_t *bytes, int len)
{
        int i;

        for (i = 0; i < len; i++)
                if (bytes[i] != 0x00)
                        return 0;
        return 1;
}

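/*
 * Walk the pattern to decide between an n-tuple filter and an exact
 * match (EM) filter: a VLAN item forces exact match, and combining
 * VLAN with L3/L4 items is rejected because VLAN cannot be expressed
 * in an n-tuple filter.
 */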
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
                       struct rte_flow_error *error)
{
        const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
        int use_ntuple = 1;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        use_ntuple = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        use_ntuple = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
                case RTE_FLOW_ITEM_TYPE_TCP:
                case RTE_FLOW_ITEM_TYPE_UDP:
                        /* These need an ntuple match; a VLAN item seen
                         * earlier already forced exact match, and the
                         * two cannot be combined.
                         */
                        if (!use_ntuple) {
                                PMD_DRV_LOG(ERR,
                                        "VLAN flow cannot use NTUPLE filter\n");
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Cannot use VLAN with NTUPLE");
                                return -rte_errno;
                        }
                        use_ntuple |= 1;
                        break;
                default:
                        PMD_DRV_LOG(ERR, "Unknown Flow type\n");
                        use_ntuple |= 1;
                }
                item++;
        }
        return use_ntuple;
}

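/*
 * Translate the flow pattern into the HWRM filter representation.
 * Decides between n-tuple and exact-match filtering, copies the match
 * fields and masks into 'filter', and accumulates the corresponding
 * "enables" bits for the eventual HWRM alloc request.
 */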
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                                  const struct rte_flow_item pattern[],
                                  struct rte_flow_error *error,
                                  struct bnxt_filter_info *filter)
{
        const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;
        uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
        uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
        const struct rte_flow_item_vf *vf_spec;
        uint32_t tenant_id_be = 0;
        bool vni_masked = 0;
        bool tni_masked = 0;
        uint32_t vf = 0;
        int use_ntuple;
        uint32_t en = 0;
        int dflt_vnic;

        use_ntuple = bnxt_filter_type_check(pattern, error);
        PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
        if (use_ntuple < 0)
                return use_ntuple;

        filter->filter_type = use_ntuple ?
                HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (item->last) {
                        /* last or range is NOT supported as match criteria */
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "No support for range");
                        return -rte_errno;
                }
                if (!item->spec || !item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "spec/mask is NULL");
                        return -rte_errno;
                }
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        /* Source and destination MAC address masks must
                         * not be partially set: each must be all 0's or
                         * all 1's.
                         */
                        if ((!is_zero_ether_addr(&eth_mask->src) &&
                             !is_broadcast_ether_addr(&eth_mask->src)) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "MAC_addr mask not valid");
                                return -rte_errno;
                        }

                        /* A partial EtherType mask is not allowed; only
                         * exact matches are.
                         */
                        if (eth_mask->type &&
                            eth_mask->type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "ethertype mask not valid");
                                return -rte_errno;
                        }

                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                rte_memcpy(filter->dst_macaddr,
                                           &eth_spec->dst, ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
                        }
                        if (is_broadcast_ether_addr(&eth_mask->src)) {
                                rte_memcpy(filter->src_macaddr,
                                           &eth_spec->src, ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
                        } /* else: source MAC mask is all zeroes, nothing
                           * to match on.
                           */
                        if (eth_mask->type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(eth_spec->type);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
                                        EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec = item->spec;
                        vlan_mask = item->mask;
                        if (vlan_mask->tci &&
                            vlan_mask->tci == RTE_BE16(0x0fff) &&
                            !vlan_mask->tpid) {
                                /* Only the VLAN ID can be matched. */
                                filter->l2_ovlan =
                                        rte_be_to_cpu_16(vlan_spec->tci &
                                                         RTE_BE16(0x0fff));
                                en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
                        } else if (vlan_mask->tci || vlan_mask->tpid) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN mask is invalid");
                                return -rte_errno;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        /* If mask is not involved, we could use EM filters. */
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;
                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.type_of_service ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.time_to_live ||
                            ipv4_mask->hdr.next_proto_id ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv4 mask.");
                                return -rte_errno;
                        }
                        filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
                        filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
                        if (ipv4_mask->hdr.src_addr) {
                                filter->src_ipaddr_mask[0] =
                                        ipv4_mask->hdr.src_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }
                        if (ipv4_mask->hdr.dst_addr) {
                                filter->dst_ipaddr_mask[0] =
                                        ipv4_mask->hdr.dst_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }
                        filter->ip_addr_type = use_ntuple ?
                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
                        if (ipv4_spec->hdr.next_proto_id) {
                                filter->ip_protocol =
                                        ipv4_spec->hdr.next_proto_id;
                                if (use_ntuple)
                                        en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
                                else
                                        en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv6_mask->hdr.vtc_flow ||
                            ipv6_mask->hdr.payload_len ||
                            ipv6_mask->hdr.proto ||
                            ipv6_mask->hdr.hop_limits) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
                        rte_memcpy(filter->src_ipaddr,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->dst_ipaddr,
                                   ipv6_spec->hdr.dst_addr, 16);
                        if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
                                rte_memcpy(filter->src_ipaddr_mask,
                                           ipv6_mask->hdr.src_addr, 16);
                                en |= !use_ntuple ? 0 :
                                    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }
                        if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
                                rte_memcpy(filter->dst_ipaddr_mask,
                                           ipv6_mask->hdr.dst_addr, 16);
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }
                        filter->ip_addr_type = use_ntuple ?
                                NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
                                EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        /* Check TCP mask. Only DST & SRC ports are maskable */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid TCP mask");
                                return -rte_errno;
                        }
                        filter->src_port = tcp_spec->hdr.src_port;
                        filter->dst_port = tcp_spec->hdr.dst_port;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
                        if (tcp_mask->hdr.dst_port) {
                                filter->dst_port_mask = tcp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }
                        if (tcp_mask->hdr.src_port) {
                                filter->src_port_mask = tcp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid UDP mask");
                                return -rte_errno;
                        }

                        filter->src_port = udp_spec->hdr.src_port;
                        filter->dst_port = udp_spec->hdr.dst_port;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (udp_mask->hdr.dst_port) {
                                filter->dst_port_mask = udp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }
                        if (udp_mask->hdr.src_port) {
                                filter->src_port_mask = udp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
                            vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
                            vxlan_spec->flags != 0x8) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        /* Check if VNI is masked. */
                        if (vxlan_spec && vxlan_mask) {
                                vni_masked =
                                        !!memcmp(vxlan_mask->vni, vni_mask,
                                                 RTE_DIM(vni_mask));
                                if (vni_masked) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VNI mask");
                                        return -rte_errno;
                                }

                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           vxlan_spec->vni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        /* Both fields are big-endian on the wire; compare
                         * against big-endian constants.
                         */
                        if (nvgre_spec->c_k_s_rsvd0_ver != RTE_BE16(0x2000) ||
                            nvgre_spec->protocol != RTE_BE16(0x6558)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (nvgre_spec && nvgre_mask) {
                                tni_masked =
                                        !!memcmp(nvgre_mask->tni, tni_mask,
                                                 RTE_DIM(tni_mask));
                                if (tni_masked) {
                                        rte_flow_error_set(error, EINVAL,
                                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                                       item,
                                                       "Invalid TNI mask");
                                        return -rte_errno;
                                }
                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           nvgre_spec->tni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = item->spec;
                        vf = vf_spec->id;
                        if (!BNXT_PF(bp)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Configuring on a VF!");
                                return -rte_errno;
                        }

                        if (vf >= bp->pdev->max_vfs) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Incorrect VF id!");
                                return -rte_errno;
                        }

                        dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                        if (dflt_vnic < 0) {
                                /* A negative value means the VF's default
                                 * VNIC is not available, e.g. no driver is
                                 * loaded on the VF; reject the rule.
                                 */
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Unable to get default VNIC for VF");
                                return -rte_errno;
                        }
                        filter->mirror_vnic_id = dflt_vnic;
                        en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
                        break;
                default:
                        break;
                }
                item++;
        }
        filter->enables = en;

        return 0;
}

/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "No support for egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "No support for priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "No support for group.");
                return -rte_errno;
        }

        return 0;
}

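/*
 * Find or create the L2 filter that anchors an n-tuple/EM flow. If the
 * flow's destination MAC matches the port's existing L2 filter, reuse
 * it; otherwise allocate and program a new L2 filter for that MAC.
 */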
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
                   struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter1, *f0;
        struct bnxt_vnic_info *vnic0;
        int rc;

        vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
        f0 = STAILQ_FIRST(&vnic0->filter);

        /* This flow has the same DST MAC as the port/L2 filter. */
        if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
                return f0;

        /* This flow needs a DST MAC that differs from the port/L2 filter. */
        PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
        filter1 = bnxt_get_unused_filter(bp);
        if (filter1 == NULL)
                return NULL;
        filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
        memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
        memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
                                     filter1);
        if (rc) {
                bnxt_free_filter(bp, filter1);
                return NULL;
        }
        return filter1;
}

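/*
 * Parse and validate a complete flow request: reject flows on RSS
 * queues, parse the pattern into 'filter', then resolve the single
 * terminal action (QUEUE, DROP, COUNT or VF) into a destination and
 * an anchoring L2 filter.
 */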
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             const struct rte_flow_attr *attr,
                             struct rte_flow_error *error,
                             struct bnxt_filter_info *filter)
{
        const struct rte_flow_action *act = nxt_non_void_action(actions);
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_vf *act_vf;
        struct bnxt_vnic_info *vnic, *vnic0;
        struct bnxt_filter_info *filter1;
        uint32_t vf = 0;
        int dflt_vnic;
        int rc;

        if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
                PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Cannot create flow on RSS queues");
                rc = -rte_errno;
                goto ret;
        }

        rc = bnxt_validate_and_parse_flow_type(bp, pattern, error, filter);
        if (rc != 0)
                goto ret;

        rc = bnxt_flow_parse_attr(attr, error);
        if (rc != 0)
                goto ret;
        /* Only the ingress attribute is supported right now. */
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                /* Allow this flow. Redirect to a VNIC. */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                if (act_q->index >= bp->rx_nr_rings) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Invalid queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
                if (vnic == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "No matching VNIC for queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                filter->dst_id = vnic->fw_vnic_id;
                filter1 = bnxt_get_l2_filter(bp, filter, vnic);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                PMD_DRV_LOG(DEBUG, "VNIC found\n");
                break;
        case RTE_FLOW_ACTION_TYPE_DROP:
                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        filter->flags =
                                HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
                else
                        filter->flags =
                                HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
                break;
        case RTE_FLOW_ACTION_TYPE_COUNT:
                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
                break;
        case RTE_FLOW_ACTION_TYPE_VF:
                act_vf = (const struct rte_flow_action_vf *)act->conf;
                vf = act_vf->id;
                if (!BNXT_PF(bp)) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Configuring on a VF!");
                        rc = -rte_errno;
                        goto ret;
                }

                if (vf >= bp->pdev->max_vfs) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Incorrect VF id!");
                        rc = -rte_errno;
                        goto ret;
                }

                dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                if (dflt_vnic < 0) {
                        /* A negative value means the VF's default VNIC is
                         * not available, e.g. no driver is loaded on the
                         * VF; reject the rule.
                         */
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Unable to get default VNIC for VF");
                        rc = -rte_errno;
                        goto ret;
                }
                filter->mirror_vnic_id = dflt_vnic;
                filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                break;

        default:
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }

        act = nxt_non_void_action(++act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }
ret:
        return rc;
}

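/*
 * rte_flow validate callback: run the full parse on a scratch filter
 * from the pool and release it again without programming anything.
 */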
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter;
        int ret = 0;

        ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
        if (ret != 0)
                return ret;

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                return -ENOMEM;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        /* No need to hold on to this filter if we are just validating flow */
        filter->fw_l2_filter_id = UINT64_MAX;
        bnxt_free_filter(bp, filter);

        return ret;
}

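/*
 * Look for an existing flow with the same match criteria. Returns
 * -EEXIST for an exact duplicate (same destination) and -EXDEV when
 * only the destination differs, in which case the old filter is
 * cleared in firmware and the flow is repointed at the new filter.
 */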
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
        struct bnxt_filter_info *mf;
        struct rte_flow *flow;
        int i;

        for (i = bp->nr_vnics - 1; i >= 0; i--) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                STAILQ_FOREACH(flow, &vnic->flow_list, next) {
                        mf = flow->filter;

                        if (mf->filter_type == nf->filter_type &&
                            mf->flags == nf->flags &&
                            mf->src_port == nf->src_port &&
                            mf->src_port_mask == nf->src_port_mask &&
                            mf->dst_port == nf->dst_port &&
                            mf->dst_port_mask == nf->dst_port_mask &&
                            mf->ip_protocol == nf->ip_protocol &&
                            mf->ip_addr_type == nf->ip_addr_type &&
                            mf->ethertype == nf->ethertype &&
                            mf->vni == nf->vni &&
                            mf->tunnel_type == nf->tunnel_type &&
                            mf->l2_ovlan == nf->l2_ovlan &&
                            mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
                            mf->l2_ivlan == nf->l2_ivlan &&
                            mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
                            !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
                            !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_macaddr, nf->src_macaddr,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->dst_macaddr, nf->dst_macaddr,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_ipaddr, nf->src_ipaddr,
                                    sizeof(nf->src_ipaddr)) &&
                            !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
                                    sizeof(nf->src_ipaddr_mask)) &&
                            !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
                                    sizeof(nf->dst_ipaddr)) &&
                            !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
                                    sizeof(nf->dst_ipaddr_mask))) {
                                if (mf->dst_id == nf->dst_id)
                                        return -EEXIST;
                                /* Same Flow, Different queue
                                 * Clear the old ntuple filter
                                 */
                                if (nf->filter_type == HWRM_CFA_EM_FILTER)
                                        bnxt_hwrm_clear_em_filter(bp, mf);
                                if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                        bnxt_hwrm_clear_ntuple_filter(bp, mf);
                                /* Free the old filter, update flow
                                 * with new filter
                                 */
                                bnxt_free_filter(bp, mf);
                                flow->filter = nf;
                                return -EXDEV;
                        }
                }
        }
        return 0;
}

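/*
 * rte_flow create callback: validate and parse the request, handle
 * duplicate flows, program the EM or n-tuple filter in firmware and
 * link the new flow into its VNIC's flow list.
 */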
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter;
        struct bnxt_vnic_info *vnic = NULL;
        bool update_flow = false;
        struct rte_flow *flow;
        unsigned int i;
        int ret = 0;

        flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return flow;
        }

        ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "Not a valid flow.\n");
                goto free_flow;
        }

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                ret = -ENOMEM;
                goto free_flow;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        if (ret != 0)
                goto free_filter;

        ret = bnxt_match_filter(bp, filter);
        if (ret == -EEXIST) {
                PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
                /* Clear the filter that was created as part of
                 * validate_and_parse_flow() above
                 */
                bnxt_hwrm_clear_l2_filter(bp, filter);
                goto free_filter;
        } else if (ret == -EXDEV) {
                PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
                PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
                update_flow = true;
        }

        if (filter->filter_type == HWRM_CFA_EM_FILTER) {
                filter->enables |=
                        HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
        }
        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
                filter->enables |=
                        HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
        }

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];
                if (filter->dst_id == vnic->fw_vnic_id)
                        break;
        }

        if (!ret) {
                flow->filter = filter;
                flow->vnic = vnic;
                if (update_flow) {
                        ret = -EXDEV;
                        goto free_flow;
                }
                PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
                STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
                return flow;
        }
free_filter:
        bnxt_free_filter(bp, filter);
free_flow:
        /* rte_flow_error_set() expects a positive errno value. */
        if (ret == -EEXIST)
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Matching Flow exists.");
        else if (ret == -EXDEV)
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Flow with pattern exists, updating destination queue");
        else
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to create flow.");
        rte_free(flow);
        flow = NULL;
        return flow;
}

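/*
 * rte_flow destroy callback: clear the flow's firmware filter (EM,
 * n-tuple or L2), unlink the flow from its VNIC and free it.
 */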
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter = flow->filter;
        struct bnxt_vnic_info *vnic = flow->vnic;
        int ret = 0;

        ret = bnxt_match_filter(bp, filter);
        if (ret == 0)
                PMD_DRV_LOG(ERR, "Could not find matching flow\n");
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                ret = bnxt_hwrm_clear_em_filter(bp, filter);
        else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
        else
                ret = bnxt_hwrm_clear_l2_filter(bp, filter);
        if (!ret) {
                STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
                rte_free(flow);
        } else {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to destroy flow.");
        }

        return ret;
}

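/*
 * rte_flow flush callback: clear every flow's firmware filter on every
 * VNIC and free the flow objects. Stops at the first firmware failure.
 */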
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_vnic_info *vnic;
        struct rte_flow *flow;
        unsigned int i;
        int ret = 0;

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];
                /* Take flows off the head of the list one at a time;
                 * iterating with STAILQ_FOREACH while freeing nodes would
                 * read freed memory.
                 */
                while (!STAILQ_EMPTY(&vnic->flow_list)) {
                        struct bnxt_filter_info *filter;

                        flow = STAILQ_FIRST(&vnic->flow_list);
                        filter = flow->filter;

                        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                                ret = bnxt_hwrm_clear_em_filter(bp, filter);
                        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

                        if (ret) {
                                rte_flow_error_set(error, -ret,
                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
                                                   NULL,
                                                   "Failed to flush flow in HW.");
                                return -rte_errno;
                        }

                        STAILQ_REMOVE_HEAD(&vnic->flow_list, next);
                        rte_free(flow);
                }
        }

        return ret;
}

const struct rte_flow_ops bnxt_flow_ops = {
        .validate = bnxt_flow_validate,
        .create = bnxt_flow_create,
        .destroy = bnxt_flow_destroy,
        .flush = bnxt_flow_flush,
};