1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2018 Broadcom
3  * All rights reserved.
4  */
5
6 #include <sys/queue.h>
7
8 #include <rte_byteorder.h>
9 #include <rte_log.h>
10 #include <rte_malloc.h>
11 #include <rte_flow.h>
12 #include <rte_flow_driver.h>
13 #include <rte_tailq.h>
14
15 #include "bnxt.h"
16 #include "bnxt_filter.h"
17 #include "bnxt_hwrm.h"
18 #include "bnxt_vnic.h"
19 #include "hsi_struct_def_dpdk.h"
20
21 /*
22  * Filter Functions
23  */
24
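/*
 * Take the first unused entry off bp->free_filter_list and initialize it as
 * an RX L2 (MAC address) filter matching the port's primary MAC address.
 * Returns NULL when the pool is exhausted.
 */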
25 struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
26 {
27         struct bnxt_filter_info *filter;
28
29         /* Find the 1st unused filter from the free_filter_list pool*/
30         filter = STAILQ_FIRST(&bp->free_filter_list);
31         if (!filter) {
32                 PMD_DRV_LOG(ERR, "No more free filter resources\n");
33                 return NULL;
34         }
35         STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
36
37         /* Default to L2 MAC Addr filter */
38         filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
39         filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
40                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
41         memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
42                ETHER_ADDR_LEN);
43         memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
44         return filter;
45 }
46
47 struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
48 {
49         struct bnxt_filter_info *filter;
50
51         filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
52         if (!filter) {
53                 PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
54                         vf);
55                 return NULL;
56         }
57
58         filter->fw_l2_filter_id = UINT64_MAX;
59         STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
60         return filter;
61 }
62
63 void bnxt_init_filters(struct bnxt *bp)
64 {
65         struct bnxt_filter_info *filter;
66         int i, max_filters;
67
68         max_filters = bp->max_l2_ctx;
69         STAILQ_INIT(&bp->free_filter_list);
70         for (i = 0; i < max_filters; i++) {
71                 filter = &bp->filter_info[i];
72                 filter->fw_l2_filter_id = UINT64_MAX;
73                 filter->fw_em_filter_id = UINT64_MAX;
74                 filter->fw_ntuple_filter_id = UINT64_MAX;
75                 STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
76         }
77 }
78
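/*
 * Detach every filter from the per-queue VNIC filter lists and return it to
 * the free pool.  On the PF, also ask firmware (HWRM) to clear the L2
 * filters that were created on behalf of each VF.
 */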
79 void bnxt_free_all_filters(struct bnxt *bp)
80 {
81         struct bnxt_vnic_info *vnic;
82         struct bnxt_filter_info *filter, *temp_filter;
83         int i;
84
85         for (i = 0; i < MAX_FF_POOLS; i++) {
86                 STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
87                         filter = STAILQ_FIRST(&vnic->filter);
88                         while (filter) {
89                                 temp_filter = STAILQ_NEXT(filter, next);
90                                 STAILQ_REMOVE(&vnic->filter, filter,
91                                               bnxt_filter_info, next);
92                                 STAILQ_INSERT_TAIL(&bp->free_filter_list,
93                                                    filter, next);
94                                 filter = temp_filter;
95                         }
96                         STAILQ_INIT(&vnic->filter);
97                 }
98         }
99
100         for (i = 0; i < bp->pf.max_vfs; i++) {
101                 STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
102                         bnxt_hwrm_clear_l2_filter(bp, filter);
103                 }
104         }
105 }
106
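/*
 * Release the filter array allocated by bnxt_alloc_filter_mem().  Any L2
 * filter that firmware still considers allocated at this point is unexpected,
 * so it is logged and a final HWRM free is attempted before the memory is
 * dropped.  Per-VF filter structures are freed as well.
 */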
107 void bnxt_free_filter_mem(struct bnxt *bp)
108 {
        struct bnxt_filter_info *filter, *temp_filter;
110         uint16_t max_filters, i;
111         int rc = 0;
112
113         if (bp->filter_info == NULL)
114                 return;
115
116         /* Ensure that all filters are freed */
117         max_filters = bp->max_l2_ctx;
118         for (i = 0; i < max_filters; i++) {
119                 filter = &bp->filter_info[i];
120                 if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
121                         PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n");
122                         /* Call HWRM to try to free filter again */
123                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
124                         if (rc)
125                                 PMD_DRV_LOG(ERR,
126                                        "HWRM filter cannot be freed rc = %d\n",
127                                         rc);
128                 }
129                 filter->fw_l2_filter_id = UINT64_MAX;
130         }
131         STAILQ_INIT(&bp->free_filter_list);
132
133         rte_free(bp->filter_info);
134         bp->filter_info = NULL;
135
        for (i = 0; i < bp->pf.max_vfs; i++) {
                /* rte_free() invalidates the list linkage, so unlink each
                 * VF filter before freeing it rather than iterating with
                 * STAILQ_FOREACH over freed nodes.
                 */
                filter = STAILQ_FIRST(&bp->pf.vf_info[i].filter);
                while (filter) {
                        temp_filter = STAILQ_NEXT(filter, next);
                        STAILQ_REMOVE(&bp->pf.vf_info[i].filter, filter,
                                      bnxt_filter_info, next);
                        rte_free(filter);
                        filter = temp_filter;
                }
        }
143 }
144
145 int bnxt_alloc_filter_mem(struct bnxt *bp)
146 {
147         struct bnxt_filter_info *filter_mem;
148         uint16_t max_filters;
149
150         max_filters = bp->max_l2_ctx;
151         /* Allocate memory for VNIC pool and filter pool */
152         filter_mem = rte_zmalloc("bnxt_filter_info",
153                                  max_filters * sizeof(struct bnxt_filter_info),
154                                  0);
155         if (filter_mem == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters\n",
                        max_filters);
158                 return -ENOMEM;
159         }
160         bp->filter_info = filter_mem;
161         return 0;
162 }
163
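/*
 * Same pool as bnxt_alloc_filter(), but the entry is handed back without the
 * default L2 MAC match so the rte_flow code can fill in its own criteria.
 */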
164 struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
165 {
166         struct bnxt_filter_info *filter;
167
168         /* Find the 1st unused filter from the free_filter_list pool*/
169         filter = STAILQ_FIRST(&bp->free_filter_list);
170         if (!filter) {
171                 PMD_DRV_LOG(ERR, "No more free filter resources\n");
172                 return NULL;
173         }
174         STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
175
176         return filter;
177 }
178
179 void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
180 {
181         STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
182 }
183
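/*
 * Basic sanity check on an rte_flow request: the attribute, pattern and
 * action arrays must all be present before any parsing is attempted.
 */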
184 static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
186                         const struct rte_flow_item pattern[],
187                         const struct rte_flow_action actions[],
188                         struct rte_flow_error *error)
189 {
190         if (!pattern) {
191                 rte_flow_error_set(error, EINVAL,
192                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
193                         NULL, "NULL pattern.");
194                 return -rte_errno;
195         }
196
197         if (!actions) {
198                 rte_flow_error_set(error, EINVAL,
199                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
200                                    NULL, "NULL action.");
201                 return -rte_errno;
202         }
203
204         if (!attr) {
205                 rte_flow_error_set(error, EINVAL,
206                                    RTE_FLOW_ERROR_TYPE_ATTR,
207                                    NULL, "NULL attribute.");
208                 return -rte_errno;
209         }
210
211         return 0;
212 }
213
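/*
 * Skip VOID entries in the pattern/action arrays.  The terminating END entry
 * is never VOID, so the scan always stops on a valid element.
 */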
214 static const struct rte_flow_item *
215 nxt_non_void_pattern(const struct rte_flow_item *cur)
216 {
217         while (1) {
218                 if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
219                         return cur;
220                 cur++;
221         }
222 }
223
224 static const struct rte_flow_action *
225 nxt_non_void_action(const struct rte_flow_action *cur)
226 {
227         while (1) {
228                 if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
229                         return cur;
230                 cur++;
231         }
232 }
233
234 int check_zero_bytes(const uint8_t *bytes, int len)
235 {
236         int i;
237         for (i = 0; i < len; i++)
238                 if (bytes[i] != 0x00)
239                         return 0;
240         return 1;
241 }
242
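/*
 * Walk the pattern once to pick the HWRM filter type: returns non-zero when
 * an n-tuple filter is needed, zero when an exact-match (EM) filter must be
 * used (a VLAN match forces EM), and a negative errno when the two
 * requirements conflict.
 */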
243 static int
244 bnxt_filter_type_check(const struct rte_flow_item pattern[],
                       struct rte_flow_error *error)
246 {
247         const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
248         int use_ntuple = 1;
249
250         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
251                 switch (item->type) {
252                 case RTE_FLOW_ITEM_TYPE_ETH:
253                         use_ntuple = 1;
254                         break;
255                 case RTE_FLOW_ITEM_TYPE_VLAN:
256                         use_ntuple = 0;
257                         break;
258                 case RTE_FLOW_ITEM_TYPE_IPV4:
259                 case RTE_FLOW_ITEM_TYPE_IPV6:
260                 case RTE_FLOW_ITEM_TYPE_TCP:
261                 case RTE_FLOW_ITEM_TYPE_UDP:
                        /* FALLTHROUGH */
                        /* These items need an n-tuple match; fail below if a
                         * VLAN item has already forced an exact-match filter.
                         */
264                         if (!use_ntuple) {
265                                 PMD_DRV_LOG(ERR,
266                                         "VLAN flow cannot use NTUPLE filter\n");
267                                 rte_flow_error_set(error, EINVAL,
268                                                    RTE_FLOW_ERROR_TYPE_ITEM,
269                                                    item,
270                                                    "Cannot use VLAN with NTUPLE");
271                                 return -rte_errno;
272                         }
273                         use_ntuple |= 1;
274                         break;
275                 default:
                        PMD_DRV_LOG(ERR, "Unknown Flow type\n");
277                         use_ntuple |= 1;
278                 }
279                 item++;
280         }
281         return use_ntuple;
282 }
283
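/*
 * Translate the rte_flow pattern into a bnxt_filter_info: copy MAC, IP,
 * L4 port and tunnel (VXLAN/NVGRE) match fields, accumulate the matching
 * HWRM "enables" bits, and resolve a VF item to its default VNIC.  Ranges
 * (item->last) and masks the hardware cannot express are rejected.
 */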
284 static int
285 bnxt_validate_and_parse_flow_type(struct bnxt *bp,
286                                   const struct rte_flow_item pattern[],
287                                   struct rte_flow_error *error,
288                                   struct bnxt_filter_info *filter)
289 {
290         const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
291         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
292         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
293         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
294         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
295         const struct rte_flow_item_udp *udp_spec, *udp_mask;
296         const struct rte_flow_item_eth *eth_spec, *eth_mask;
297         const struct rte_flow_item_nvgre *nvgre_spec;
298         const struct rte_flow_item_nvgre *nvgre_mask;
299         const struct rte_flow_item_vxlan *vxlan_spec;
300         const struct rte_flow_item_vxlan *vxlan_mask;
301         uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
302         uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
303         const struct rte_flow_item_vf *vf_spec;
304         uint32_t tenant_id_be = 0;
305         bool vni_masked = 0;
306         bool tni_masked = 0;
307         uint32_t vf = 0;
308         int use_ntuple;
309         uint32_t en = 0;
310         uint32_t en_ethertype;
311         int dflt_vnic;
312
313         use_ntuple = bnxt_filter_type_check(pattern, error);
314         PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
315         if (use_ntuple < 0)
316                 return use_ntuple;
317
318         filter->filter_type = use_ntuple ?
319                 HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
320         en_ethertype = use_ntuple ?
321                 NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
322                 EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
323
324         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
325                 if (item->last) {
326                         /* last or range is NOT supported as match criteria */
327                         rte_flow_error_set(error, EINVAL,
328                                            RTE_FLOW_ERROR_TYPE_ITEM,
329                                            item,
330                                            "No support for range");
331                         return -rte_errno;
332                 }
333                 if (!item->spec || !item->mask) {
334                         rte_flow_error_set(error, EINVAL,
335                                            RTE_FLOW_ERROR_TYPE_ITEM,
336                                            item,
337                                            "spec/mask is NULL");
338                         return -rte_errno;
339                 }
340                 switch (item->type) {
341                 case RTE_FLOW_ITEM_TYPE_ETH:
342                         eth_spec = item->spec;
343                         eth_mask = item->mask;
344
                        /* Neither the source nor the destination MAC address
                         * mask may be partially set; each must be all 0's or
                         * all 1's.
                         */
350                         if ((!is_zero_ether_addr(&eth_mask->src) &&
351                              !is_broadcast_ether_addr(&eth_mask->src)) ||
352                             (!is_zero_ether_addr(&eth_mask->dst) &&
353                              !is_broadcast_ether_addr(&eth_mask->dst))) {
354                                 rte_flow_error_set(error, EINVAL,
355                                                    RTE_FLOW_ERROR_TYPE_ITEM,
356                                                    item,
357                                                    "MAC_addr mask not valid");
358                                 return -rte_errno;
359                         }
360
361                         /* Mask is not allowed. Only exact matches are */
362                         if (eth_mask->type &&
363                             eth_mask->type != RTE_BE16(0xffff)) {
364                                 rte_flow_error_set(error, EINVAL,
365                                                    RTE_FLOW_ERROR_TYPE_ITEM,
366                                                    item,
367                                                    "ethertype mask not valid");
368                                 return -rte_errno;
369                         }
370
371                         if (is_broadcast_ether_addr(&eth_mask->dst)) {
372                                 rte_memcpy(filter->dst_macaddr,
373                                            &eth_spec->dst, 6);
374                                 en |= use_ntuple ?
375                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
376                                         EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
377                         }
378                         if (is_broadcast_ether_addr(&eth_mask->src)) {
379                                 rte_memcpy(filter->src_macaddr,
380                                            &eth_spec->src, 6);
381                                 en |= use_ntuple ?
382                                         NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
383                                         EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
384                         } /*
385                            * else {
386                            *  RTE_LOG(ERR, PMD, "Handle this condition\n");
387                            * }
388                            */
389                         if (eth_mask->type) {
390                                 filter->ethertype =
391                                         rte_be_to_cpu_16(eth_spec->type);
392                                 en |= en_ethertype;
393                         }
394
395                         break;
396                 case RTE_FLOW_ITEM_TYPE_VLAN:
397                         vlan_spec = item->spec;
398                         vlan_mask = item->mask;
399                         if (en & en_ethertype) {
400                                 rte_flow_error_set(error, EINVAL,
401                                                    RTE_FLOW_ERROR_TYPE_ITEM,
402                                                    item,
403                                                    "VLAN TPID matching is not"
404                                                    " supported");
405                                 return -rte_errno;
406                         }
407                         if (vlan_mask->tci &&
408                             vlan_mask->tci == RTE_BE16(0x0fff)) {
409                                 /* Only the VLAN ID can be matched. */
410                                 filter->l2_ovlan =
411                                         rte_be_to_cpu_16(vlan_spec->tci &
412                                                          RTE_BE16(0x0fff));
413                                 en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
414                         } else if (vlan_mask->tci) {
415                                 rte_flow_error_set(error, EINVAL,
416                                                    RTE_FLOW_ERROR_TYPE_ITEM,
417                                                    item,
418                                                    "VLAN mask is invalid");
419                                 return -rte_errno;
420                         }
421                         if (vlan_mask->inner_type &&
422                             vlan_mask->inner_type != RTE_BE16(0xffff)) {
423                                 rte_flow_error_set(error, EINVAL,
424                                                    RTE_FLOW_ERROR_TYPE_ITEM,
425                                                    item,
426                                                    "inner ethertype mask not"
427                                                    " valid");
428                                 return -rte_errno;
429                         }
430                         if (vlan_mask->inner_type) {
431                                 filter->ethertype =
432                                         rte_be_to_cpu_16(vlan_spec->inner_type);
433                                 en |= en_ethertype;
434                         }
435
436                         break;
437                 case RTE_FLOW_ITEM_TYPE_IPV4:
438                         /* If mask is not involved, we could use EM filters. */
439                         ipv4_spec = item->spec;
440                         ipv4_mask = item->mask;
441                         /* Only IP DST and SRC fields are maskable. */
442                         if (ipv4_mask->hdr.version_ihl ||
443                             ipv4_mask->hdr.type_of_service ||
444                             ipv4_mask->hdr.total_length ||
445                             ipv4_mask->hdr.packet_id ||
446                             ipv4_mask->hdr.fragment_offset ||
447                             ipv4_mask->hdr.time_to_live ||
448                             ipv4_mask->hdr.next_proto_id ||
449                             ipv4_mask->hdr.hdr_checksum) {
450                                 rte_flow_error_set(error, EINVAL,
451                                            RTE_FLOW_ERROR_TYPE_ITEM,
452                                            item,
453                                            "Invalid IPv4 mask.");
454                                 return -rte_errno;
455                         }
456                         filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
457                         filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
458                         if (use_ntuple)
459                                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
460                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
461                         else
462                                 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
463                                         EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
464                         if (ipv4_mask->hdr.src_addr) {
465                                 filter->src_ipaddr_mask[0] =
466                                         ipv4_mask->hdr.src_addr;
467                                 en |= !use_ntuple ? 0 :
468                                      NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
469                         }
470                         if (ipv4_mask->hdr.dst_addr) {
471                                 filter->dst_ipaddr_mask[0] =
472                                         ipv4_mask->hdr.dst_addr;
473                                 en |= !use_ntuple ? 0 :
474                                      NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
475                         }
476                         filter->ip_addr_type = use_ntuple ?
477                          HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
478                          HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
479                         if (ipv4_spec->hdr.next_proto_id) {
480                                 filter->ip_protocol =
481                                         ipv4_spec->hdr.next_proto_id;
482                                 if (use_ntuple)
483                                         en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
484                                 else
485                                         en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
486                         }
487                         break;
488                 case RTE_FLOW_ITEM_TYPE_IPV6:
489                         ipv6_spec = item->spec;
490                         ipv6_mask = item->mask;
491
492                         /* Only IP DST and SRC fields are maskable. */
493                         if (ipv6_mask->hdr.vtc_flow ||
494                             ipv6_mask->hdr.payload_len ||
495                             ipv6_mask->hdr.proto ||
496                             ipv6_mask->hdr.hop_limits) {
497                                 rte_flow_error_set(error, EINVAL,
498                                            RTE_FLOW_ERROR_TYPE_ITEM,
499                                            item,
500                                            "Invalid IPv6 mask.");
501                                 return -rte_errno;
502                         }
503
504                         if (use_ntuple)
505                                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
506                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
507                         else
508                                 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
509                                         EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
510                         rte_memcpy(filter->src_ipaddr,
511                                    ipv6_spec->hdr.src_addr, 16);
512                         rte_memcpy(filter->dst_ipaddr,
513                                    ipv6_spec->hdr.dst_addr, 16);
514                         if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
515                                 rte_memcpy(filter->src_ipaddr_mask,
516                                            ipv6_mask->hdr.src_addr, 16);
517                                 en |= !use_ntuple ? 0 :
518                                     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
519                         }
520                         if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
521                                 rte_memcpy(filter->dst_ipaddr_mask,
522                                            ipv6_mask->hdr.dst_addr, 16);
523                                 en |= !use_ntuple ? 0 :
524                                      NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
525                         }
526                         filter->ip_addr_type = use_ntuple ?
527                                 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
528                                 EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
529                         break;
530                 case RTE_FLOW_ITEM_TYPE_TCP:
531                         tcp_spec = item->spec;
532                         tcp_mask = item->mask;
533
534                         /* Check TCP mask. Only DST & SRC ports are maskable */
535                         if (tcp_mask->hdr.sent_seq ||
536                             tcp_mask->hdr.recv_ack ||
537                             tcp_mask->hdr.data_off ||
538                             tcp_mask->hdr.tcp_flags ||
539                             tcp_mask->hdr.rx_win ||
540                             tcp_mask->hdr.cksum ||
541                             tcp_mask->hdr.tcp_urp) {
542                                 rte_flow_error_set(error, EINVAL,
543                                            RTE_FLOW_ERROR_TYPE_ITEM,
544                                            item,
545                                            "Invalid TCP mask");
546                                 return -rte_errno;
547                         }
548                         filter->src_port = tcp_spec->hdr.src_port;
549                         filter->dst_port = tcp_spec->hdr.dst_port;
550                         if (use_ntuple)
551                                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
552                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
553                         else
554                                 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
555                                         EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
556                         if (tcp_mask->hdr.dst_port) {
557                                 filter->dst_port_mask = tcp_mask->hdr.dst_port;
558                                 en |= !use_ntuple ? 0 :
559                                   NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
560                         }
561                         if (tcp_mask->hdr.src_port) {
562                                 filter->src_port_mask = tcp_mask->hdr.src_port;
563                                 en |= !use_ntuple ? 0 :
564                                   NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
565                         }
566                         break;
567                 case RTE_FLOW_ITEM_TYPE_UDP:
568                         udp_spec = item->spec;
569                         udp_mask = item->mask;
570
571                         if (udp_mask->hdr.dgram_len ||
572                             udp_mask->hdr.dgram_cksum) {
573                                 rte_flow_error_set(error, EINVAL,
574                                            RTE_FLOW_ERROR_TYPE_ITEM,
575                                            item,
576                                            "Invalid UDP mask");
577                                 return -rte_errno;
578                         }
579
580                         filter->src_port = udp_spec->hdr.src_port;
581                         filter->dst_port = udp_spec->hdr.dst_port;
582                         if (use_ntuple)
583                                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
584                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
585                         else
586                                 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
587                                         EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
588
589                         if (udp_mask->hdr.dst_port) {
590                                 filter->dst_port_mask = udp_mask->hdr.dst_port;
591                                 en |= !use_ntuple ? 0 :
592                                   NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
593                         }
594                         if (udp_mask->hdr.src_port) {
595                                 filter->src_port_mask = udp_mask->hdr.src_port;
596                                 en |= !use_ntuple ? 0 :
597                                   NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
598                         }
599                         break;
600                 case RTE_FLOW_ITEM_TYPE_VXLAN:
601                         vxlan_spec = item->spec;
602                         vxlan_mask = item->mask;
603                         /* Check if VXLAN item is used to describe protocol.
604                          * If yes, both spec and mask should be NULL.
605                          * If no, both spec and mask shouldn't be NULL.
606                          */
607                         if ((!vxlan_spec && vxlan_mask) ||
608                             (vxlan_spec && !vxlan_mask)) {
609                                 rte_flow_error_set(error, EINVAL,
610                                            RTE_FLOW_ERROR_TYPE_ITEM,
611                                            item,
612                                            "Invalid VXLAN item");
613                                 return -rte_errno;
614                         }
615
616                         if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
617                             vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
618                             vxlan_spec->flags != 0x8) {
619                                 rte_flow_error_set(error, EINVAL,
620                                            RTE_FLOW_ERROR_TYPE_ITEM,
621                                            item,
622                                            "Invalid VXLAN item");
623                                 return -rte_errno;
624                         }
625
626                         /* Check if VNI is masked. */
627                         if (vxlan_spec && vxlan_mask) {
628                                 vni_masked =
629                                         !!memcmp(vxlan_mask->vni, vni_mask,
630                                                  RTE_DIM(vni_mask));
631                                 if (vni_masked) {
632                                         rte_flow_error_set(error, EINVAL,
633                                                    RTE_FLOW_ERROR_TYPE_ITEM,
634                                                    item,
635                                                    "Invalid VNI mask");
636                                         return -rte_errno;
637                                 }
638
639                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
640                                            vxlan_spec->vni, 3);
641                                 filter->vni =
642                                         rte_be_to_cpu_32(tenant_id_be);
643                                 filter->tunnel_type =
644                                  CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
645                         }
646                         break;
647                 case RTE_FLOW_ITEM_TYPE_NVGRE:
648                         nvgre_spec = item->spec;
649                         nvgre_mask = item->mask;
650                         /* Check if NVGRE item is used to describe protocol.
651                          * If yes, both spec and mask should be NULL.
652                          * If no, both spec and mask shouldn't be NULL.
653                          */
654                         if ((!nvgre_spec && nvgre_mask) ||
655                             (nvgre_spec && !nvgre_mask)) {
656                                 rte_flow_error_set(error, EINVAL,
657                                            RTE_FLOW_ERROR_TYPE_ITEM,
658                                            item,
659                                            "Invalid NVGRE item");
660                                 return -rte_errno;
661                         }
662
                        /* These header fields are big-endian in rte_flow. */
                        if (nvgre_spec->c_k_s_rsvd0_ver != RTE_BE16(0x2000) ||
                            nvgre_spec->protocol != RTE_BE16(0x6558)) {
665                                 rte_flow_error_set(error, EINVAL,
666                                            RTE_FLOW_ERROR_TYPE_ITEM,
667                                            item,
668                                            "Invalid NVGRE item");
669                                 return -rte_errno;
670                         }
671
672                         if (nvgre_spec && nvgre_mask) {
673                                 tni_masked =
674                                         !!memcmp(nvgre_mask->tni, tni_mask,
675                                                  RTE_DIM(tni_mask));
676                                 if (tni_masked) {
677                                         rte_flow_error_set(error, EINVAL,
678                                                        RTE_FLOW_ERROR_TYPE_ITEM,
679                                                        item,
680                                                        "Invalid TNI mask");
681                                         return -rte_errno;
682                                 }
683                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
684                                            nvgre_spec->tni, 3);
685                                 filter->vni =
686                                         rte_be_to_cpu_32(tenant_id_be);
687                                 filter->tunnel_type =
688                                  CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
689                         }
690                         break;
691                 case RTE_FLOW_ITEM_TYPE_VF:
692                         vf_spec = item->spec;
693                         vf = vf_spec->id;
694                         if (!BNXT_PF(bp)) {
695                                 rte_flow_error_set(error, EINVAL,
696                                            RTE_FLOW_ERROR_TYPE_ITEM,
697                                            item,
698                                            "Configuring on a VF!");
699                                 return -rte_errno;
700                         }
701
702                         if (vf >= bp->pdev->max_vfs) {
703                                 rte_flow_error_set(error, EINVAL,
704                                            RTE_FLOW_ERROR_TYPE_ITEM,
705                                            item,
706                                            "Incorrect VF id!");
707                                 return -rte_errno;
708                         }
709
                        dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
712                         if (dflt_vnic < 0) {
                                /* A negative value usually means no driver is
                                 * loaded on the VF, so there is no default
                                 * VNIC to mirror to; reject the flow.
                                 */
716                                 rte_flow_error_set(error, EINVAL,
717                                            RTE_FLOW_ERROR_TYPE_ITEM,
718                                            item,
719                                            "Unable to get default VNIC for VF");
720                                 return -rte_errno;
721                         }
722                         filter->mirror_vnic_id = dflt_vnic;
723                         en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
724                         break;
725                 default:
726                         break;
727                 }
728                 item++;
729         }
730         filter->enables = en;
731
732         return 0;
733 }
734
735 /* Parse attributes */
736 static int
737 bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
738                      struct rte_flow_error *error)
739 {
740         /* Must be input direction */
741         if (!attr->ingress) {
742                 rte_flow_error_set(error, EINVAL,
743                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
744                                    attr, "Only support ingress.");
745                 return -rte_errno;
746         }
747
748         /* Not supported */
749         if (attr->egress) {
750                 rte_flow_error_set(error, EINVAL,
751                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
752                                    attr, "No support for egress.");
753                 return -rte_errno;
754         }
755
756         /* Not supported */
757         if (attr->priority) {
758                 rte_flow_error_set(error, EINVAL,
759                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
760                                    attr, "No support for priority.");
761                 return -rte_errno;
762         }
763
764         /* Not supported */
765         if (attr->group) {
766                 rte_flow_error_set(error, EINVAL,
767                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
768                                    attr, "No support for group.");
769                 return -rte_errno;
770         }
771
772         return 0;
773 }
774
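/*
 * Every EM/n-tuple flow must be anchored to an L2 filter.  Reuse the port's
 * default L2 filter when the flow's destination MAC matches it; otherwise
 * allocate a new L2 filter and program it on the destination VNIC.
 */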
775 struct bnxt_filter_info *
776 bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
777                    struct bnxt_vnic_info *vnic)
778 {
779         struct bnxt_filter_info *filter1, *f0;
780         struct bnxt_vnic_info *vnic0;
781         int rc;
782
783         vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
784         f0 = STAILQ_FIRST(&vnic0->filter);
785
        /* This flow has the same DST MAC as the port/L2 filter. */
787         if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
788                 return f0;
789
        /* This flow needs a DST MAC different from the port/L2 filter. */
791         PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
792         filter1 = bnxt_get_unused_filter(bp);
793         if (filter1 == NULL)
794                 return NULL;
795         filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
796         filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
797                         L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
798         memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
799         memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
800         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
801                                      filter1);
802         if (rc) {
803                 bnxt_free_filter(bp, filter1);
804                 return NULL;
805         }
806         return filter1;
807 }
808
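/*
 * Parse and validate a complete flow request: build the filter from the
 * pattern, check the attributes, then resolve the single supported action
 * (QUEUE, DROP, COUNT or VF) into a destination VNIC and its L2 filter.
 * Flow creation is refused while the port is configured for RSS.
 */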
809 static int
810 bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
811                              const struct rte_flow_item pattern[],
812                              const struct rte_flow_action actions[],
813                              const struct rte_flow_attr *attr,
814                              struct rte_flow_error *error,
815                              struct bnxt_filter_info *filter)
816 {
817         const struct rte_flow_action *act = nxt_non_void_action(actions);
818         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
819         const struct rte_flow_action_queue *act_q;
820         const struct rte_flow_action_vf *act_vf;
821         struct bnxt_vnic_info *vnic, *vnic0;
822         struct bnxt_filter_info *filter1;
823         uint32_t vf = 0;
824         int dflt_vnic;
825         int rc;
826
827         if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
828                 PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
829                 rte_flow_error_set(error, EINVAL,
830                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
831                                    "Cannot create flow on RSS queues");
832                 rc = -rte_errno;
833                 goto ret;
834         }
835
836         rc = bnxt_validate_and_parse_flow_type(bp, pattern, error, filter);
837         if (rc != 0)
838                 goto ret;
839
840         rc = bnxt_flow_parse_attr(attr, error);
841         if (rc != 0)
842                 goto ret;
        /* Only the ingress attribute is supported right now. */
844         if (filter->filter_type == HWRM_CFA_EM_FILTER)
845                 filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
846
847         switch (act->type) {
848         case RTE_FLOW_ACTION_TYPE_QUEUE:
849                 /* Allow this flow. Redirect to a VNIC. */
850                 act_q = (const struct rte_flow_action_queue *)act->conf;
851                 if (act_q->index >= bp->rx_nr_rings) {
852                         rte_flow_error_set(error, EINVAL,
853                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
854                                            "Invalid queue ID.");
855                         rc = -rte_errno;
856                         goto ret;
857                 }
858                 PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
859
860                 vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
861                 vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
862                 if (vnic == NULL) {
863                         rte_flow_error_set(error, EINVAL,
864                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
865                                            "No matching VNIC for queue ID.");
866                         rc = -rte_errno;
867                         goto ret;
868                 }
869                 filter->dst_id = vnic->fw_vnic_id;
870                 filter1 = bnxt_get_l2_filter(bp, filter, vnic);
871                 if (filter1 == NULL) {
872                         rc = -ENOSPC;
873                         goto ret;
874                 }
875                 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
876                 PMD_DRV_LOG(DEBUG, "VNIC found\n");
877                 break;
878         case RTE_FLOW_ACTION_TYPE_DROP:
879                 vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
880                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
881                 if (filter1 == NULL) {
882                         rc = -ENOSPC;
883                         goto ret;
884                 }
885                 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
886                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
887                         filter->flags =
888                                 HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
889                 else
890                         filter->flags =
891                                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
892                 break;
893         case RTE_FLOW_ACTION_TYPE_COUNT:
894                 vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
895                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
896                 if (filter1 == NULL) {
897                         rc = -ENOSPC;
898                         goto ret;
899                 }
900                 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
901                 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
902                 break;
903         case RTE_FLOW_ACTION_TYPE_VF:
904                 act_vf = (const struct rte_flow_action_vf *)act->conf;
905                 vf = act_vf->id;
906                 if (!BNXT_PF(bp)) {
907                         rte_flow_error_set(error, EINVAL,
908                                    RTE_FLOW_ERROR_TYPE_ACTION,
909                                    act,
910                                    "Configuring on a VF!");
911                         rc = -rte_errno;
912                         goto ret;
913                 }
914
915                 if (vf >= bp->pdev->max_vfs) {
916                         rte_flow_error_set(error, EINVAL,
917                                    RTE_FLOW_ERROR_TYPE_ACTION,
918                                    act,
919                                    "Incorrect VF id!");
920                         rc = -rte_errno;
921                         goto ret;
922                 }
923
                dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
926                 if (dflt_vnic < 0) {
                        /* A negative value usually means no driver is loaded
                         * on the VF, so there is no default VNIC to mirror
                         * to; reject the flow.
                         */
930                         rte_flow_error_set(error, EINVAL,
931                                    RTE_FLOW_ERROR_TYPE_ACTION,
932                                    act,
933                                    "Unable to get default VNIC for VF");
934                         rc = -rte_errno;
935                         goto ret;
936                 }
937                 filter->mirror_vnic_id = dflt_vnic;
938                 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
939
940                 vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
941                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
942                 if (filter1 == NULL) {
943                         rc = -ENOSPC;
944                         goto ret;
945                 }
946                 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
947                 break;
948
949         default:
950                 rte_flow_error_set(error, EINVAL,
951                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
952                                    "Invalid action.");
953                 rc = -rte_errno;
954                 goto ret;
955         }
956
957         act = nxt_non_void_action(++act);
958         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
959                 rte_flow_error_set(error, EINVAL,
960                                    RTE_FLOW_ERROR_TYPE_ACTION,
961                                    act, "Invalid action.");
962                 rc = -rte_errno;
963                 goto ret;
964         }
965 ret:
966         return rc;
967 }
968
969 static int
970 bnxt_flow_validate(struct rte_eth_dev *dev,
971                 const struct rte_flow_attr *attr,
972                 const struct rte_flow_item pattern[],
973                 const struct rte_flow_action actions[],
974                 struct rte_flow_error *error)
975 {
976         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
977         struct bnxt_filter_info *filter;
978         int ret = 0;
979
        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
981         if (ret != 0)
982                 return ret;
983
984         filter = bnxt_get_unused_filter(bp);
985         if (filter == NULL) {
986                 PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
987                 return -ENOMEM;
988         }
989
990         ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
991                                            error, filter);
992         /* No need to hold on to this filter if we are just validating flow */
993         filter->fw_l2_filter_id = UINT64_MAX;
994         bnxt_free_filter(bp, filter);
995
996         return ret;
997 }
998
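/*
 * Look for an existing flow with the same match criteria as *nf.  Returns
 * -EEXIST when a duplicate targets the same destination, -EXDEV when only
 * the destination differs (the old hardware filter is cleared and the flow
 * is re-pointed at nf so the caller can reprogram it), and 0 otherwise.
 */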
999 static int
1000 bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
1001 {
1002         struct bnxt_filter_info *mf;
1003         struct rte_flow *flow;
1004         int i;
1005
1006         for (i = bp->nr_vnics - 1; i >= 0; i--) {
1007                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1008
1009                 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1010                         mf = flow->filter;
1011
1012                         if (mf->filter_type == nf->filter_type &&
1013                             mf->flags == nf->flags &&
1014                             mf->src_port == nf->src_port &&
1015                             mf->src_port_mask == nf->src_port_mask &&
1016                             mf->dst_port == nf->dst_port &&
1017                             mf->dst_port_mask == nf->dst_port_mask &&
1018                             mf->ip_protocol == nf->ip_protocol &&
1019                             mf->ip_addr_type == nf->ip_addr_type &&
1020                             mf->ethertype == nf->ethertype &&
1021                             mf->vni == nf->vni &&
1022                             mf->tunnel_type == nf->tunnel_type &&
1023                             mf->l2_ovlan == nf->l2_ovlan &&
1024                             mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
1025                             mf->l2_ivlan == nf->l2_ivlan &&
1026                             mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
1027                             !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
1028                             !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
1029                                     ETHER_ADDR_LEN) &&
1030                             !memcmp(mf->src_macaddr, nf->src_macaddr,
1031                                     ETHER_ADDR_LEN) &&
1032                             !memcmp(mf->dst_macaddr, nf->dst_macaddr,
1033                                     ETHER_ADDR_LEN) &&
1034                             !memcmp(mf->src_ipaddr, nf->src_ipaddr,
1035                                     sizeof(nf->src_ipaddr)) &&
1036                             !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
1037                                     sizeof(nf->src_ipaddr_mask)) &&
1038                             !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
1039                                     sizeof(nf->dst_ipaddr)) &&
1040                             !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
1041                                     sizeof(nf->dst_ipaddr_mask))) {
1042                                 if (mf->dst_id == nf->dst_id)
1043                                         return -EEXIST;
1044                                 /* Same Flow, Different queue
1045                                  * Clear the old ntuple filter
1046                                  */
1047                                 if (nf->filter_type == HWRM_CFA_EM_FILTER)
1048                                         bnxt_hwrm_clear_em_filter(bp, mf);
1049                                 if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
1050                                         bnxt_hwrm_clear_ntuple_filter(bp, mf);
1051                                 /* Free the old filter, update flow
1052                                  * with new filter
1053                                  */
1054                                 bnxt_free_filter(bp, mf);
1055                                 flow->filter = nf;
1056                                 return -EXDEV;
1057                         }
1058                 }
1059         }
1060         return 0;
1061 }
1062
1063 static struct rte_flow *
1064 bnxt_flow_create(struct rte_eth_dev *dev,
1065                   const struct rte_flow_attr *attr,
1066                   const struct rte_flow_item pattern[],
1067                   const struct rte_flow_action actions[],
1068                   struct rte_flow_error *error)
1069 {
1070         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1071         struct bnxt_filter_info *filter;
1072         struct bnxt_vnic_info *vnic = NULL;
1073         bool update_flow = false;
1074         struct rte_flow *flow;
1075         unsigned int i;
1076         int ret = 0;
1077
1078         flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
1079         if (!flow) {
1080                 rte_flow_error_set(error, ENOMEM,
1081                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1082                                    "Failed to allocate memory");
1083                 return flow;
1084         }
1085
        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
1087         if (ret != 0) {
                PMD_DRV_LOG(ERR, "Not a valid flow.\n");
1089                 goto free_flow;
1090         }
1091
1092         filter = bnxt_get_unused_filter(bp);
1093         if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                ret = -ENOSPC;
                goto free_flow;
1096         }
1097
1098         ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
1099                                            error, filter);
1100         if (ret != 0)
1101                 goto free_filter;
1102
1103         ret = bnxt_match_filter(bp, filter);
1104         if (ret == -EEXIST) {
1105                 PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
1106                 /* Clear the filter that was created as part of
1107                  * validate_and_parse_flow() above
1108                  */
1109                 bnxt_hwrm_clear_l2_filter(bp, filter);
1110                 goto free_filter;
1111         } else if (ret == -EXDEV) {
                PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
1113                 PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
1114                 update_flow = true;
1115         }
1116
1117         if (filter->filter_type == HWRM_CFA_EM_FILTER) {
1118                 filter->enables |=
1119                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1120                 ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
1121         }
1122         if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
1123                 filter->enables |=
1124                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1125                 ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
1126         }
1127
1128         for (i = 0; i < bp->nr_vnics; i++) {
1129                 vnic = &bp->vnic_info[i];
1130                 if (filter->dst_id == vnic->fw_vnic_id)
1131                         break;
1132         }
1133
1134         if (!ret) {
1135                 flow->filter = filter;
1136                 flow->vnic = vnic;
1137                 if (update_flow) {
1138                         ret = -EXDEV;
1139                         goto free_flow;
1140                 }
                PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
1142                 STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
1143                 return flow;
1144         }
1145 free_filter:
1146         bnxt_free_filter(bp, filter);
1147 free_flow:
1148         if (ret == -EEXIST)
1149                 rte_flow_error_set(error, ret,
1150                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1151                                    "Matching Flow exists.");
1152         else if (ret == -EXDEV)
1153                 rte_flow_error_set(error, ret,
1154                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1155                                    "Flow with pattern exists, updating destination queue");
1156         else
1157                 rte_flow_error_set(error, -ret,
1158                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1159                                    "Failed to create flow.");
1160         rte_free(flow);
1161         flow = NULL;
1162         return flow;
1163 }
1164
1165 static int
1166 bnxt_flow_destroy(struct rte_eth_dev *dev,
1167                   struct rte_flow *flow,
1168                   struct rte_flow_error *error)
1169 {
1170         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1171         struct bnxt_filter_info *filter = flow->filter;
1172         struct bnxt_vnic_info *vnic = flow->vnic;
1173         int ret = 0;
1174
1175         ret = bnxt_match_filter(bp, filter);
1176         if (ret == 0)
1177                 PMD_DRV_LOG(ERR, "Could not find matching flow\n");
1178         if (filter->filter_type == HWRM_CFA_EM_FILTER)
1179                 ret = bnxt_hwrm_clear_em_filter(bp, filter);
1180         if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1181                 ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1182         else
1183                 ret = bnxt_hwrm_clear_l2_filter(bp, filter);
1184         if (!ret) {
1185                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1186                 rte_free(flow);
1187         } else {
1188                 rte_flow_error_set(error, -ret,
1189                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1190                                    "Failed to destroy flow.");
1191         }
1192
1193         return ret;
1194 }
1195
1196 static int
1197 bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1198 {
1199         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1200         struct bnxt_vnic_info *vnic;
1201         struct rte_flow *flow;
1202         unsigned int i;
1203         int ret = 0;
1204
1205         for (i = 0; i < bp->nr_vnics; i++) {
1206                 vnic = &bp->vnic_info[i];
                /* rte_free() below invalidates the iterator, so always take
                 * the current head of the list instead of STAILQ_FOREACH.
                 */
                while (!STAILQ_EMPTY(&vnic->flow_list)) {
                        struct bnxt_filter_info *filter;

                        flow = STAILQ_FIRST(&vnic->flow_list);
                        filter = flow->filter;

                        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                                ret = bnxt_hwrm_clear_em_filter(bp, filter);
                        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

                        if (ret) {
                                rte_flow_error_set(error, -ret,
                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
                                                   NULL,
                                                   "Failed to flush flow in HW.");
                                return -rte_errno;
                        }

                        STAILQ_REMOVE_HEAD(&vnic->flow_list, next);
                        rte_free(flow);
                }
1227         }
1228
1229         return ret;
1230 }
1231
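/*
 * The table below is what the PMD hands back to the ethdev layer so that the
 * generic rte_flow API reaches the handlers above.  As an illustrative sketch
 * only (application side, not part of the driver; port_id, the queue index
 * and the EtherType are placeholders):
 *
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow_item_eth eth_spec = { .type = RTE_BE16(0x0800) };
 *      struct rte_flow_item_eth eth_mask = { .type = RTE_BE16(0xffff) };
 *      struct rte_flow_action_queue queue = { .index = 1 };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *                .spec = &eth_spec, .mask = &eth_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow_error err;
 *      struct rte_flow *flow = NULL;
 *
 *      if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *              flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * Note that this driver rejects flow creation while the port runs with RSS
 * enabled (see bnxt_validate_and_parse_flow()).
 */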
1232 const struct rte_flow_ops bnxt_flow_ops = {
1233         .validate = bnxt_flow_validate,
1234         .create = bnxt_flow_create,
1235         .destroy = bnxt_flow_destroy,
1236         .flush = bnxt_flow_flush,
1237 };