/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * Filter Functions
 */

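/*
 * Take the first filter from the per-port free list and initialize it as an
 * L2 (MAC address) filter that exactly matches the port's primary MAC.
 */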
struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;

        /* Find the first unused filter in the free_filter_list pool */
        filter = STAILQ_FIRST(&bp->free_filter_list);
        if (!filter) {
                PMD_DRV_LOG(ERR, "No more free filter resources\n");
                return NULL;
        }
        STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

        /* Default to an L2 MAC address filter */
        filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
        memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
               ETHER_ADDR_LEN);
        memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
        return filter;
}

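/*
 * Allocate a filter for a VF from the heap (not from the shared filter pool)
 * and link it into that VF's filter list.
 */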
struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
{
        struct bnxt_filter_info *filter;

        filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
        if (!filter) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
                        vf);
                return NULL;
        }

        filter->fw_l2_filter_id = UINT64_MAX;
        STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
        return filter;
}

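/*
 * Build the free filter list from the preallocated filter_info array and
 * mark every firmware filter ID as invalid.
 */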
void bnxt_init_filters(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;
        int i, max_filters;

        max_filters = bp->max_l2_ctx;
        STAILQ_INIT(&bp->free_filter_list);
        for (i = 0; i < max_filters; i++) {
                filter = &bp->filter_info[i];
                filter->fw_l2_filter_id = UINT64_MAX;
                filter->fw_em_filter_id = UINT64_MAX;
                filter->fw_ntuple_filter_id = UINT64_MAX;
                STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
        }
}

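/*
 * Detach every filter from every VNIC and return the filters to the free
 * list, then clear the HWRM L2 filters programmed on behalf of the VFs.
 */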
void bnxt_free_all_filters(struct bnxt *bp)
{
        struct bnxt_vnic_info *vnic;
        struct bnxt_filter_info *filter, *temp_filter;
        int i;

        for (i = 0; i < MAX_FF_POOLS; i++) {
                STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
                        filter = STAILQ_FIRST(&vnic->filter);
                        while (filter) {
                                temp_filter = STAILQ_NEXT(filter, next);
                                STAILQ_REMOVE(&vnic->filter, filter,
                                              bnxt_filter_info, next);
                                STAILQ_INSERT_TAIL(&bp->free_filter_list,
                                                   filter, next);
                                filter = temp_filter;
                        }
                        STAILQ_INIT(&vnic->filter);
                }
        }

        for (i = 0; i < bp->pf.max_vfs; i++) {
                STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
                        bnxt_hwrm_clear_l2_filter(bp, filter);
                }
        }
}

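/*
 * Release the filter_info array. Any filter still holding a valid firmware
 * L2 filter ID at this point leaked; try once more to clear it via HWRM
 * before the memory is freed.
 */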
void bnxt_free_filter_mem(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;
        uint16_t max_filters, i;
        int rc = 0;

        if (bp->filter_info == NULL)
                return;

        /* Ensure that all filters are freed */
        max_filters = bp->max_l2_ctx;
        for (i = 0; i < max_filters; i++) {
                filter = &bp->filter_info[i];
                if (filter->fw_l2_filter_id != UINT64_MAX) {
                        PMD_DRV_LOG(ERR, "HWRM filter was not freed\n");
                        /* Call HWRM to try to free the filter again */
                        rc = bnxt_hwrm_clear_l2_filter(bp, filter);
                        if (rc)
                                PMD_DRV_LOG(ERR,
                                       "HWRM filter cannot be freed, rc = %d\n",
                                        rc);
                }
                filter->fw_l2_filter_id = UINT64_MAX;
        }
        STAILQ_INIT(&bp->free_filter_list);

        rte_free(bp->filter_info);
        bp->filter_info = NULL;
}

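/*
 * Allocate the filter pool, sized for the maximum number of L2 contexts the
 * firmware reported for this function.
 */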
int bnxt_alloc_filter_mem(struct bnxt *bp)
{
        struct bnxt_filter_info *filter_mem;
        uint16_t max_filters;

        max_filters = bp->max_l2_ctx;
        /* Allocate memory for the filter pool */
        filter_mem = rte_zmalloc("bnxt_filter_info",
                                 max_filters * sizeof(struct bnxt_filter_info),
                                 0);
        if (filter_mem == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters\n",
                        max_filters);
                return -ENOMEM;
        }
        bp->filter_info = filter_mem;
        return 0;
}

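/*
 * Pop an uninitialized filter off the free list; unlike bnxt_alloc_filter()
 * this does not preset any L2 match fields.
 */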
struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;

        /* Find the first unused filter in the free_filter_list pool */
        filter = STAILQ_FIRST(&bp->free_filter_list);
        if (!filter) {
                PMD_DRV_LOG(ERR, "No more free filter resources\n");
                return NULL;
        }
        STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

        return filter;
}

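/*
 * Return a filter to the free list. This only recycles the software object;
 * the caller is responsible for clearing any HWRM state first.
 */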
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
        STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}

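/*
 * Reject rte_flow requests that are missing any of the three mandatory
 * arguments (attributes, pattern, actions).
 */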
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}

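/*
 * Skip leading VOID entries in a pattern/action list. The mandatory END
 * entry is not VOID, so these loops always terminate.
 */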
static const struct rte_flow_item *
nxt_non_void_pattern(const struct rte_flow_item *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return cur;
                cur++;
        }
}

static const struct rte_flow_action *
nxt_non_void_action(const struct rte_flow_action *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return cur;
                cur++;
        }
}

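/* Return 1 if all 'len' bytes are zero, 0 otherwise. */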
int check_zero_bytes(const uint8_t *bytes, int len)
{
        int i;

        for (i = 0; i < len; i++)
                if (bytes[i] != 0x00)
                        return 0;
        return 1;
}

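/*
 * Walk the pattern once to decide which hardware filter type to use:
 * returns 1 for an ntuple filter, 0 for an exact-match (EM) filter, or a
 * negative errno if the combination of items is unsupported.
 */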
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
                       struct rte_flow_error *error)
{
        const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
        int use_ntuple = 1;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        use_ntuple = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        use_ntuple = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
                case RTE_FLOW_ITEM_TYPE_TCP:
                case RTE_FLOW_ITEM_TYPE_UDP:
                        /* These items need an ntuple match, but a VLAN item
                         * forces exact match, so the two cannot be combined.
                         */
                        if (!use_ntuple) {
                                PMD_DRV_LOG(ERR,
                                        "VLAN flow cannot use NTUPLE filter\n");
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Cannot use VLAN with NTUPLE");
                                return -rte_errno;
                        }
                        use_ntuple |= 1;
                        break;
                default:
                        PMD_DRV_LOG(ERR, "Unknown Flow type\n");
                        use_ntuple |= 1;
                }
                item++;
        }
        return use_ntuple;
}

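/*
 * Translate the rte_flow pattern into a bnxt_filter_info match description.
 * Every item must carry both spec and mask, ranges ('last') are rejected,
 * and the 'enables' bitmap is built up as fields are recognized.
 */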
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                                  const struct rte_flow_item pattern[],
                                  struct rte_flow_error *error,
                                  struct bnxt_filter_info *filter)
{
        const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;
        uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
        uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
        const struct rte_flow_item_vf *vf_spec;
        uint32_t tenant_id_be = 0;
        bool vni_masked = 0;
        bool tni_masked = 0;
        uint32_t vf = 0;
        int use_ntuple;
        uint32_t en = 0;
        int dflt_vnic;

        use_ntuple = bnxt_filter_type_check(pattern, error);
        PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
        if (use_ntuple < 0)
                return use_ntuple;

        filter->filter_type = use_ntuple ?
                HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (item->last) {
                        /* last or range is NOT supported as match criteria */
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "No support for range");
                        return -rte_errno;
                }
                if (!item->spec || !item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "spec/mask is NULL");
                        return -rte_errno;
                }
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        /* Source and destination MAC address masks must not
                         * be partially set: each must be all 0's or all 1's.
                         */
                        if ((!is_zero_ether_addr(&eth_mask->src) &&
                             !is_broadcast_ether_addr(&eth_mask->src)) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "MAC_addr mask not valid");
                                return -rte_errno;
                        }

                        /* Masking is not allowed. Only exact matches are */
                        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "ethertype mask not valid");
                                return -rte_errno;
                        }

                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                rte_memcpy(filter->dst_macaddr,
                                           &eth_spec->dst, ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
                        }
                        if (is_broadcast_ether_addr(&eth_mask->src)) {
                                rte_memcpy(filter->src_macaddr,
                                           &eth_spec->src, ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
                        }
                        if (eth_spec->type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(eth_spec->type);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
                                        EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec = item->spec;
                        vlan_mask = item->mask;
                        if ((vlan_mask->tci & 0xFFFF) && !vlan_mask->tpid) {
                                /* Only the VLAN ID can be matched. Convert
                                 * the big-endian TCI to CPU order before
                                 * masking off the 12-bit VID.
                                 */
                                filter->l2_ovlan =
                                        rte_be_to_cpu_16(vlan_spec->tci) &
                                        0x0FFF;
                                en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
                        } else {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN mask is invalid");
                                return -rte_errno;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        /* If mask is not involved, we could use EM filters. */
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;
                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.type_of_service ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.time_to_live ||
                            ipv4_mask->hdr.next_proto_id ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv4 mask.");
                                return -rte_errno;
                        }
                        filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
                        filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
                        if (ipv4_mask->hdr.src_addr) {
                                filter->src_ipaddr_mask[0] =
                                        ipv4_mask->hdr.src_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }
                        if (ipv4_mask->hdr.dst_addr) {
                                filter->dst_ipaddr_mask[0] =
                                        ipv4_mask->hdr.dst_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }
                        filter->ip_addr_type = use_ntuple ?
                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
                        if (ipv4_spec->hdr.next_proto_id) {
                                filter->ip_protocol =
                                        ipv4_spec->hdr.next_proto_id;
                                if (use_ntuple)
                                        en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
                                else
                                        en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv6_mask->hdr.vtc_flow ||
                            ipv6_mask->hdr.payload_len ||
                            ipv6_mask->hdr.proto ||
                            ipv6_mask->hdr.hop_limits) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
                        rte_memcpy(filter->src_ipaddr,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->dst_ipaddr,
                                   ipv6_spec->hdr.dst_addr, 16);
                        if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
                                rte_memcpy(filter->src_ipaddr_mask,
                                           ipv6_mask->hdr.src_addr, 16);
                                en |= !use_ntuple ? 0 :
                                    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }
                        if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
                                rte_memcpy(filter->dst_ipaddr_mask,
                                           ipv6_mask->hdr.dst_addr, 16);
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }
                        filter->ip_addr_type = use_ntuple ?
                                NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
                                EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        /* Check TCP mask. Only DST & SRC ports are maskable */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid TCP mask");
                                return -rte_errno;
                        }
                        filter->src_port = tcp_spec->hdr.src_port;
                        filter->dst_port = tcp_spec->hdr.dst_port;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
                        if (tcp_mask->hdr.dst_port) {
                                filter->dst_port_mask = tcp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }
                        if (tcp_mask->hdr.src_port) {
                                filter->src_port_mask = tcp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        /* Check UDP mask. Only DST & SRC ports are maskable */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid UDP mask");
                                return -rte_errno;
                        }

                        filter->src_port = udp_spec->hdr.src_port;
                        filter->dst_port = udp_spec->hdr.dst_port;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (udp_mask->hdr.dst_port) {
                                filter->dst_port_mask = udp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }
                        if (udp_mask->hdr.src_port) {
                                filter->src_port_mask = udp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
                            vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
                            vxlan_spec->flags != 0x8) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        /* Check if VNI is masked. */
                        if (vxlan_spec && vxlan_mask) {
                                vni_masked =
                                        !!memcmp(vxlan_mask->vni, vni_mask,
                                                 RTE_DIM(vni_mask));
                                if (vni_masked) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VNI mask");
                                        return -rte_errno;
                                }

                                /* The 24-bit VNI occupies the three low-order
                                 * bytes of a 32-bit big-endian value; copy it
                                 * in and convert to CPU order.
                                 */
                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           vxlan_spec->vni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        /* Both fields are big-endian in the item, so compare
                         * against network-order constants.
                         */
                        if (nvgre_spec->c_k_s_rsvd0_ver !=
                                rte_cpu_to_be_16(0x2000) ||
                            nvgre_spec->protocol !=
                                rte_cpu_to_be_16(0x6558)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (nvgre_spec && nvgre_mask) {
                                tni_masked =
                                        !!memcmp(nvgre_mask->tni, tni_mask,
                                                 RTE_DIM(tni_mask));
                                if (tni_masked) {
                                        rte_flow_error_set(error, EINVAL,
                                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                                       item,
                                                       "Invalid TNI mask");
                                        return -rte_errno;
                                }
                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           nvgre_spec->tni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = item->spec;
                        vf = vf_spec->id;
                        if (!BNXT_PF(bp)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Configuring on a VF!");
                                return -rte_errno;
                        }

                        if (vf >= bp->pdev->max_vfs) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Incorrect VF id!");
                                return -rte_errno;
                        }

                        dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                        if (dflt_vnic < 0) {
                                /* A negative value simply indicates there is
                                 * no driver loaded on the VF, so the flow
                                 * cannot be accepted, but it is not an error.
                                 */
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Unable to get default VNIC for VF");
                                return -rte_errno;
                        }
                        filter->mirror_vnic_id = dflt_vnic;
                        en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
                        break;
                default:
                        break;
                }
                item++;
        }
        filter->enables = en;

        return 0;
}

/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "No support for egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "No support for priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "No support for group.");
                return -rte_errno;
        }

        return 0;
}

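/*
 * Find or create the L2 filter that backs an ntuple/EM flow. If the flow's
 * DST MAC matches the port's default L2 filter, reuse it; otherwise program
 * a new L2 filter for that MAC on the given VNIC.
 */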
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
                   struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter1, *f0;
        struct bnxt_vnic_info *vnic0;
        int rc;

        vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
        f0 = STAILQ_FIRST(&vnic0->filter);

        /* This flow has the same DST MAC as the port/L2 filter. */
        if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
                return f0;

        /* This flow needs a DST MAC that differs from the port/L2 filter. */
        PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
        filter1 = bnxt_get_unused_filter(bp);
        if (filter1 == NULL)
                return NULL;
        filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
        memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
        memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
                                     filter1);
        if (rc) {
                bnxt_free_filter(bp, filter1);
                return NULL;
        }
        return filter1;
}

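/*
 * Parse the full flow request: match criteria first, then attributes, then
 * the single supported action (QUEUE, DROP, COUNT or VF), resolving the
 * destination VNIC and backing L2 filter along the way.
 */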
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             const struct rte_flow_attr *attr,
                             struct rte_flow_error *error,
                             struct bnxt_filter_info *filter)
{
        const struct rte_flow_action *act = nxt_non_void_action(actions);
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_vf *act_vf;
        struct bnxt_vnic_info *vnic, *vnic0;
        struct bnxt_filter_info *filter1;
        uint32_t vf = 0;
        int dflt_vnic;
        int rc;

        if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
                PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Cannot create flow on RSS queues");
                rc = -rte_errno;
                goto ret;
        }

        rc = bnxt_validate_and_parse_flow_type(bp, pattern, error, filter);
        if (rc != 0)
                goto ret;

        rc = bnxt_flow_parse_attr(attr, error);
        if (rc != 0)
                goto ret;

        /* Only the ingress attribute is supported right now. */
        filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                /* Allow this flow. Redirect to a VNIC. */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                if (act_q->index >= bp->rx_nr_rings) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Invalid queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
                if (vnic == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "No matching VNIC for queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                filter->dst_id = vnic->fw_vnic_id;
                filter1 = bnxt_get_l2_filter(bp, filter, vnic);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                PMD_DRV_LOG(DEBUG, "VNIC found\n");
                break;
        case RTE_FLOW_ACTION_TYPE_DROP:
                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        filter->flags =
                                HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
                else
                        filter->flags =
                                HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
                break;
        case RTE_FLOW_ACTION_TYPE_COUNT:
                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
                break;
        case RTE_FLOW_ACTION_TYPE_VF:
                act_vf = (const struct rte_flow_action_vf *)act->conf;
                vf = act_vf->id;
                if (!BNXT_PF(bp)) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Configuring on a VF!");
                        rc = -rte_errno;
                        goto ret;
                }

                if (vf >= bp->pdev->max_vfs) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Incorrect VF id!");
                        rc = -rte_errno;
                        goto ret;
                }

                dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                if (dflt_vnic < 0) {
                        /* A negative value simply indicates there is no
                         * driver loaded on the VF. This is not an error.
                         */
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Unable to get default VNIC for VF");
                        rc = -rte_errno;
                        goto ret;
                }
                filter->mirror_vnic_id = dflt_vnic;
                filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                break;

        default:
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }

        if (filter1) {
                bnxt_free_filter(bp, filter1);
                filter1->fw_l2_filter_id = UINT64_MAX;
        }

        act = nxt_non_void_action(++act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }
ret:
        return rc;
}

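/*
 * rte_flow validate callback: run the full parse on a scratch filter and
 * release it again without programming anything permanent.
 */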
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter;
        int ret = 0;

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0)
                return ret;

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                return -ENOMEM;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        /* No need to hold on to this filter if we are just validating flow */
        filter->fw_l2_filter_id = UINT64_MAX;
        bnxt_free_filter(bp, filter);

        return ret;
}

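/*
 * Look for an existing flow whose match criteria are identical to 'nf'.
 * Returns -EEXIST if an identical flow already targets the same destination,
 * -EXDEV if only the destination differs (the old hardware filter is cleared
 * and the flow is repointed at 'nf'), and 0 if there is no match.
 */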
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
        struct bnxt_filter_info *mf;
        struct rte_flow *flow;
        int i;

        for (i = bp->nr_vnics - 1; i >= 0; i--) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                STAILQ_FOREACH(flow, &vnic->flow_list, next) {
                        mf = flow->filter;

                        if (mf->filter_type == nf->filter_type &&
                            mf->flags == nf->flags &&
                            mf->src_port == nf->src_port &&
                            mf->src_port_mask == nf->src_port_mask &&
                            mf->dst_port == nf->dst_port &&
                            mf->dst_port_mask == nf->dst_port_mask &&
                            mf->ip_protocol == nf->ip_protocol &&
                            mf->ip_addr_type == nf->ip_addr_type &&
                            mf->ethertype == nf->ethertype &&
                            mf->vni == nf->vni &&
                            mf->tunnel_type == nf->tunnel_type &&
                            mf->l2_ovlan == nf->l2_ovlan &&
                            mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
                            mf->l2_ivlan == nf->l2_ivlan &&
                            mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
                            !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
                            !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_macaddr, nf->src_macaddr,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->dst_macaddr, nf->dst_macaddr,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_ipaddr, nf->src_ipaddr,
                                    sizeof(nf->src_ipaddr)) &&
                            !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
                                    sizeof(nf->src_ipaddr_mask)) &&
                            !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
                                    sizeof(nf->dst_ipaddr)) &&
                            !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
                                    sizeof(nf->dst_ipaddr_mask))) {
                                if (mf->dst_id == nf->dst_id)
                                        return -EEXIST;
                                /* Same flow, different queue:
                                 * clear the old hardware filter
                                 */
                                if (nf->filter_type == HWRM_CFA_EM_FILTER)
                                        bnxt_hwrm_clear_em_filter(bp, mf);
                                if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                        bnxt_hwrm_clear_ntuple_filter(bp, mf);
                                /* Free the old filter and update the flow
                                 * with the new one
                                 */
                                bnxt_free_filter(bp, mf);
                                flow->filter = nf;
                                return -EXDEV;
                        }
                }
        }
        return 0;
}

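/*
 * rte_flow create callback: validate and parse the request, program the EM
 * or ntuple filter via HWRM, and attach the new flow to its VNIC's list.
 */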
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter;
        struct bnxt_vnic_info *vnic = NULL;
        bool update_flow = false;
        struct rte_flow *flow;
        unsigned int i;
        int ret = 0;

        flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return flow;
        }

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "Not a valid flow.\n");
                goto free_flow;
        }

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                ret = -ENOMEM;
                goto free_flow;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        if (ret != 0)
                goto free_filter;

        ret = bnxt_match_filter(bp, filter);
        if (ret == -EEXIST) {
                PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
                /* Clear the filter that was created as part of
                 * validate_and_parse_flow() above
                 */
                bnxt_hwrm_clear_l2_filter(bp, filter);
                goto free_filter;
        } else if (ret == -EXDEV) {
                PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
                PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
                update_flow = true;
        }

        if (filter->filter_type == HWRM_CFA_EM_FILTER) {
                filter->enables |=
                        HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
        }
        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
                filter->enables |=
                        HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
        }

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];
                if (filter->dst_id == vnic->fw_vnic_id)
                        break;
        }

        if (!ret) {
                flow->filter = filter;
                flow->vnic = vnic;
                if (update_flow) {
                        ret = -EXDEV;
                        goto free_flow;
                }
                PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
                STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
                return flow;
        }
free_filter:
        bnxt_free_filter(bp, filter);
free_flow:
        if (ret == -EEXIST)
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Matching Flow exists.");
        else if (ret == -EXDEV)
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Flow with pattern exists, updating destination queue");
        else
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to create flow.");
        rte_free(flow);
        flow = NULL;
        return flow;
}

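/*
 * rte_flow destroy callback: clear the EM/ntuple and L2 filters in firmware
 * and unlink the flow from its VNIC.
 */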
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter = flow->filter;
        struct bnxt_vnic_info *vnic = flow->vnic;
        int ret = 0;

        ret = bnxt_match_filter(bp, filter);
        if (ret == 0)
                PMD_DRV_LOG(ERR, "Could not find matching flow\n");
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                ret = bnxt_hwrm_clear_em_filter(bp, filter);
        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

        bnxt_hwrm_clear_l2_filter(bp, filter);
        if (!ret) {
                STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
                rte_free(flow);
        } else {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to destroy flow.");
        }

        return ret;
}

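/*
 * rte_flow flush callback: destroy every flow on every VNIC, aborting on
 * the first firmware failure.
 */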
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_vnic_info *vnic;
        struct rte_flow *flow;
        unsigned int i;
        int ret = 0;

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];
                /* Pop flows off the head instead of STAILQ_FOREACH so the
                 * iterator never reads a node after it has been freed.
                 */
                while ((flow = STAILQ_FIRST(&vnic->flow_list)) != NULL) {
                        struct bnxt_filter_info *filter = flow->filter;

                        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                                ret = bnxt_hwrm_clear_em_filter(bp, filter);
                        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

                        if (ret) {
                                rte_flow_error_set(error, -ret,
                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
                                                   NULL,
                                                   "Failed to flush flow in HW.");
                                return -rte_errno;
                        }

                        STAILQ_REMOVE_HEAD(&vnic->flow_list, next);
                        rte_free(flow);
                }
        }

        return ret;
}

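/*
 * These callbacks are reached through the generic rte_flow API. An
 * illustrative caller (not part of this driver; attr, pattern and actions
 * are assumed to be filled in as parsed above) looks like:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 *	if (f == NULL)
 *		printf("flow create failed: %s\n", err.message);
 */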
const struct rte_flow_ops bnxt_flow_ops = {
        .validate = bnxt_flow_validate,
        .create = bnxt_flow_create,
        .destroy = bnxt_flow_destroy,
        .flush = bnxt_flow_flush,
};