/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * Filter Functions
 */

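/*
 * Pull the first unused filter off the pre-allocated free_filter_list
 * pool and initialize it as an L2 MAC filter for the port's primary
 * MAC address. Returns NULL when the pool is exhausted.
 */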
struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;

        /* Find the 1st unused filter from the free_filter_list pool */
        filter = STAILQ_FIRST(&bp->free_filter_list);
        if (!filter) {
                PMD_DRV_LOG(ERR, "No more free filter resources\n");
                return NULL;
        }
        STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

        /* Default to L2 MAC Addr filter */
        filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
        memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
               ETHER_ADDR_LEN);
        memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
        return filter;
}

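/*
 * Allocate a filter for the given VF from heap memory (the shared
 * filter pool is not used) and link it onto that VF's filter list.
 */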
struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
{
        struct bnxt_filter_info *filter;

        filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
        if (!filter) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
                        vf);
                return NULL;
        }

        filter->fw_l2_filter_id = UINT64_MAX;
        STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
        return filter;
}

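/*
 * Reset every entry in the filter pool to "no HWRM filter allocated"
 * and rebuild free_filter_list so that all filters are available.
 */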
void bnxt_init_filters(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;
        int i, max_filters;

        max_filters = bp->max_l2_ctx;
        STAILQ_INIT(&bp->free_filter_list);
        for (i = 0; i < max_filters; i++) {
                filter = &bp->filter_info[i];
                filter->fw_l2_filter_id = UINT64_MAX;
                filter->fw_em_filter_id = UINT64_MAX;
                filter->fw_ntuple_filter_id = UINT64_MAX;
                STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
        }
}

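/*
 * Detach all filters from their VNICs and return them to the free
 * pool; for VFs, also ask the firmware to clear the L2 filters.
 */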
void bnxt_free_all_filters(struct bnxt *bp)
{
        struct bnxt_vnic_info *vnic;
        struct bnxt_filter_info *filter, *temp_filter;
        int i;

        for (i = 0; i < MAX_FF_POOLS; i++) {
                STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
                        filter = STAILQ_FIRST(&vnic->filter);
                        while (filter) {
                                temp_filter = STAILQ_NEXT(filter, next);
                                STAILQ_REMOVE(&vnic->filter, filter,
                                              bnxt_filter_info, next);
                                STAILQ_INSERT_TAIL(&bp->free_filter_list,
                                                   filter, next);
                                filter = temp_filter;
                        }
                        STAILQ_INIT(&vnic->filter);
                }
        }

        for (i = 0; i < bp->pf.max_vfs; i++) {
                STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
                        bnxt_hwrm_clear_l2_filter(bp, filter);
                }
        }
}

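/*
 * Release the filter pool memory. Any filter still holding a firmware
 * L2 filter ID is cleared via HWRM first, since freeing the pool with
 * live firmware state would leak resources in the NIC.
 */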
void bnxt_free_filter_mem(struct bnxt *bp)
{
        struct bnxt_filter_info *filter, *temp_filter;
        uint16_t max_filters, i;
        int rc = 0;

        if (bp->filter_info == NULL)
                return;

        /* Ensure that all filters are freed */
        max_filters = bp->max_l2_ctx;
        for (i = 0; i < max_filters; i++) {
                filter = &bp->filter_info[i];
                if (filter->fw_l2_filter_id != UINT64_MAX) {
                        PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n");
                        /* Call HWRM to try to free filter again */
                        rc = bnxt_hwrm_clear_l2_filter(bp, filter);
                        if (rc)
                                PMD_DRV_LOG(ERR,
                                       "HWRM filter cannot be freed rc = %d\n",
                                        rc);
                }
                filter->fw_l2_filter_id = UINT64_MAX;
        }
        STAILQ_INIT(&bp->free_filter_list);

        rte_free(bp->filter_info);
        bp->filter_info = NULL;

        for (i = 0; i < bp->pf.max_vfs; i++) {
                /* Unlink each VF filter before freeing it; STAILQ_FOREACH
                 * must not be used here because it would dereference the
                 * node after rte_free().
                 */
                filter = STAILQ_FIRST(&bp->pf.vf_info[i].filter);
                while (filter) {
                        temp_filter = STAILQ_NEXT(filter, next);
                        STAILQ_REMOVE(&bp->pf.vf_info[i].filter, filter,
                                      bnxt_filter_info, next);
                        rte_free(filter);
                        filter = temp_filter;
                }
        }
}

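/*
 * Allocate the array backing the shared filter pool, sized by the
 * number of L2 contexts the firmware granted this function.
 */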
int bnxt_alloc_filter_mem(struct bnxt *bp)
{
        struct bnxt_filter_info *filter_mem;
        uint16_t max_filters;

        max_filters = bp->max_l2_ctx;
        /* Allocate memory for VNIC pool and filter pool */
        filter_mem = rte_zmalloc("bnxt_filter_info",
                                 max_filters * sizeof(struct bnxt_filter_info),
                                 0);
        if (filter_mem == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters\n",
                        max_filters);
                return -ENOMEM;
        }
        bp->filter_info = filter_mem;
        return 0;
}

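/*
 * Like bnxt_alloc_filter(), but hands out an uninitialized entry from
 * the free pool; callers set the filter type and fields themselves.
 */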
struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;

        /* Find the 1st unused filter from the free_filter_list pool */
        filter = STAILQ_FIRST(&bp->free_filter_list);
        if (!filter) {
                PMD_DRV_LOG(ERR, "No more free filter resources\n");
                return NULL;
        }
        STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

        return filter;
}

void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
        STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}

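/*
 * Sanity-check that attr, pattern, and actions are all non-NULL before
 * parsing; each NULL argument gets a specific rte_flow error type.
 */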
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}

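/*
 * Skip leading VOID entries in a pattern/action array. The rte_flow
 * API guarantees both arrays are terminated by an END entry, so these
 * loops always terminate.
 */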
static const struct rte_flow_item *
nxt_non_void_pattern(const struct rte_flow_item *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return cur;
                cur++;
        }
}

static const struct rte_flow_action *
nxt_non_void_action(const struct rte_flow_action *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return cur;
                cur++;
        }
}

int check_zero_bytes(const uint8_t *bytes, int len)
{
        int i;

        for (i = 0; i < len; i++)
                if (bytes[i] != 0x00)
                        return 0;
        return 1;
}

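/*
 * Decide which HWRM filter type a pattern needs: returns 1 for an
 * ntuple filter, 0 for an exact-match (EM) filter, or a negative errno
 * if the combination (e.g. VLAN followed by L3/L4 items) is invalid.
 */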
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
                       struct rte_flow_error *error)
{
        const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
        int use_ntuple = 1;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        use_ntuple = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        use_ntuple = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
                case RTE_FLOW_ITEM_TYPE_TCP:
                case RTE_FLOW_ITEM_TYPE_UDP:
                        /* FALLTHROUGH */
                        /* need ntuple match, reset exact match */
                        if (!use_ntuple) {
                                PMD_DRV_LOG(ERR,
                                        "VLAN flow cannot use NTUPLE filter\n");
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Cannot use VLAN with NTUPLE");
                                return -rte_errno;
                        }
                        use_ntuple |= 1;
                        break;
                default:
                        PMD_DRV_LOG(ERR, "Unknown Flow type\n");
                        use_ntuple |= 1;
                }
                item++;
        }
        return use_ntuple;
}

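/*
 * Walk the pattern and translate each item into the HWRM ntuple or EM
 * filter fields, accumulating the matching "enables" bits in 'en'.
 * Every item must carry both spec and mask, and ranges (item->last)
 * are rejected.
 */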
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                                  const struct rte_flow_attr *attr,
                                  const struct rte_flow_item pattern[],
                                  struct rte_flow_error *error,
                                  struct bnxt_filter_info *filter)
{
        const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;
        uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
        uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
        const struct rte_flow_item_vf *vf_spec;
        uint32_t tenant_id_be = 0;
        bool vni_masked = 0;
        bool tni_masked = 0;
        uint32_t vf = 0;
        int use_ntuple;
        uint32_t en = 0;
        uint32_t en_ethertype;
        int dflt_vnic;

        use_ntuple = bnxt_filter_type_check(pattern, error);
        PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
        if (use_ntuple < 0)
                return use_ntuple;

        filter->filter_type = use_ntuple ?
                HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
        en_ethertype = use_ntuple ?
                NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
                EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (item->last) {
                        /* last or range is NOT supported as match criteria */
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "No support for range");
                        return -rte_errno;
                }
                if (!item->spec || !item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "spec/mask is NULL");
                        return -rte_errno;
                }
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        /* Source and destination MAC address masks must
                         * not be partially set; each must be either all
                         * 0's or all 1's.
                         */
                        if ((!is_zero_ether_addr(&eth_mask->src) &&
                             !is_broadcast_ether_addr(&eth_mask->src)) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "MAC_addr mask not valid");
                                return -rte_errno;
                        }

                        /* Mask is not allowed. Only exact matches are */
                        if (eth_mask->type &&
                            eth_mask->type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "ethertype mask not valid");
                                return -rte_errno;
                        }

                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                rte_memcpy(filter->dst_macaddr,
                                           &eth_spec->dst, ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
                        }
                        if (is_broadcast_ether_addr(&eth_mask->src)) {
                                rte_memcpy(filter->src_macaddr,
                                           &eth_spec->src, ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
                        }
                        if (eth_mask->type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(eth_spec->type);
                                en |= en_ethertype;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec = item->spec;
                        vlan_mask = item->mask;
                        if (en & en_ethertype) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN TPID matching is not"
                                                   " supported");
                                return -rte_errno;
                        }
                        if (vlan_mask->tci &&
                            vlan_mask->tci == RTE_BE16(0x0fff)) {
                                /* Only the VLAN ID can be matched. */
                                filter->l2_ovlan =
                                        rte_be_to_cpu_16(vlan_spec->tci &
                                                         RTE_BE16(0x0fff));
                                en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
                        } else if (vlan_mask->tci) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN mask is invalid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type &&
                            vlan_mask->inner_type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "inner ethertype mask not"
                                                   " valid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(vlan_spec->inner_type);
                                en |= en_ethertype;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        /* If mask is not involved, we could use EM filters. */
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;
                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.type_of_service ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.time_to_live ||
                            ipv4_mask->hdr.next_proto_id ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv4 mask.");
                                return -rte_errno;
                        }
                        filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
                        filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
                        if (ipv4_mask->hdr.src_addr) {
                                filter->src_ipaddr_mask[0] =
                                        ipv4_mask->hdr.src_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }
                        if (ipv4_mask->hdr.dst_addr) {
                                filter->dst_ipaddr_mask[0] =
                                        ipv4_mask->hdr.dst_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }
                        filter->ip_addr_type = use_ntuple ?
                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
                        if (ipv4_spec->hdr.next_proto_id) {
                                filter->ip_protocol =
                                        ipv4_spec->hdr.next_proto_id;
                                if (use_ntuple)
                                        en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
                                else
                                        en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv6_mask->hdr.vtc_flow ||
                            ipv6_mask->hdr.payload_len ||
                            ipv6_mask->hdr.proto ||
                            ipv6_mask->hdr.hop_limits) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
                        rte_memcpy(filter->src_ipaddr,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->dst_ipaddr,
                                   ipv6_spec->hdr.dst_addr, 16);
                        if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
                                rte_memcpy(filter->src_ipaddr_mask,
                                           ipv6_mask->hdr.src_addr, 16);
                                en |= !use_ntuple ? 0 :
                                    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }
                        if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
                                rte_memcpy(filter->dst_ipaddr_mask,
                                           ipv6_mask->hdr.dst_addr, 16);
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }
                        filter->ip_addr_type = use_ntuple ?
                                NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
                                EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        /* Check TCP mask. Only DST & SRC ports are maskable */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid TCP mask");
                                return -rte_errno;
                        }
                        filter->src_port = tcp_spec->hdr.src_port;
                        filter->dst_port = tcp_spec->hdr.dst_port;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
                        if (tcp_mask->hdr.dst_port) {
                                filter->dst_port_mask = tcp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }
                        if (tcp_mask->hdr.src_port) {
                                filter->src_port_mask = tcp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid UDP mask");
                                return -rte_errno;
                        }

                        filter->src_port = udp_spec->hdr.src_port;
                        filter->dst_port = udp_spec->hdr.dst_port;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (udp_mask->hdr.dst_port) {
                                filter->dst_port_mask = udp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }
                        if (udp_mask->hdr.src_port) {
                                filter->src_port_mask = udp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
                            vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
                            vxlan_spec->flags != 0x8) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        /* Check if VNI is masked. */
                        if (vxlan_spec && vxlan_mask) {
                                vni_masked =
                                        !!memcmp(vxlan_mask->vni, vni_mask,
                                                 RTE_DIM(vni_mask));
                                if (vni_masked) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VNI mask");
                                        return -rte_errno;
                                }

                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           vxlan_spec->vni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
                            nvgre_spec->protocol != 0x6558) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (nvgre_spec && nvgre_mask) {
                                tni_masked =
                                        !!memcmp(nvgre_mask->tni, tni_mask,
                                                 RTE_DIM(tni_mask));
                                if (tni_masked) {
                                        rte_flow_error_set(error, EINVAL,
                                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                                       item,
                                                       "Invalid TNI mask");
                                        return -rte_errno;
                                }
                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           nvgre_spec->tni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = item->spec;
                        vf = vf_spec->id;
                        if (!BNXT_PF(bp)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Configuring on a VF!");
                                return -rte_errno;
                        }

                        if (vf >= bp->pdev->max_vfs) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Incorrect VF id!");
                                return -rte_errno;
                        }

                        if (!attr->transfer) {
                                rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Matching VF traffic without"
                                           " affecting it (transfer attribute)"
                                           " is unsupported");
                                return -rte_errno;
                        }

                        dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                        if (dflt_vnic < 0) {
                                /* A negative value means the VF has no
                                 * driver loaded and therefore no default
                                 * VNIC to mirror to; the flow cannot be
                                 * programmed.
                                 */
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Unable to get default VNIC for VF");
                                return -rte_errno;
                        }
                        filter->mirror_vnic_id = dflt_vnic;
                        en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
                        break;
                default:
                        break;
                }
                item++;
        }
        filter->enables = en;

        return 0;
}

/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "No support for egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "No support for priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "No support for group.");
                return -rte_errno;
        }

        return 0;
}

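/*
 * Find or create the L2 filter that anchors an ntuple/EM flow. If the
 * flow's destination MAC matches the port's existing L2 filter, reuse
 * it; otherwise allocate a new L2 filter for that MAC on the target
 * VNIC. Returns NULL on allocation or HWRM failure.
 */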
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
                   struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter1, *f0;
        struct bnxt_vnic_info *vnic0;
        int rc;

        vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
        f0 = STAILQ_FIRST(&vnic0->filter);

        /* This flow has the same DST MAC as the port/L2 filter. */
        if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
                return f0;

        /* This flow needs a DST MAC that differs from the port/L2 filter. */
        PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
        filter1 = bnxt_get_unused_filter(bp);
        if (filter1 == NULL)
                return NULL;
        filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
        memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
        memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
                                     filter1);
        if (rc) {
                bnxt_free_filter(bp, filter1);
                return NULL;
        }
        return filter1;
}

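/*
 * Parse a complete flow definition (attributes, pattern, actions) into
 * 'filter' and resolve the destination: a VNIC for QUEUE, the default
 * VNIC with drop/meter flags for DROP/COUNT, or a VF's default VNIC
 * for the VF action. Exactly one non-VOID action is accepted.
 */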
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             const struct rte_flow_attr *attr,
                             struct rte_flow_error *error,
                             struct bnxt_filter_info *filter)
{
        const struct rte_flow_action *act = nxt_non_void_action(actions);
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_vf *act_vf;
        struct bnxt_vnic_info *vnic, *vnic0;
        struct bnxt_filter_info *filter1;
        uint32_t vf = 0;
        int dflt_vnic;
        int rc;

        if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
                PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Cannot create flow on RSS queues");
                rc = -rte_errno;
                goto ret;
        }

        rc = bnxt_validate_and_parse_flow_type(bp, attr, pattern, error,
                                               filter);
        if (rc != 0)
                goto ret;

        rc = bnxt_flow_parse_attr(attr, error);
        if (rc != 0)
                goto ret;

        /* Only the ingress attribute is supported at the moment. */
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                /* Allow this flow. Redirect to a VNIC. */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                if (act_q->index >= bp->rx_nr_rings) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Invalid queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

                vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
                if (vnic == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "No matching VNIC for queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                filter->dst_id = vnic->fw_vnic_id;
                filter1 = bnxt_get_l2_filter(bp, filter, vnic);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                PMD_DRV_LOG(DEBUG, "VNIC found\n");
                break;
        case RTE_FLOW_ACTION_TYPE_DROP:
                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        filter->flags =
                                HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
                else
                        filter->flags =
                                HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
                break;
        case RTE_FLOW_ACTION_TYPE_COUNT:
                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
                break;
        case RTE_FLOW_ACTION_TYPE_VF:
                act_vf = (const struct rte_flow_action_vf *)act->conf;
                vf = act_vf->id;
                if (!BNXT_PF(bp)) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Configuring on a VF!");
                        rc = -rte_errno;
                        goto ret;
                }

                if (vf >= bp->pdev->max_vfs) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Incorrect VF id!");
                        rc = -rte_errno;
                        goto ret;
                }

                dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                if (dflt_vnic < 0) {
                        /* A negative value means the VF has no driver
                         * loaded and therefore no default VNIC to
                         * redirect to; the flow cannot be programmed.
                         */
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Unable to get default VNIC for VF");
                        rc = -rte_errno;
                        goto ret;
                }
                filter->mirror_vnic_id = dflt_vnic;
                filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                break;

        default:
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }

        act = nxt_non_void_action(++act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }
ret:
        return rc;
}

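/*
 * rte_flow validate callback: run the full parse on a scratch filter
 * from the free pool, then return the filter without programming any
 * firmware state.
 */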
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter;
        int ret = 0;

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0)
                return ret;

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                return -ENOMEM;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        /* No need to hold on to this filter if we are just validating flow */
        filter->fw_l2_filter_id = UINT64_MAX;
        bnxt_free_filter(bp, filter);

        return ret;
}

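/*
 * Check whether a flow identical to 'nf' already exists on any VNIC.
 * Returns -EEXIST when pattern and destination both match, -EXDEV when
 * only the destination differs (the old hardware filter is cleared and
 * the existing flow is pointed at 'nf'), and 0 when there is no match.
 */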
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
        struct bnxt_filter_info *mf;
        struct rte_flow *flow;
        int i;

        for (i = bp->nr_vnics - 1; i >= 0; i--) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                STAILQ_FOREACH(flow, &vnic->flow_list, next) {
                        mf = flow->filter;

                        if (mf->filter_type == nf->filter_type &&
                            mf->flags == nf->flags &&
                            mf->src_port == nf->src_port &&
                            mf->src_port_mask == nf->src_port_mask &&
                            mf->dst_port == nf->dst_port &&
                            mf->dst_port_mask == nf->dst_port_mask &&
                            mf->ip_protocol == nf->ip_protocol &&
                            mf->ip_addr_type == nf->ip_addr_type &&
                            mf->ethertype == nf->ethertype &&
                            mf->vni == nf->vni &&
                            mf->tunnel_type == nf->tunnel_type &&
                            mf->l2_ovlan == nf->l2_ovlan &&
                            mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
                            mf->l2_ivlan == nf->l2_ivlan &&
                            mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
                            !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
                            !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_macaddr, nf->src_macaddr,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->dst_macaddr, nf->dst_macaddr,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_ipaddr, nf->src_ipaddr,
                                    sizeof(nf->src_ipaddr)) &&
                            !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
                                    sizeof(nf->src_ipaddr_mask)) &&
                            !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
                                    sizeof(nf->dst_ipaddr)) &&
                            !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
                                    sizeof(nf->dst_ipaddr_mask))) {
                                if (mf->dst_id == nf->dst_id)
                                        return -EEXIST;
                                /* Same Flow, Different queue
                                 * Clear the old ntuple filter
                                 */
                                if (nf->filter_type == HWRM_CFA_EM_FILTER)
                                        bnxt_hwrm_clear_em_filter(bp, mf);
                                if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                        bnxt_hwrm_clear_ntuple_filter(bp, mf);
                                /* Free the old filter, update flow
                                 * with new filter
                                 */
                                bnxt_free_filter(bp, mf);
                                flow->filter = nf;
                                return -EXDEV;
                        }
                }
        }
        return 0;
}

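/*
 * rte_flow create callback: validate and parse the flow, program the
 * EM or ntuple filter via HWRM, and attach the new flow to the flow
 * list of the destination VNIC.
 */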
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter;
        struct bnxt_vnic_info *vnic = NULL;
        bool update_flow = false;
        struct rte_flow *flow;
        unsigned int i;
        int ret = 0;

        flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return flow;
        }

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "Not a valid flow.\n");
                goto free_flow;
        }

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                ret = -ENOMEM;
                goto free_flow;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        if (ret != 0)
                goto free_filter;

        ret = bnxt_match_filter(bp, filter);
        if (ret == -EEXIST) {
                PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
                /* Clear the L2 filter that was created as part of
                 * validate_and_parse_flow() above.
                 */
                bnxt_hwrm_clear_l2_filter(bp, filter);
                goto free_filter;
        } else if (ret == -EXDEV) {
                PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
                PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
                update_flow = true;
        }

        if (filter->filter_type == HWRM_CFA_EM_FILTER) {
                filter->enables |=
                        HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
        }
        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
                filter->enables |=
                        HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
        }

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];
                if (filter->dst_id == vnic->fw_vnic_id)
                        break;
        }

        if (!ret) {
                flow->filter = filter;
                flow->vnic = vnic;
                if (update_flow) {
                        ret = -EXDEV;
                        goto free_flow;
                }
                PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
                STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
                return flow;
        }
free_filter:
        bnxt_free_filter(bp, filter);
free_flow:
        if (ret == -EEXIST)
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Matching Flow exists.");
        else if (ret == -EXDEV)
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Flow with pattern exists, updating destination queue");
        else
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to create flow.");
        rte_free(flow);
        flow = NULL;
        return flow;
}

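/*
 * rte_flow destroy callback: clear the hardware filter that backs the
 * flow (EM, ntuple, or plain L2) and unlink the flow from its VNIC.
 */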
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter = flow->filter;
        struct bnxt_vnic_info *vnic = flow->vnic;
        int ret = 0;

        ret = bnxt_match_filter(bp, filter);
        if (ret == 0)
                PMD_DRV_LOG(ERR, "Could not find matching flow\n");
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                ret = bnxt_hwrm_clear_em_filter(bp, filter);
        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
        else
                ret = bnxt_hwrm_clear_l2_filter(bp, filter);
        if (!ret) {
                STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
                rte_free(flow);
        } else {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to destroy flow.");
        }

        return ret;
}

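/*
 * rte_flow flush callback: clear every EM/ntuple filter on every VNIC
 * and free the associated flow objects.
 */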
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_vnic_info *vnic;
        struct rte_flow *flow;
        unsigned int i;
        int ret = 0;

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];
                /* Pop flows off the head of the list instead of using
                 * STAILQ_FOREACH, which would touch each flow after it
                 * has been freed.
                 */
                while ((flow = STAILQ_FIRST(&vnic->flow_list)) != NULL) {
                        struct bnxt_filter_info *filter = flow->filter;

                        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                                ret = bnxt_hwrm_clear_em_filter(bp, filter);
                        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

                        if (ret) {
                                rte_flow_error_set(error, -ret,
                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
                                                   NULL,
                                                   "Failed to flush flow in HW.");
                                return -rte_errno;
                        }

                        STAILQ_REMOVE_HEAD(&vnic->flow_list, next);
                        rte_free(flow);
                }
        }

        return ret;
}

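/*
 * Flow ops table registered with the generic rte_flow layer; the
 * callbacks above are reached through rte_flow_validate()/create()/
 * destroy()/flush() on this port. As an illustrative sketch (not part
 * of this driver), an application could steer TCP port 80 over IPv4 to
 * Rx queue 1 as below; note this PMD requires every pattern item to
 * carry both spec and mask:
 *
 *	struct rte_flow_item_eth eth_spec = { 0 }, eth_mask = { 0 };
 *	struct rte_flow_item_ipv4 ip_spec = { 0 }, ip_mask = { 0 };
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .dst_port = RTE_BE16(80) },
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr = { .dst_port = RTE_BE16(0xffff) },
 *	};
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 */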
const struct rte_flow_ops bnxt_flow_ops = {
        .validate = bnxt_flow_validate,
        .create = bnxt_flow_create,
        .destroy = bnxt_flow_destroy,
        .flush = bnxt_flow_flush,
};