net/bnxt: move function check zero bytes
drivers/net/bnxt/bnxt_filter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_util.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * Filter Functions
 */

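/*
 * Pull the first unused filter from the free filter pool and initialize
 * it as an L2 MAC filter matching the port's own MAC address exactly.
 */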
struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;

        /* Find the 1st unused filter from the free_filter_list pool */
        filter = STAILQ_FIRST(&bp->free_filter_list);
        if (!filter) {
                PMD_DRV_LOG(ERR, "No more free filter resources\n");
                return NULL;
        }
        STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

        /* Default to L2 MAC Addr filter */
        filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
        memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
               ETHER_ADDR_LEN);
        memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
        return filter;
}

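/*
 * Allocate a zeroed filter for the given VF and append it to that VF's
 * filter list under the PF.
 */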
struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
{
        struct bnxt_filter_info *filter;

        filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
        if (!filter) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
                        vf);
                return NULL;
        }

        filter->fw_l2_filter_id = UINT64_MAX;
        STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
        return filter;
}

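/*
 * Reset every filter in the pool to the "unallocated" firmware ID and
 * rebuild the free filter list.
 */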
void bnxt_init_filters(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;
        int i, max_filters;

        max_filters = bp->max_l2_ctx;
        STAILQ_INIT(&bp->free_filter_list);
        for (i = 0; i < max_filters; i++) {
                filter = &bp->filter_info[i];
                filter->fw_l2_filter_id = UINT64_MAX;
                filter->fw_em_filter_id = UINT64_MAX;
                filter->fw_ntuple_filter_id = UINT64_MAX;
                STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
        }
}

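/*
 * Return all VNIC filters to the free filter list and clear the HWRM L2
 * filters installed on behalf of VFs.
 */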
void bnxt_free_all_filters(struct bnxt *bp)
{
        struct bnxt_vnic_info *vnic;
        struct bnxt_filter_info *filter, *temp_filter;
        int i;

        for (i = 0; i < MAX_FF_POOLS; i++) {
                STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
                        filter = STAILQ_FIRST(&vnic->filter);
                        while (filter) {
                                temp_filter = STAILQ_NEXT(filter, next);
                                STAILQ_REMOVE(&vnic->filter, filter,
                                              bnxt_filter_info, next);
                                STAILQ_INSERT_TAIL(&bp->free_filter_list,
                                                   filter, next);
                                filter = temp_filter;
                        }
                        STAILQ_INIT(&vnic->filter);
                }
        }

        for (i = 0; i < bp->pf.max_vfs; i++) {
                STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
                        bnxt_hwrm_clear_l2_filter(bp, filter);
                }
        }
}

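/*
 * Release the filter pool. Any filter still holding a firmware L2 filter
 * ID is cleared via HWRM first; VF filter lists are drained and freed.
 */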
void bnxt_free_filter_mem(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;
        uint16_t max_filters, i;
        int rc = 0;

        if (bp->filter_info == NULL)
                return;

        /* Ensure that all filters are freed */
        max_filters = bp->max_l2_ctx;
        for (i = 0; i < max_filters; i++) {
                filter = &bp->filter_info[i];
                if (filter->fw_l2_filter_id != UINT64_MAX) {
                        PMD_DRV_LOG(ERR, "HWRM filter was not freed\n");
                        /* Call HWRM to try to free filter again */
                        rc = bnxt_hwrm_clear_l2_filter(bp, filter);
                        if (rc)
                                PMD_DRV_LOG(ERR,
                                       "HWRM filter cannot be freed rc = %d\n",
                                        rc);
                }
                filter->fw_l2_filter_id = UINT64_MAX;
        }
        STAILQ_INIT(&bp->free_filter_list);

        rte_free(bp->filter_info);
        bp->filter_info = NULL;

        for (i = 0; i < bp->pf.max_vfs; i++) {
                /* Pop each VF filter off its list before freeing it, so
                 * the traversal never touches freed memory.
                 */
                while ((filter = STAILQ_FIRST(&bp->pf.vf_info[i].filter)) !=
                       NULL) {
                        STAILQ_REMOVE_HEAD(&bp->pf.vf_info[i].filter, next);
                        rte_free(filter);
                }
        }
}

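/*
 * Allocate the filter pool, sized by the number of L2 contexts supported
 * by the device.
 */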
int bnxt_alloc_filter_mem(struct bnxt *bp)
{
        struct bnxt_filter_info *filter_mem;
        uint16_t max_filters;

        max_filters = bp->max_l2_ctx;
        /* Allocate memory for the filter pool */
        filter_mem = rte_zmalloc("bnxt_filter_info",
                                 max_filters * sizeof(struct bnxt_filter_info),
                                 0);
        if (filter_mem == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters",
                        max_filters);
                return -ENOMEM;
        }
        bp->filter_info = filter_mem;
        return 0;
}

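/*
 * Pull the first unused filter from the free filter pool without
 * initializing it; the caller fills in the match criteria.
 */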
struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;

        /* Find the 1st unused filter from the free_filter_list pool */
        filter = STAILQ_FIRST(&bp->free_filter_list);
        if (!filter) {
                PMD_DRV_LOG(ERR, "No more free filter resources\n");
                return NULL;
        }
        STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

        return filter;
}

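/* Return a filter to the free filter pool. */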
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
        STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}

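/* Reject rte_flow requests with a NULL attribute, pattern or action list. */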
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}

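/*
 * Skip over VOID entries so parsing always starts at the next meaningful
 * pattern item or action. The caller guarantees an END entry terminates
 * the array.
 */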
static const struct rte_flow_item *
nxt_non_void_pattern(const struct rte_flow_item *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return cur;
                cur++;
        }
}

static const struct rte_flow_action *
nxt_non_void_action(const struct rte_flow_action *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return cur;
                cur++;
        }
}

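/*
 * Decide between an NTUPLE and an exact-match (EM) filter for the given
 * pattern. A VLAN item forces exact match; combining VLAN with L3/L4
 * items is rejected because NTUPLE filters cannot match VLAN fields.
 */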
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
                       struct rte_flow_error *error)
{
        const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
        int use_ntuple = 1;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        use_ntuple = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        use_ntuple = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
                case RTE_FLOW_ITEM_TYPE_TCP:
                case RTE_FLOW_ITEM_TYPE_UDP:
                        /* These need an ntuple match; reject the flow if
                         * a VLAN item already forced exact match.
                         */
                        if (!use_ntuple) {
                                PMD_DRV_LOG(ERR,
                                        "VLAN flow cannot use NTUPLE filter\n");
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Cannot use VLAN with NTUPLE");
                                return -rte_errno;
                        }
                        use_ntuple |= 1;
                        break;
                default:
                        PMD_DRV_LOG(ERR, "Unknown Flow type\n");
                        use_ntuple |= 1;
                }
                item++;
        }
        return use_ntuple;
}

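/*
 * Walk the pattern items and translate them into HWRM filter fields,
 * accumulating the "enables" flags for either the NTUPLE or the EM
 * filter layout selected by bnxt_filter_type_check().
 */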
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                                  const struct rte_flow_attr *attr,
                                  const struct rte_flow_item pattern[],
                                  struct rte_flow_error *error,
                                  struct bnxt_filter_info *filter)
{
        const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;
        uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
        uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
        const struct rte_flow_item_vf *vf_spec;
        uint32_t tenant_id_be = 0;
        bool vni_masked = false;
        bool tni_masked = false;
        uint32_t vf = 0;
        int use_ntuple;
        uint32_t en = 0;
        uint32_t en_ethertype;
        int dflt_vnic;

        use_ntuple = bnxt_filter_type_check(pattern, error);
        PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
        if (use_ntuple < 0)
                return use_ntuple;

        filter->filter_type = use_ntuple ?
                HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
        en_ethertype = use_ntuple ?
                NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
                EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (item->last) {
                        /* last or range is NOT supported as match criteria */
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "No support for range");
                        return -rte_errno;
                }
                if (!item->spec || !item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "spec/mask is NULL");
                        return -rte_errno;
                }
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        /* Source and destination MAC address masks must
                         * not be partially set: each must be all 0's or
                         * all 1's.
                         */
                        if ((!is_zero_ether_addr(&eth_mask->src) &&
                             !is_broadcast_ether_addr(&eth_mask->src)) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "MAC_addr mask not valid");
                                return -rte_errno;
                        }

                        /* A partial ethertype mask is not allowed; only
                         * exact matches are.
                         */
                        if (eth_mask->type &&
                            eth_mask->type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "ethertype mask not valid");
                                return -rte_errno;
                        }

                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                rte_memcpy(filter->dst_macaddr,
                                           &eth_spec->dst, 6);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
                        }
                        if (is_broadcast_ether_addr(&eth_mask->src)) {
                                rte_memcpy(filter->src_macaddr,
                                           &eth_spec->src, 6);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
                        }
                        if (eth_mask->type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(eth_spec->type);
                                en |= en_ethertype;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec = item->spec;
                        vlan_mask = item->mask;
                        if (en & en_ethertype) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN TPID matching is not"
                                                   " supported");
                                return -rte_errno;
                        }
                        if (vlan_mask->tci &&
                            vlan_mask->tci == RTE_BE16(0x0fff)) {
                                /* Only the VLAN ID can be matched. */
                                filter->l2_ovlan =
                                        rte_be_to_cpu_16(vlan_spec->tci &
                                                         RTE_BE16(0x0fff));
                                en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
                        } else if (vlan_mask->tci) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN mask is invalid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type &&
                            vlan_mask->inner_type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "inner ethertype mask not"
                                                   " valid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(vlan_spec->inner_type);
                                en |= en_ethertype;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        /* If mask is not involved, we could use EM filters. */
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;
                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.type_of_service ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.time_to_live ||
                            ipv4_mask->hdr.next_proto_id ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv4 mask.");
                                return -rte_errno;
                        }
                        filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
                        filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
                        if (ipv4_mask->hdr.src_addr) {
                                filter->src_ipaddr_mask[0] =
                                        ipv4_mask->hdr.src_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }
                        if (ipv4_mask->hdr.dst_addr) {
                                filter->dst_ipaddr_mask[0] =
                                        ipv4_mask->hdr.dst_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }
                        filter->ip_addr_type = use_ntuple ?
                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
                        if (ipv4_spec->hdr.next_proto_id) {
                                filter->ip_protocol =
                                        ipv4_spec->hdr.next_proto_id;
                                if (use_ntuple)
                                        en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
                                else
                                        en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv6_mask->hdr.vtc_flow ||
                            ipv6_mask->hdr.payload_len ||
                            ipv6_mask->hdr.proto ||
                            ipv6_mask->hdr.hop_limits) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
                        rte_memcpy(filter->src_ipaddr,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->dst_ipaddr,
                                   ipv6_spec->hdr.dst_addr, 16);
                        if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
                                                   16)) {
                                rte_memcpy(filter->src_ipaddr_mask,
                                           ipv6_mask->hdr.src_addr, 16);
                                en |= !use_ntuple ? 0 :
                                    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }
                        if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
                                                   16)) {
                                rte_memcpy(filter->dst_ipaddr_mask,
                                           ipv6_mask->hdr.dst_addr, 16);
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }
                        filter->ip_addr_type = use_ntuple ?
                                NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
                                EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        /* Check TCP mask. Only DST & SRC ports are maskable */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid TCP mask");
                                return -rte_errno;
                        }
                        filter->src_port = tcp_spec->hdr.src_port;
                        filter->dst_port = tcp_spec->hdr.dst_port;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
                        if (tcp_mask->hdr.dst_port) {
                                filter->dst_port_mask = tcp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }
                        if (tcp_mask->hdr.src_port) {
                                filter->src_port_mask = tcp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid UDP mask");
                                return -rte_errno;
                        }

                        filter->src_port = udp_spec->hdr.src_port;
                        filter->dst_port = udp_spec->hdr.dst_port;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (udp_mask->hdr.dst_port) {
                                filter->dst_port_mask = udp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }
                        if (udp_mask->hdr.src_port) {
                                filter->src_port_mask = udp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
                            vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
                            vxlan_spec->flags != 0x8) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        /* Check if VNI is masked. */
                        if (vxlan_spec && vxlan_mask) {
                                vni_masked =
                                        !!memcmp(vxlan_mask->vni, vni_mask,
                                                 RTE_DIM(vni_mask));
                                if (vni_masked) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VNI mask");
                                        return -rte_errno;
                                }

                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           vxlan_spec->vni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        /* These header fields are big endian on the wire. */
                        if (nvgre_spec->c_k_s_rsvd0_ver != RTE_BE16(0x2000) ||
                            nvgre_spec->protocol != RTE_BE16(0x6558)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (nvgre_spec && nvgre_mask) {
                                tni_masked =
                                        !!memcmp(nvgre_mask->tni, tni_mask,
                                                 RTE_DIM(tni_mask));
                                if (tni_masked) {
                                        rte_flow_error_set(error, EINVAL,
                                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                                       item,
                                                       "Invalid TNI mask");
                                        return -rte_errno;
                                }
                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           nvgre_spec->tni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = item->spec;
                        vf = vf_spec->id;
                        if (!BNXT_PF(bp)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Configuring on a VF!");
                                return -rte_errno;
                        }

                        if (vf >= bp->pdev->max_vfs) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Incorrect VF id!");
                                return -rte_errno;
                        }

                        if (!attr->transfer) {
                                rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Matching VF traffic without"
                                           " affecting it (transfer attribute)"
                                           " is unsupported");
                                return -rte_errno;
                        }

                        dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                        if (dflt_vnic < 0) {
                                /* This simply indicates there's no driver
                                 * loaded on the VF. Fail the flow rather
                                 * than guess at a destination VNIC.
                                 */
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Unable to get default VNIC for VF");
                                return -rte_errno;
                        }
                        filter->mirror_vnic_id = dflt_vnic;
                        en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
                        break;
                default:
                        break;
                }
                item++;
        }
        filter->enables = en;

        return 0;
}

/* Parse attributes: only plain ingress flows are accepted. */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "No support for egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "No support for priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "No support for group.");
                return -rte_errno;
        }

        return 0;
}

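/*
 * Find or create the L2 filter a flow hangs off of. If the flow's DST MAC
 * matches the port's existing L2 filter, reuse it; otherwise allocate and
 * program a new L2 filter for that MAC on the given VNIC.
 */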
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
                   struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter1, *f0;
        struct bnxt_vnic_info *vnic0;
        int rc;

        vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
        f0 = STAILQ_FIRST(&vnic0->filter);

        /* This flow has the same DST MAC as the port/L2 filter. */
        if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
                return f0;

        /* This flow needs a DST MAC that differs from the port/L2 filter. */
        PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
        filter1 = bnxt_get_unused_filter(bp);
        if (filter1 == NULL)
                return NULL;
        filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
        memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
        memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
                                     filter1);
        if (rc) {
                bnxt_free_filter(bp, filter1);
                return NULL;
        }
        return filter1;
}

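/*
 * Validate the attribute/pattern/action triplet and resolve the action
 * into a destination VNIC plus the L2 filter the new flow will reference.
 */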
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             const struct rte_flow_attr *attr,
                             struct rte_flow_error *error,
                             struct bnxt_filter_info *filter)
{
        const struct rte_flow_action *act = nxt_non_void_action(actions);
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_vf *act_vf;
        struct bnxt_vnic_info *vnic, *vnic0;
        struct bnxt_filter_info *filter1;
        uint32_t vf = 0;
        int dflt_vnic;
        int rc;

        if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
                PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Cannot create flow on RSS queues");
                rc = -rte_errno;
                goto ret;
        }

        rc = bnxt_validate_and_parse_flow_type(bp, attr, pattern, error,
                                               filter);
        if (rc != 0)
                goto ret;

        rc = bnxt_flow_parse_attr(attr, error);
        if (rc != 0)
                goto ret;

        /* We only support the ingress attribute right now. */
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                /* Allow this flow. Redirect to a VNIC. */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                if (act_q->index >= bp->rx_nr_rings) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Invalid queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
                if (vnic == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "No matching VNIC for queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                filter->dst_id = vnic->fw_vnic_id;
                filter1 = bnxt_get_l2_filter(bp, filter, vnic);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                PMD_DRV_LOG(DEBUG, "VNIC found\n");
                break;
        case RTE_FLOW_ACTION_TYPE_DROP:
                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        filter->flags =
                                HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
                else
                        filter->flags =
                                HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
                break;
        case RTE_FLOW_ACTION_TYPE_COUNT:
                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
                break;
        case RTE_FLOW_ACTION_TYPE_VF:
                act_vf = (const struct rte_flow_action_vf *)act->conf;
                vf = act_vf->id;
                if (!BNXT_PF(bp)) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Configuring on a VF!");
                        rc = -rte_errno;
                        goto ret;
                }

                if (vf >= bp->pdev->max_vfs) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Incorrect VF id!");
                        rc = -rte_errno;
                        goto ret;
                }

                dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                if (dflt_vnic < 0) {
                        /* This simply indicates there's no driver loaded
                         * on the VF. Fail the flow rather than guess at
                         * a destination VNIC.
                         */
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Unable to get default VNIC for VF");
                        rc = -rte_errno;
                        goto ret;
                }
                filter->mirror_vnic_id = dflt_vnic;
                filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                break;

        default:
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }

        act = nxt_non_void_action(++act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }
ret:
        return rc;
}

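/* rte_flow validate callback: parse the flow, then return the scratch
 * filter to the pool since validation programs nothing in hardware.
 */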
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter;
        int ret = 0;

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0)
                return ret;

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                return -ENOMEM;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        /* No need to hold on to this filter if we are just validating flow */
        filter->fw_l2_filter_id = UINT64_MAX;
        bnxt_free_filter(bp, filter);

        return ret;
}

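/*
 * Look for an existing flow with the same match criteria. Returns -EEXIST
 * if an identical flow (same destination) exists, and -EXDEV if only the
 * destination differs, in which case the old hardware filter is cleared
 * and the flow is updated to point at the new filter.
 */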
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
        struct bnxt_filter_info *mf;
        struct rte_flow *flow;
        int i;

        for (i = bp->nr_vnics - 1; i >= 0; i--) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                STAILQ_FOREACH(flow, &vnic->flow_list, next) {
                        mf = flow->filter;

                        if (mf->filter_type == nf->filter_type &&
                            mf->flags == nf->flags &&
                            mf->src_port == nf->src_port &&
                            mf->src_port_mask == nf->src_port_mask &&
                            mf->dst_port == nf->dst_port &&
                            mf->dst_port_mask == nf->dst_port_mask &&
                            mf->ip_protocol == nf->ip_protocol &&
                            mf->ip_addr_type == nf->ip_addr_type &&
                            mf->ethertype == nf->ethertype &&
                            mf->vni == nf->vni &&
                            mf->tunnel_type == nf->tunnel_type &&
                            mf->l2_ovlan == nf->l2_ovlan &&
                            mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
                            mf->l2_ivlan == nf->l2_ivlan &&
                            mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
                            !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
                            !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_macaddr, nf->src_macaddr,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->dst_macaddr, nf->dst_macaddr,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_ipaddr, nf->src_ipaddr,
                                    sizeof(nf->src_ipaddr)) &&
                            !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
                                    sizeof(nf->src_ipaddr_mask)) &&
                            !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
                                    sizeof(nf->dst_ipaddr)) &&
                            !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
                                    sizeof(nf->dst_ipaddr_mask))) {
                                if (mf->dst_id == nf->dst_id)
                                        return -EEXIST;
                                /* Same flow, different queue:
                                 * clear the old hardware filter.
                                 */
                                if (nf->filter_type == HWRM_CFA_EM_FILTER)
                                        bnxt_hwrm_clear_em_filter(bp, mf);
                                if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                        bnxt_hwrm_clear_ntuple_filter(bp, mf);
                                /* Free the old filter and update the flow
                                 * with the new filter.
                                 */
                                bnxt_free_filter(bp, mf);
                                flow->filter = nf;
                                return -EXDEV;
                        }
                }
        }
        return 0;
}

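/* rte_flow create callback: parse the flow, program the EM or NTUPLE
 * filter in hardware, and attach the flow to its destination VNIC's list.
 */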
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter;
        struct bnxt_vnic_info *vnic = NULL;
        bool update_flow = false;
        struct rte_flow *flow;
        unsigned int i;
        int ret = 0;

        flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return flow;
        }

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "Not a valid flow.\n");
                goto free_flow;
        }

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                ret = -ENOMEM;
                goto free_flow;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        if (ret != 0)
                goto free_filter;

        ret = bnxt_match_filter(bp, filter);
        if (ret == -EEXIST) {
                PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
                /* Clear the filter that was created as part of
                 * validate_and_parse_flow() above
                 */
                bnxt_hwrm_clear_l2_filter(bp, filter);
                goto free_filter;
        } else if (ret == -EXDEV) {
                PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
                PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
                update_flow = true;
        }

        if (filter->filter_type == HWRM_CFA_EM_FILTER) {
                filter->enables |=
                        HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
        }
        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
                filter->enables |=
                        HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
        }

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];
                if (filter->dst_id == vnic->fw_vnic_id)
                        break;
        }

        if (!ret) {
                flow->filter = filter;
                flow->vnic = vnic;
                if (update_flow) {
                        ret = -EXDEV;
                        goto free_flow;
                }
                PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
                STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
                return flow;
        }
free_filter:
        bnxt_free_filter(bp, filter);
free_flow:
        if (ret == -EEXIST)
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Matching Flow exists.");
        else if (ret == -EXDEV)
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Flow with pattern exists, updating destination queue");
        else
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to create flow.");
        rte_free(flow);
        flow = NULL;
        return flow;
}

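/* rte_flow destroy callback: clear the hardware filter for the flow and
 * unlink it from its VNIC's flow list.
 */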
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter = flow->filter;
        struct bnxt_vnic_info *vnic = flow->vnic;
        int ret = 0;

        ret = bnxt_match_filter(bp, filter);
        if (ret == 0)
                PMD_DRV_LOG(ERR, "Could not find matching flow\n");
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                ret = bnxt_hwrm_clear_em_filter(bp, filter);
        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
        else
                ret = bnxt_hwrm_clear_l2_filter(bp, filter);
        if (!ret) {
                STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
                rte_free(flow);
        } else {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to destroy flow.");
        }

        return ret;
}

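/* rte_flow flush callback: clear every flow on every VNIC and free the
 * flow objects.
 */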
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_vnic_info *vnic;
        struct rte_flow *flow;
        unsigned int i;
        int ret = 0;

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];
                /* Pop each flow off the list before freeing it, so the
                 * traversal never touches freed memory.
                 */
                while ((flow = STAILQ_FIRST(&vnic->flow_list)) != NULL) {
                        struct bnxt_filter_info *filter = flow->filter;

                        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                                ret = bnxt_hwrm_clear_em_filter(bp, filter);
                        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

                        if (ret) {
                                rte_flow_error_set(error, -ret,
                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
                                                   NULL,
                                                   "Failed to flush flow in HW.");
                                return -rte_errno;
                        }

                        STAILQ_REMOVE_HEAD(&vnic->flow_list, next);
                        rte_free(flow);
                }
        }

        return ret;
}

const struct rte_flow_ops bnxt_flow_ops = {
        .validate = bnxt_flow_validate,
        .create = bnxt_flow_create,
        .destroy = bnxt_flow_destroy,
        .flush = bnxt_flow_flush,
};