net/bnxt: fix build
[dpdk.git] / drivers / net / bnxt / bnxt_filter.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) Broadcom Limited.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Broadcom Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35
36 #include <rte_log.h>
37 #include <rte_malloc.h>
38 #include <rte_flow.h>
39 #include <rte_flow_driver.h>
40 #include <rte_tailq.h>
41
42 #include "bnxt.h"
43 #include "bnxt_filter.h"
44 #include "bnxt_hwrm.h"
45 #include "bnxt_vnic.h"
46 #include "hsi_struct_def_dpdk.h"
47
48 /*
49  * Filter Functions
50  */
51
52 struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
53 {
54         struct bnxt_filter_info *filter;
55
56         /* Find the 1st unused filter from the free_filter_list pool */
57         filter = STAILQ_FIRST(&bp->free_filter_list);
58         if (!filter) {
59                 RTE_LOG(ERR, PMD, "No more free filter resources\n");
60                 return NULL;
61         }
62         STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
63
64         /* Default to L2 MAC Addr filter */
65         filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
66         filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
67                         HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
68         memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
69                ETHER_ADDR_LEN);
70         memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
71         return filter;
72 }
73
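/*
 * Allocate a new filter for the given VF and add it to that VF's
 * filter list.
 */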
74 struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
75 {
76         struct bnxt_filter_info *filter;
77
78         filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
79         if (!filter) {
80                 RTE_LOG(ERR, PMD, "Failed to alloc memory for VF %hu filters\n",
81                         vf);
82                 return NULL;
83         }
84
85         filter->fw_l2_filter_id = UINT64_MAX;
86         STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
87         return filter;
88 }
89
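/* Reset all filter IDs and place every filter on the free list. */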
90 void bnxt_init_filters(struct bnxt *bp)
91 {
92         struct bnxt_filter_info *filter;
93         int i, max_filters;
94
95         max_filters = bp->max_l2_ctx;
96         STAILQ_INIT(&bp->free_filter_list);
97         for (i = 0; i < max_filters; i++) {
98                 filter = &bp->filter_info[i];
99                 filter->fw_l2_filter_id = -1;
100                 filter->fw_em_filter_id = -1;
101                 filter->fw_ntuple_filter_id = -1;
102                 STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
103         }
104 }
105
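/*
 * Return all per-VNIC filters to the free list and clear the HWRM L2
 * filters that were created for the VFs.
 */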
106 void bnxt_free_all_filters(struct bnxt *bp)
107 {
108         struct bnxt_vnic_info *vnic;
109         struct bnxt_filter_info *filter, *temp_filter;
110         int i;
111
112         for (i = 0; i < MAX_FF_POOLS; i++) {
113                 STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
114                         filter = STAILQ_FIRST(&vnic->filter);
115                         while (filter) {
116                                 temp_filter = STAILQ_NEXT(filter, next);
117                                 STAILQ_REMOVE(&vnic->filter, filter,
118                                               bnxt_filter_info, next);
119                                 STAILQ_INSERT_TAIL(&bp->free_filter_list,
120                                                    filter, next);
121                                 filter = temp_filter;
122                         }
123                         STAILQ_INIT(&vnic->filter);
124                 }
125         }
126
127         for (i = 0; i < bp->pf.max_vfs; i++) {
128                 STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
129                         bnxt_hwrm_clear_l2_filter(bp, filter);
130                 }
131         }
132 }
133
134 void bnxt_free_filter_mem(struct bnxt *bp)
135 {
136         struct bnxt_filter_info *filter;
137         uint16_t max_filters, i;
138         int rc = 0;
139
140         if (bp->filter_info == NULL)
141                 return;
142
143         /* Ensure that all filters are freed */
144         max_filters = bp->max_l2_ctx;
145         for (i = 0; i < max_filters; i++) {
146                 filter = &bp->filter_info[i];
147                 if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
148                         RTE_LOG(ERR, PMD, "HWRM filter was not freed\n");
149                         /* Call HWRM to try to free filter again */
150                         rc = bnxt_hwrm_clear_l2_filter(bp, filter);
151                         if (rc)
152                                 RTE_LOG(ERR, PMD,
153                                        "HWRM filter cannot be freed rc = %d\n",
154                                         rc);
155                 }
156                 filter->fw_l2_filter_id = UINT64_MAX;
157         }
158         STAILQ_INIT(&bp->free_filter_list);
159
160         rte_free(bp->filter_info);
161         bp->filter_info = NULL;
162 }
163
164 int bnxt_alloc_filter_mem(struct bnxt *bp)
165 {
166         struct bnxt_filter_info *filter_mem;
167         uint16_t max_filters;
168
169         max_filters = bp->max_l2_ctx;
170         /* Allocate memory for the filter pool */
171         filter_mem = rte_zmalloc("bnxt_filter_info",
172                                  max_filters * sizeof(struct bnxt_filter_info),
173                                  0);
174         if (filter_mem == NULL) {
175                 RTE_LOG(ERR, PMD, "Failed to alloc memory for %d filters\n",
176                         max_filters);
177                 return -ENOMEM;
178         }
179         bp->filter_info = filter_mem;
180         return 0;
181 }
182
183 struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
184 {
185         struct bnxt_filter_info *filter;
186
187         /* Find the 1st unused filter from the free_filter_list pool */
188         filter = STAILQ_FIRST(&bp->free_filter_list);
189         if (!filter) {
190                 RTE_LOG(ERR, PMD, "No more free filter resources\n");
191                 return NULL;
192         }
193         STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
194
195         return filter;
196 }
197
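/* Return a filter to the free pool. */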
198 void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
199 {
200         STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
201 }
202
203 static int
204 bnxt_flow_args_validate(const struct rte_flow_attr *attr,
205                         const struct rte_flow_item pattern[],
206                         const struct rte_flow_action actions[],
207                         struct rte_flow_error *error)
208 {
209         if (!pattern) {
210                 rte_flow_error_set(error, EINVAL,
211                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
212                         NULL, "NULL pattern.");
213                 return -rte_errno;
214         }
215
216         if (!actions) {
217                 rte_flow_error_set(error, EINVAL,
218                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
219                                    NULL, "NULL action.");
220                 return -rte_errno;
221         }
222
223         if (!attr) {
224                 rte_flow_error_set(error, EINVAL,
225                                    RTE_FLOW_ERROR_TYPE_ATTR,
226                                    NULL, "NULL attribute.");
227                 return -rte_errno;
228         }
229
230         return 0;
231 }
232
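/* Skip VOID items and return the first meaningful pattern item. */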
233 static const struct rte_flow_item *
234 nxt_non_void_pattern(const struct rte_flow_item *cur)
235 {
236         while (1) {
237                 if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
238                         return cur;
239                 cur++;
240         }
241 }
242
243 static const struct rte_flow_action *
244 nxt_non_void_action(const struct rte_flow_action *cur)
245 {
246         while (1) {
247                 if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
248                         return cur;
249                 cur++;
250         }
251 }
252
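/* Return 1 if the first 'len' bytes are all zero, 0 otherwise. */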
253 static inline int check_zero_bytes(const uint8_t *bytes, int len)
254 {
255         int i;
256         for (i = 0; i < len; i++)
257                 if (bytes[i] != 0x00)
258                         return 0;
259         return 1;
260 }
261
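/*
 * Walk the pattern to pick the HW filter type: returns 1 when an n-tuple
 * filter is needed, 0 when an exact match (EM) filter can be used (a VLAN
 * item forces EM), or a negative errno on an invalid combination.
 */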
262 static int
263 bnxt_filter_type_check(const struct rte_flow_item pattern[],
264                        struct rte_flow_error *error)
265 {
266         const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
267         int use_ntuple = 1;
268
269         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
270                 switch (item->type) {
271                 case RTE_FLOW_ITEM_TYPE_ETH:
272                         use_ntuple = 1;
273                         break;
274                 case RTE_FLOW_ITEM_TYPE_VLAN:
275                         use_ntuple = 0;
276                         break;
277                 case RTE_FLOW_ITEM_TYPE_IPV4:
278                 case RTE_FLOW_ITEM_TYPE_IPV6:
279                 case RTE_FLOW_ITEM_TYPE_TCP:
280                 case RTE_FLOW_ITEM_TYPE_UDP:
281                         /* FALLTHROUGH */
282                         /* need ntuple match, reset exact match */
283                         if (!use_ntuple) {
284                                 RTE_LOG(ERR, PMD,
285                                         "VLAN flow cannot use NTUPLE filter\n");
286                                 rte_flow_error_set(error, EINVAL,
287                                                    RTE_FLOW_ERROR_TYPE_ITEM,
288                                                    item,
289                                                    "Cannot use VLAN with NTUPLE");
290                                 return -rte_errno;
291                         }
292                         use_ntuple |= 1;
293                         break;
294                 default:
295                         RTE_LOG(ERR, PMD, "Unknown Flow type\n");
296                         use_ntuple |= 1;
297                 }
298                 item++;
299         }
300         return use_ntuple;
301 }
302
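/*
 * Parse the pattern items into HWRM filter fields: copy the specs and
 * masks into 'filter' and accumulate the matching 'enables' flags for an
 * n-tuple or exact match filter.
 */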
303 static int
304 bnxt_validate_and_parse_flow_type(const struct rte_flow_item pattern[],
305                                   struct rte_flow_error *error,
306                                   struct bnxt_filter_info *filter)
307 {
308         const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
309         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
310         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
311         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
312         const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
313         const struct rte_flow_item_udp *udp_spec, *udp_mask;
314         const struct rte_flow_item_eth *eth_spec, *eth_mask;
315         const struct rte_flow_item_nvgre *nvgre_spec;
316         const struct rte_flow_item_nvgre *nvgre_mask;
317         const struct rte_flow_item_vxlan *vxlan_spec;
318         const struct rte_flow_item_vxlan *vxlan_mask;
319         uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
320         uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
321         uint32_t tenant_id_be = 0;
322         bool vni_masked = 0;
323         bool tni_masked = 0;
324         int use_ntuple;
325         uint32_t en = 0;
326
327         use_ntuple = bnxt_filter_type_check(pattern, error);
328         RTE_LOG(DEBUG, PMD, "Use NTUPLE %d\n", use_ntuple);
329         if (use_ntuple < 0)
330                 return use_ntuple;
331
332         filter->filter_type = use_ntuple ?
333                 HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
334
335         while (item->type != RTE_FLOW_ITEM_TYPE_END) {
336                 if (item->last) {
337                         /* last or range is NOT supported as match criteria */
338                         rte_flow_error_set(error, EINVAL,
339                                            RTE_FLOW_ERROR_TYPE_ITEM,
340                                            item,
341                                            "No support for range");
342                         return -rte_errno;
343                 }
344                 if (!item->spec || !item->mask) {
345                         rte_flow_error_set(error, EINVAL,
346                                            RTE_FLOW_ERROR_TYPE_ITEM,
347                                            item,
348                                            "spec/mask is NULL");
349                         return -rte_errno;
350                 }
351                 switch (item->type) {
352                 case RTE_FLOW_ITEM_TYPE_ETH:
353                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
354                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
355
356                         /* Source MAC address mask must not be partially
357                          * set; it must be all 0's or all 1's.
358                          * Destination MAC address mask likewise must be
359                          * all 0's or all 1's, never partially set.
360                          */
361                         if ((!is_zero_ether_addr(&eth_mask->src) &&
362                              !is_broadcast_ether_addr(&eth_mask->src)) ||
363                             (!is_zero_ether_addr(&eth_mask->dst) &&
364                              !is_broadcast_ether_addr(&eth_mask->dst))) {
365                                 rte_flow_error_set(error, EINVAL,
366                                                    RTE_FLOW_ERROR_TYPE_ITEM,
367                                                    item,
368                                                    "MAC_addr mask not valid");
369                                 return -rte_errno;
370                         }
371
372                         /* The ethertype mask must be all 1's (exact match only). */
373                         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
374                                 rte_flow_error_set(error, EINVAL,
375                                                    RTE_FLOW_ERROR_TYPE_ITEM,
376                                                    item,
377                                                    "ethertype mask not valid");
378                                 return -rte_errno;
379                         }
380
381                         if (is_broadcast_ether_addr(&eth_mask->dst)) {
382                                 rte_memcpy(filter->dst_macaddr,
383                                            &eth_spec->dst, 6);
384                                 en |= use_ntuple ?
385                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
386                                         EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
387                         }
388                         if (is_broadcast_ether_addr(&eth_mask->src)) {
389                                 rte_memcpy(filter->src_macaddr,
390                                            &eth_spec->src, 6);
391                                 en |= use_ntuple ?
392                                         NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
393                                         EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
394                         } /*
395                            * else {
396                            *  RTE_LOG(ERR, PMD, "Handle this condition\n");
397                            * }
398                            */
399                         if (eth_spec->type) {
400                                 filter->ethertype =
401                                         rte_be_to_cpu_16(eth_spec->type);
402                                 en |= use_ntuple ?
403                                         NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
404                                         EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
405                         }
406
407                         break;
408                 case RTE_FLOW_ITEM_TYPE_VLAN:
409                         vlan_spec =
410                                 (const struct rte_flow_item_vlan *)item->spec;
411                         vlan_mask =
412                                 (const struct rte_flow_item_vlan *)item->mask;
413                         if (vlan_mask->tci & 0xFFFF && !vlan_mask->tpid) {
414                                 /* Only the VLAN ID can be matched. */
415                                 filter->l2_ovlan =
416                                         rte_be_to_cpu_16(vlan_spec->tci) &
417                                                          0xFFF;
418                                 en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
419                         } else {
420                                 rte_flow_error_set(error, EINVAL,
421                                                    RTE_FLOW_ERROR_TYPE_ITEM,
422                                                    item,
423                                                    "VLAN mask is invalid");
424                                 return -rte_errno;
425                         }
426
427                         break;
428                 case RTE_FLOW_ITEM_TYPE_IPV4:
429                         /* If mask is not involved, we could use EM filters. */
430                         ipv4_spec =
431                                 (const struct rte_flow_item_ipv4 *)item->spec;
432                         ipv4_mask =
433                                 (const struct rte_flow_item_ipv4 *)item->mask;
434                         /* Only IP DST and SRC fields are maskable. */
435                         if (ipv4_mask->hdr.version_ihl ||
436                             ipv4_mask->hdr.type_of_service ||
437                             ipv4_mask->hdr.total_length ||
438                             ipv4_mask->hdr.packet_id ||
439                             ipv4_mask->hdr.fragment_offset ||
440                             ipv4_mask->hdr.time_to_live ||
441                             ipv4_mask->hdr.next_proto_id ||
442                             ipv4_mask->hdr.hdr_checksum) {
443                                 rte_flow_error_set(error, EINVAL,
444                                            RTE_FLOW_ERROR_TYPE_ITEM,
445                                            item,
446                                            "Invalid IPv4 mask.");
447                                 return -rte_errno;
448                         }
449                         filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
450                         filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
451                         if (use_ntuple)
452                                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
453                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
454                         else
455                                 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
456                                         EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
457                         if (ipv4_mask->hdr.src_addr) {
458                                 filter->src_ipaddr_mask[0] =
459                                         ipv4_mask->hdr.src_addr;
460                                 en |= !use_ntuple ? 0 :
461                                      NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
462                         }
463                         if (ipv4_mask->hdr.dst_addr) {
464                                 filter->dst_ipaddr_mask[0] =
465                                         ipv4_mask->hdr.dst_addr;
466                                 en |= !use_ntuple ? 0 :
467                                      NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
468                         }
469                         filter->ip_addr_type = use_ntuple ?
470                          HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
471                          HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
472                         if (ipv4_spec->hdr.next_proto_id) {
473                                 filter->ip_protocol =
474                                         ipv4_spec->hdr.next_proto_id;
475                                 if (use_ntuple)
476                                         en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
477                                 else
478                                         en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
479                         }
480                         break;
481                 case RTE_FLOW_ITEM_TYPE_IPV6:
482                         ipv6_spec =
483                                 (const struct rte_flow_item_ipv6 *)item->spec;
484                         ipv6_mask =
485                                 (const struct rte_flow_item_ipv6 *)item->mask;
486
487                         /* Only IP DST and SRC fields are maskable. */
488                         if (ipv6_mask->hdr.vtc_flow ||
489                             ipv6_mask->hdr.payload_len ||
490                             ipv6_mask->hdr.proto ||
491                             ipv6_mask->hdr.hop_limits) {
492                                 rte_flow_error_set(error, EINVAL,
493                                            RTE_FLOW_ERROR_TYPE_ITEM,
494                                            item,
495                                            "Invalid IPv6 mask.");
496                                 return -rte_errno;
497                         }
498
499                         if (use_ntuple)
500                                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
501                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
502                         else
503                                 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
504                                         EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
505                         rte_memcpy(filter->src_ipaddr,
506                                    ipv6_spec->hdr.src_addr, 16);
507                         rte_memcpy(filter->dst_ipaddr,
508                                    ipv6_spec->hdr.dst_addr, 16);
509                         if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
510                                 rte_memcpy(filter->src_ipaddr_mask,
511                                            ipv6_mask->hdr.src_addr, 16);
512                                 en |= !use_ntuple ? 0 :
513                                     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
514                         }
515                         if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
516                                 rte_memcpy(filter->dst_ipaddr_mask,
517                                            ipv6_mask->hdr.dst_addr, 16);
518                                 en |= !use_ntuple ? 0 :
519                                      NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
520                         }
521                         filter->ip_addr_type = use_ntuple ?
522                                 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
523                                 EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
524                         break;
525                 case RTE_FLOW_ITEM_TYPE_TCP:
526                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
527                         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
528
529                         /* Check TCP mask. Only DST & SRC ports are maskable */
530                         if (tcp_mask->hdr.sent_seq ||
531                             tcp_mask->hdr.recv_ack ||
532                             tcp_mask->hdr.data_off ||
533                             tcp_mask->hdr.tcp_flags ||
534                             tcp_mask->hdr.rx_win ||
535                             tcp_mask->hdr.cksum ||
536                             tcp_mask->hdr.tcp_urp) {
537                                 rte_flow_error_set(error, EINVAL,
538                                            RTE_FLOW_ERROR_TYPE_ITEM,
539                                            item,
540                                            "Invalid TCP mask");
541                                 return -rte_errno;
542                         }
543                         filter->src_port = tcp_spec->hdr.src_port;
544                         filter->dst_port = tcp_spec->hdr.dst_port;
545                         if (use_ntuple)
546                                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
547                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
548                         else
549                                 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
550                                         EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
551                         if (tcp_mask->hdr.dst_port) {
552                                 filter->dst_port_mask = tcp_mask->hdr.dst_port;
553                                 en |= !use_ntuple ? 0 :
554                                   NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
555                         }
556                         if (tcp_mask->hdr.src_port) {
557                                 filter->src_port_mask = tcp_mask->hdr.src_port;
558                                 en |= !use_ntuple ? 0 :
559                                   NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
560                         }
561                         break;
562                 case RTE_FLOW_ITEM_TYPE_UDP:
563                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
564                         udp_mask = (const struct rte_flow_item_udp *)item->mask;
565
566                         if (udp_mask->hdr.dgram_len ||
567                             udp_mask->hdr.dgram_cksum) {
568                                 rte_flow_error_set(error, EINVAL,
569                                            RTE_FLOW_ERROR_TYPE_ITEM,
570                                            item,
571                                            "Invalid UDP mask");
572                                 return -rte_errno;
573                         }
574
575                         filter->src_port = udp_spec->hdr.src_port;
576                         filter->dst_port = udp_spec->hdr.dst_port;
577                         if (use_ntuple)
578                                 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
579                                         NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
580                         else
581                                 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
582                                         EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
583
584                         if (udp_mask->hdr.dst_port) {
585                                 filter->dst_port_mask = udp_mask->hdr.dst_port;
586                                 en |= !use_ntuple ? 0 :
587                                   NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
588                         }
589                         if (udp_mask->hdr.src_port) {
590                                 filter->src_port_mask = udp_mask->hdr.src_port;
591                                 en |= !use_ntuple ? 0 :
592                                   NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
593                         }
594                         break;
595                 case RTE_FLOW_ITEM_TYPE_VXLAN:
596                         vxlan_spec =
597                                 (const struct rte_flow_item_vxlan *)item->spec;
598                         vxlan_mask =
599                                 (const struct rte_flow_item_vxlan *)item->mask;
600                         /* Check if VXLAN item is used to describe protocol.
601                          * If yes, both spec and mask should be NULL.
602                          * If no, both spec and mask shouldn't be NULL.
603                          */
604                         if ((!vxlan_spec && vxlan_mask) ||
605                             (vxlan_spec && !vxlan_mask)) {
606                                 rte_flow_error_set(error, EINVAL,
607                                            RTE_FLOW_ERROR_TYPE_ITEM,
608                                            item,
609                                            "Invalid VXLAN item");
610                                 return -rte_errno;
611                         }
612
613                         if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
614                             vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
615                             vxlan_spec->flags != 0x8) {
616                                 rte_flow_error_set(error, EINVAL,
617                                            RTE_FLOW_ERROR_TYPE_ITEM,
618                                            item,
619                                            "Invalid VXLAN item");
620                                 return -rte_errno;
621                         }
622
623                         /* Check if VNI is masked. */
624                         if (vxlan_spec && vxlan_mask) {
625                                 vni_masked =
626                                         !!memcmp(vxlan_mask->vni, vni_mask,
627                                                  RTE_DIM(vni_mask));
628                                 if (vni_masked) {
629                                         rte_flow_error_set(error, EINVAL,
630                                                    RTE_FLOW_ERROR_TYPE_ITEM,
631                                                    item,
632                                                    "Invalid VNI mask");
633                                         return -rte_errno;
634                                 }
635
636                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
637                                            vxlan_spec->vni, 3);
638                                 filter->vni =
639                                         rte_be_to_cpu_32(tenant_id_be);
640                                 filter->tunnel_type =
641                                  CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
642                         }
643                         break;
644                 case RTE_FLOW_ITEM_TYPE_NVGRE:
645                         nvgre_spec =
646                                 (const struct rte_flow_item_nvgre *)item->spec;
647                         nvgre_mask =
648                                 (const struct rte_flow_item_nvgre *)item->mask;
649                         /* Check if NVGRE item is used to describe protocol.
650                          * If yes, both spec and mask should be NULL.
651                          * If no, both spec and mask shouldn't be NULL.
652                          */
653                         if ((!nvgre_spec && nvgre_mask) ||
654                             (nvgre_spec && !nvgre_mask)) {
655                                 rte_flow_error_set(error, EINVAL,
656                                            RTE_FLOW_ERROR_TYPE_ITEM,
657                                            item,
658                                            "Invalid NVGRE item");
659                                 return -rte_errno;
660                         }
661
662                         if (nvgre_spec->c_k_s_rsvd0_ver != rte_cpu_to_be_16(0x2000) ||
663                             nvgre_spec->protocol != rte_cpu_to_be_16(0x6558)) {
664                                 rte_flow_error_set(error, EINVAL,
665                                            RTE_FLOW_ERROR_TYPE_ITEM,
666                                            item,
667                                            "Invalid NVGRE item");
668                                 return -rte_errno;
669                         }
670
671                         if (nvgre_spec && nvgre_mask) {
672                                 tni_masked =
673                                         !!memcmp(nvgre_mask->tni, tni_mask,
674                                                  RTE_DIM(tni_mask));
675                                 if (tni_masked) {
676                                         rte_flow_error_set(error, EINVAL,
677                                                        RTE_FLOW_ERROR_TYPE_ITEM,
678                                                        item,
679                                                        "Invalid TNI mask");
680                                         return -rte_errno;
681                                 }
682                                 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
683                                            nvgre_spec->tni, 3);
684                                 filter->vni =
685                                         rte_be_to_cpu_32(tenant_id_be);
686                                 filter->tunnel_type =
687                                  CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
688                         }
689                         break;
690                 default:
691                         break;
692                 }
693                 item++;
694         }
695         filter->enables = en;
696
697         return 0;
698 }
699
700 /* Parse attributes */
701 static int
702 bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
703                      struct rte_flow_error *error)
704 {
705         /* Must be input direction */
706         if (!attr->ingress) {
707                 rte_flow_error_set(error, EINVAL,
708                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
709                                    attr, "Only support ingress.");
710                 return -rte_errno;
711         }
712
713         /* Not supported */
714         if (attr->egress) {
715                 rte_flow_error_set(error, EINVAL,
716                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
717                                    attr, "No support for egress.");
718                 return -rte_errno;
719         }
720
721         /* Not supported */
722         if (attr->priority) {
723                 rte_flow_error_set(error, EINVAL,
724                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
725                                    attr, "No support for priority.");
726                 return -rte_errno;
727         }
728
729         /* Not supported */
730         if (attr->group) {
731                 rte_flow_error_set(error, EINVAL,
732                                    RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
733                                    attr, "No support for group.");
734                 return -rte_errno;
735         }
736
737         return 0;
738 }
739
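/*
 * Return the L2 filter to pair with the flow: reuse the port's default L2
 * filter when the destination MAC matches it, otherwise allocate and
 * program a new L2 filter on the given VNIC.
 */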
740 struct bnxt_filter_info *
741 bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
742                    struct bnxt_vnic_info *vnic)
743 {
744         struct bnxt_filter_info *filter1, *f0;
745         struct bnxt_vnic_info *vnic0;
746         int rc;
747
748         vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
749         f0 = STAILQ_FIRST(&vnic0->filter);
750
751         /* This flow has the same DST MAC as the port/L2 filter. */
752         if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
753                 return f0;
754
755         /* This flow needs a DST MAC that differs from the port/L2 filter. */
756         RTE_LOG(DEBUG, PMD, "Create L2 filter for DST MAC\n");
757         filter1 = bnxt_get_unused_filter(bp);
            if (filter1 == NULL)
                    return NULL;
758         filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
759         filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
760                         L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
761         memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
762         memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
763         rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
764                                      filter1);
765         if (rc) {
766                 bnxt_free_filter(bp, filter1);
767                 return NULL;
768         }
769         STAILQ_INSERT_TAIL(&vnic->filter, filter1, next);
770         return filter1;
771 }
772
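/*
 * Validate and translate a complete flow request: parse the pattern and
 * attributes into 'filter', then resolve the single supported action
 * (QUEUE, DROP or COUNT) to a destination VNIC and its L2 filter.
 */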
773 static int
774 bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
775                              const struct rte_flow_item pattern[],
776                              const struct rte_flow_action actions[],
777                              const struct rte_flow_attr *attr,
778                              struct rte_flow_error *error,
779                              struct bnxt_filter_info *filter)
780 {
781         const struct rte_flow_action *act = nxt_non_void_action(actions);
782         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
783         const struct rte_flow_action_queue *act_q;
784         struct bnxt_vnic_info *vnic, *vnic0;
785         struct bnxt_filter_info *filter1;
786         int rc;
787
788         if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
789                 RTE_LOG(ERR, PMD, "Cannot create flow on RSS queues\n");
790                 rte_flow_error_set(error, EINVAL,
791                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
792                                    "Cannot create flow on RSS queues");
793                 rc = -rte_errno;
794                 goto ret;
795         }
796
797         rc = bnxt_validate_and_parse_flow_type(pattern, error, filter);
798         if (rc != 0)
799                 goto ret;
800
801         rc = bnxt_flow_parse_attr(attr, error);
802         if (rc != 0)
803                 goto ret;
804         /* Only the ingress attribute is supported right now. */
805         filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
806
807         switch (act->type) {
808         case RTE_FLOW_ACTION_TYPE_QUEUE:
809                 /* Allow this flow. Redirect to a VNIC. */
810                 act_q = (const struct rte_flow_action_queue *)act->conf;
811                 if (act_q->index >= bp->rx_nr_rings) {
812                         rte_flow_error_set(error, EINVAL,
813                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
814                                            "Invalid queue ID.");
815                         rc = -rte_errno;
816                         goto ret;
817                 }
818                 RTE_LOG(DEBUG, PMD, "Queue index %d\n", act_q->index);
819
820                 vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
821                 vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
822                 if (vnic == NULL) {
823                         rte_flow_error_set(error, EINVAL,
824                                            RTE_FLOW_ERROR_TYPE_ACTION, act,
825                                            "No matching VNIC for queue ID.");
826                         rc = -rte_errno;
827                         goto ret;
828                 }
829                 filter->dst_id = vnic->fw_vnic_id;
830                 filter1 = bnxt_get_l2_filter(bp, filter, vnic);
                    if (filter1 == NULL) {
                            rc = -ENOSPC;
                            goto ret;
                    }
831                 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
832                 RTE_LOG(DEBUG, PMD, "VNIC found\n");
833                 break;
834         case RTE_FLOW_ACTION_TYPE_DROP:
835                 vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
836                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                    if (filter1 == NULL) {
                            rc = -ENOSPC;
                            goto ret;
                    }
837                 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
838                 if (filter->filter_type == HWRM_CFA_EM_FILTER)
839                         filter->flags =
840                                 HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
841                 else
842                         filter->flags =
843                                 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
844                 break;
845         case RTE_FLOW_ACTION_TYPE_COUNT:
846                 vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
847                 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                    if (filter1 == NULL) {
                            rc = -ENOSPC;
                            goto ret;
                    }
848                 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
849                 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
850                 break;
851         default:
852                 rte_flow_error_set(error, EINVAL,
853                                    RTE_FLOW_ERROR_TYPE_ACTION, act,
854                                    "Invalid action.");
855                 rc = -rte_errno;
856                 goto ret;
857         }
858
859         act = nxt_non_void_action(++act);
860         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
861                 rte_flow_error_set(error, EINVAL,
862                                    RTE_FLOW_ERROR_TYPE_ACTION,
863                                    act, "Invalid action.");
864                 rc = -rte_errno;
865                 goto ret;
866         }
867 ret:
868         return rc;
869 }
870
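/*
 * rte_flow validate callback: parse the flow into a scratch filter that is
 * released before returning.
 */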
871 static int
872 bnxt_flow_validate(struct rte_eth_dev *dev,
873                 const struct rte_flow_attr *attr,
874                 const struct rte_flow_item pattern[],
875                 const struct rte_flow_action actions[],
876                 struct rte_flow_error *error)
877 {
878         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
879         struct bnxt_filter_info *filter;
880         int ret = 0;
881
882         ret = bnxt_flow_args_validate(attr, pattern, actions, error);
883         if (ret != 0)
884                 return ret;
885
886         filter = bnxt_get_unused_filter(bp);
887         if (filter == NULL) {
888                 RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
889                 return -ENOMEM;
890         }
891
892         ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
893                                            error, filter);
894         /* No need to hold on to this filter if we are just validating flow */
895         bnxt_free_filter(bp, filter);
896
897         return ret;
898 }
899
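/*
 * rte_flow create callback: validate and parse the flow, program the
 * resulting EM or n-tuple filter in HW and link the flow to its VNIC.
 */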
900 static struct rte_flow *
901 bnxt_flow_create(struct rte_eth_dev *dev,
902                   const struct rte_flow_attr *attr,
903                   const struct rte_flow_item pattern[],
904                   const struct rte_flow_action actions[],
905                   struct rte_flow_error *error)
906 {
907         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
908         struct bnxt_filter_info *filter;
909         struct bnxt_vnic_info *vnic = NULL;
910         struct rte_flow *flow;
911         unsigned int i;
912         int ret = 0;
913
914         flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
915         if (!flow) {
916                 rte_flow_error_set(error, ENOMEM,
917                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
918                                    "Failed to allocate memory");
919                 return flow;
920         }
921
922         ret = bnxt_flow_args_validate(attr, pattern, actions, error);
923         if (ret != 0) {
924                 RTE_LOG(ERR, PMD, "Not a valid flow.\n");
925                 goto free_flow;
926         }
927
928         filter = bnxt_get_unused_filter(bp);
929         if (filter == NULL) {
930                 RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
                    ret = -ENOMEM;
931                 goto free_flow;
932         }
933
934         ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
935                                            error, filter);
936         if (ret != 0)
937                 goto free_filter;
938
939         if (filter->filter_type == HWRM_CFA_EM_FILTER) {
940                 filter->enables |=
941                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
942                 ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
943         }
944         if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
945                 filter->enables |=
946                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
947                 ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
948         }
949
950         for (i = 0; i < bp->nr_vnics; i++) {
951                 vnic = &bp->vnic_info[i];
952                 if (filter->dst_id == vnic->fw_vnic_id)
953                         break;
954         }
955
956         if (!ret) {
957                 flow->filter = filter;
958                 flow->vnic = vnic;
959                 RTE_LOG(DEBUG, PMD, "Successfully created flow.\n");
960                 STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
961                 return flow;
962         }
963 free_filter:
964         bnxt_free_filter(bp, filter);
965 free_flow:
966         RTE_LOG(ERR, PMD, "Failed to create flow.\n");
967         rte_flow_error_set(error, -ret,
968                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
969                            "Failed to create flow.");
970         rte_free(flow);
971         flow = NULL;
972         return flow;
973 }
974
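/* rte_flow destroy callback: clear the HW filter and free the flow. */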
975 static int
976 bnxt_flow_destroy(struct rte_eth_dev *dev,
977                   struct rte_flow *flow,
978                   struct rte_flow_error *error)
979 {
980         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
981         struct bnxt_filter_info *filter = flow->filter;
982         struct bnxt_vnic_info *vnic = flow->vnic;
983         int ret = 0;
984
985         if (filter->filter_type == HWRM_CFA_EM_FILTER)
986                 ret = bnxt_hwrm_clear_em_filter(bp, filter);
987         if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
988                 ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
989
990         if (!ret) {
991                 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
992                 rte_free(flow);
993         } else {
994                 rte_flow_error_set(error, -ret,
995                                    RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
996                                    "Failed to destroy flow.");
997         }
998
999         return ret;
1000 }
1001
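/* rte_flow flush callback: clear the HW filter of every flow on every VNIC. */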
1002 static int
1003 bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1004 {
1005         struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1006         struct bnxt_vnic_info *vnic;
1007         struct rte_flow *flow;
1008         unsigned int i;
1009         int ret = 0;
1010
1011         for (i = 0; i < bp->nr_vnics; i++) {
1012                 vnic = &bp->vnic_info[i];
1013                 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1014                         struct bnxt_filter_info *filter = flow->filter;
1015
1016                         if (filter->filter_type == HWRM_CFA_EM_FILTER)
1017                                 ret = bnxt_hwrm_clear_em_filter(bp, filter);
1018                         if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1019                                 ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1020
1021                         if (ret) {
1022                                 rte_flow_error_set(error, -ret,
1023                                                    RTE_FLOW_ERROR_TYPE_HANDLE,
1024                                                    NULL,
1025                                                    "Failed to flush flow in HW.");
1026                                 return -rte_errno;
1027                         }
1028
1029                         STAILQ_REMOVE(&vnic->flow_list, flow,
1030                                       rte_flow, next);
1031                         rte_free(flow);
1032                 }
1033         }
1034
1035         return ret;
1036 }
1037
1038 const struct rte_flow_ops bnxt_flow_ops = {
1039         .validate = bnxt_flow_validate,
1040         .create = bnxt_flow_create,
1041         .destroy = bnxt_flow_destroy,
1042         .flush = bnxt_flow_flush,
1043 };