net/bnxt: support creating SMAC and inner DMAC filters
[dpdk.git] drivers/net/bnxt/bnxt_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_vnic.h"
#include "bnxt_util.h"
#include "hsi_struct_def_dpdk.h"

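/* Verify that the caller passed non-NULL attribute, pattern, and action
 * arrays before any parsing is attempted.
 */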
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL,
                                   "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL,
                                   "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL,
                                   "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}

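/* Skip over VOID entries; callers rely on the END sentinel that rte_flow
 * guarantees at the end of every item/action array.
 */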
static const struct rte_flow_item *
bnxt_flow_non_void_item(const struct rte_flow_item *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return cur;
                cur++;
        }
}

static const struct rte_flow_action *
bnxt_flow_non_void_action(const struct rte_flow_action *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return cur;
                cur++;
        }
}

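/* Classify the pattern: L3/L4 items select an ntuple filter, while L2
 * items (ANY/ETH/VLAN) force an exact-match filter.  Returns 1 for
 * ntuple, 0 for exact match, negative on an invalid combination.
 */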
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
                       struct rte_flow_error *error)
{
        const struct rte_flow_item *item =
                bnxt_flow_non_void_item(pattern);
        int use_ntuple = 1;
        bool has_vlan = false;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ANY:
                case RTE_FLOW_ITEM_TYPE_ETH:
                        use_ntuple = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        use_ntuple = 0;
                        has_vlan = true;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
                case RTE_FLOW_ITEM_TYPE_TCP:
                case RTE_FLOW_ITEM_TYPE_UDP:
                        /* FALLTHROUGH */
                        /* need ntuple match, reset exact match */
                        use_ntuple |= 1;
                        break;
                default:
                        PMD_DRV_LOG(DEBUG, "Unknown Flow type\n");
                        use_ntuple |= 0;
                }
                item++;
        }

        if (has_vlan && use_ntuple) {
                PMD_DRV_LOG(ERR,
                            "VLAN flow cannot use NTUPLE filter\n");
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   item,
                                   "Cannot use VLAN with NTUPLE");
                return -rte_errno;
        }

        return use_ntuple;
}

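/* Walk the pattern and translate each item into bnxt_filter_info fields,
 * setting an "enables" bit for every field the hardware should match on.
 */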
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                                  const struct rte_flow_attr *attr,
                                  const struct rte_flow_item pattern[],
                                  struct rte_flow_error *error,
                                  struct bnxt_filter_info *filter)
{
        const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;
        const struct rte_flow_item_gre *gre_spec;
        const struct rte_flow_item_gre *gre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;
        uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
        uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
        const struct rte_flow_item_vf *vf_spec;
        uint32_t tenant_id_be = 0, valid_flags = 0;
        bool vni_masked = false;
        bool tni_masked = false;
        uint32_t en_ethertype;
        uint8_t inner = 0;
        uint32_t vf = 0;
        uint32_t en = 0;
        int use_ntuple;
        int dflt_vnic;

        use_ntuple = bnxt_filter_type_check(pattern, error);
        PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
        if (use_ntuple < 0)
                return use_ntuple;

        filter->filter_type = use_ntuple ?
                HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
        en_ethertype = use_ntuple ?
                NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
                EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (item->last) {
                        /* last or range is NOT supported as match criteria */
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "No support for range");
                        return -rte_errno;
                }

                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ANY:
                        inner =
                        ((const struct rte_flow_item_any *)item->spec)->num > 3;
                        if (inner)
                                PMD_DRV_LOG(DEBUG, "Parse inner header\n");
                        break;
                case RTE_FLOW_ITEM_TYPE_ETH:
                        if (!item->spec || !item->mask)
                                break;

                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        /* Source and destination MAC address masks must not
                         * be partially set: each must be either all 0's or
                         * all 1's.
                         */
                        if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
                             !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
                            (!rte_is_zero_ether_addr(&eth_mask->dst) &&
                             !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "MAC_addr mask not valid");
                                return -rte_errno;
                        }

                        /* Mask is not allowed. Only exact matches are */
                        if (eth_mask->type &&
                            eth_mask->type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "ethertype mask not valid");
                                return -rte_errno;
                        }

                        if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                                rte_memcpy(filter->dst_macaddr,
                                           &eth_spec->dst, RTE_ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
                                valid_flags |= inner ?
                                        BNXT_FLOW_L2_INNER_DST_VALID_FLAG :
                                        BNXT_FLOW_L2_DST_VALID_FLAG;
                        }

                        if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
                                rte_memcpy(filter->src_macaddr,
                                           &eth_spec->src, RTE_ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
                                valid_flags |= inner ?
                                        BNXT_FLOW_L2_INNER_SRC_VALID_FLAG :
                                        BNXT_FLOW_L2_SRC_VALID_FLAG;
                        } /*
                           * else {
                           *  PMD_DRV_LOG(ERR, "Handle this condition\n");
                           * }
                           */
                        if (eth_mask->type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(eth_spec->type);
                                en |= en_ethertype;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec = item->spec;
                        vlan_mask = item->mask;
                        if (en & en_ethertype) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN TPID matching is not"
                                                   " supported");
                                return -rte_errno;
                        }
                        if (vlan_mask->tci &&
                            vlan_mask->tci == RTE_BE16(0x0fff)) {
                                /* Only the VLAN ID can be matched. */
                                filter->l2_ovlan =
                                        rte_be_to_cpu_16(vlan_spec->tci &
                                                         RTE_BE16(0x0fff));
                                en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
                        } else {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN mask is invalid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type &&
                            vlan_mask->inner_type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "inner ethertype mask not"
                                                   " valid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(vlan_spec->inner_type);
                                en |= en_ethertype;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        /* If mask is not involved, we could use EM filters. */
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.type_of_service ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.time_to_live ||
                            ipv4_mask->hdr.next_proto_id ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
                        filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

                        if (ipv4_mask->hdr.src_addr) {
                                filter->src_ipaddr_mask[0] =
                                        ipv4_mask->hdr.src_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }

                        if (ipv4_mask->hdr.dst_addr) {
                                filter->dst_ipaddr_mask[0] =
                                        ipv4_mask->hdr.dst_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }

                        filter->ip_addr_type = use_ntuple ?
                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;

                        if (ipv4_spec->hdr.next_proto_id) {
                                filter->ip_protocol =
                                        ipv4_spec->hdr.next_proto_id;
                                if (use_ntuple)
                                        en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
                                else
                                        en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv6_mask->hdr.vtc_flow ||
                            ipv6_mask->hdr.payload_len ||
                            ipv6_mask->hdr.proto ||
                            ipv6_mask->hdr.hop_limits) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

                        rte_memcpy(filter->src_ipaddr,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->dst_ipaddr,
                                   ipv6_spec->hdr.dst_addr, 16);

                        if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
                                                   16)) {
                                rte_memcpy(filter->src_ipaddr_mask,
                                           ipv6_mask->hdr.src_addr, 16);
                                en |= !use_ntuple ? 0 :
                                    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }

                        if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
                                                   16)) {
                                rte_memcpy(filter->dst_ipaddr_mask,
                                           ipv6_mask->hdr.dst_addr, 16);
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }

                        filter->ip_addr_type = use_ntuple ?
                                NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
                                EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        /* Check TCP mask. Only DST & SRC ports are maskable */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        filter->src_port = tcp_spec->hdr.src_port;
                        filter->dst_port = tcp_spec->hdr.dst_port;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (tcp_mask->hdr.dst_port) {
                                filter->dst_port_mask = tcp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }

                        if (tcp_mask->hdr.src_port) {
                                filter->src_port_mask = tcp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        filter->src_port = udp_spec->hdr.src_port;
                        filter->dst_port = udp_spec->hdr.dst_port;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (udp_mask->hdr.dst_port) {
                                filter->dst_port_mask = udp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }

                        if (udp_mask->hdr.src_port) {
                                filter->src_port_mask = udp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        if (!vxlan_spec && !vxlan_mask) {
                                filter->tunnel_type =
                                CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                                break;
                        }

                        if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
                            vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
                            vxlan_spec->flags != 0x8) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        /* Check if VNI is masked. */
                        if (vxlan_spec && vxlan_mask) {
                                vni_masked =
                                        !!memcmp(vxlan_mask->vni, vni_mask,
                                                 RTE_DIM(vni_mask));
                                if (vni_masked) {
                                        rte_flow_error_set
                                                (error,
                                                 EINVAL,
                                                 RTE_FLOW_ERROR_TYPE_ITEM,
                                                 item,
                                                 "Invalid VNI mask");
                                        return -rte_errno;
                                }

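                                /* The 24-bit VNI occupies the three
                                 * low-order bytes of the big-endian
                                 * tenant ID.
                                 */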
                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           vxlan_spec->vni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (!nvgre_spec && !nvgre_mask) {
                                filter->tunnel_type =
                                CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                                break;
                        }

                        if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
                            nvgre_spec->protocol != 0x6558) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (nvgre_spec && nvgre_mask) {
                                tni_masked =
                                        !!memcmp(nvgre_mask->tni, tni_mask,
                                                 RTE_DIM(tni_mask));
                                if (tni_masked) {
                                        rte_flow_error_set
                                                (error,
                                                 EINVAL,
                                                 RTE_FLOW_ERROR_TYPE_ITEM,
                                                 item,
                                                 "Invalid TNI mask");
                                        return -rte_errno;
                                }
                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           nvgre_spec->tni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_GRE:
                        gre_spec = item->spec;
                        gre_mask = item->mask;

                        /*
                         * Check if GRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if (!!gre_spec ^ !!gre_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid GRE item");
                                return -rte_errno;
                        }

                        if (!gre_spec && !gre_mask) {
                                filter->tunnel_type =
                                CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE;
                                break;
                        }
                        break;

                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = item->spec;
                        vf = vf_spec->id;
                        if (!BNXT_PF(bp)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Configuring on a VF!");
                                return -rte_errno;
                        }

                        if (vf >= bp->pdev->max_vfs) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Incorrect VF id!");
                                return -rte_errno;
                        }

                        if (!attr->transfer) {
                                rte_flow_error_set(error,
                                                   ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Matching VF traffic without"
                                                   " affecting it (transfer attribute)"
                                                   " is unsupported");
                                return -rte_errno;
                        }

                        filter->mirror_vnic_id =
                        dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                        if (dflt_vnic < 0) {
                                /* This simply indicates there's no driver
                                 * loaded. This is not an error.
                                 */
                                rte_flow_error_set
                                        (error,
                                         EINVAL,
                                         RTE_FLOW_ERROR_TYPE_ITEM,
                                         item,
                                         "Unable to get default VNIC for VF");
                                return -rte_errno;
                        }

                        filter->mirror_vnic_id = dflt_vnic;
                        en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
                        break;
                default:
                        break;
                }
                item++;
        }
        filter->enables = en;
        filter->valid_flags = valid_flags;

        return 0;
}

/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr,
                                   "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr,
                                   "No support for egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr,
                                   "No support for priority.");
                return -rte_errno;
        }
        return 0;
}
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
                   struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter1, *f0;
        struct bnxt_vnic_info *vnic0;
        int rc;

        vnic0 = &bp->vnic_info[0];
        f0 = STAILQ_FIRST(&vnic0->filter);

        /* This flow has same DST MAC as the port/l2 filter. */
        if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
                return f0;

        /* Alloc new L2 filter.
         * This flow needs MAC filter which does not match port/l2 MAC.
         */
        filter1 = bnxt_get_unused_filter(bp);
        if (filter1 == NULL)
                return NULL;

        filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE;
        filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
            nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) {
                filter1->flags |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
                PMD_DRV_LOG(DEBUG, "Create Outer filter\n");
        }

        if (nf->filter_type == HWRM_CFA_L2_FILTER &&
            (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
             nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) {
                PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n");
                filter1->flags |=
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID;
                memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN);
        } else {
                PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
                memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
        }

        if (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG ||
            nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG) {
                /* Tell the FW where to place the filter in the table. */
                filter1->pri_hint =
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER;
                /* This will place the filter in TCAM */
                filter1->l2_filter_id_hint = (uint64_t)-1;
        }

        filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
        memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
                                     filter1);
        if (rc) {
                bnxt_free_filter(bp, filter1);
                return NULL;
        }
        return filter1;
}

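/* Prepare a VNIC for use as a flow target: allocate the ring group and
 * HWRM VNIC, add an RSS context when more than one Rx ring is used, and
 * program VLAN stripping to match the port configuration.
 */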
static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        uint64_t rx_offloads = dev_conf->rxmode.offloads;
        int rc;

        rc = bnxt_vnic_grp_alloc(bp, vnic);
        if (rc)
                goto ret;

        rc = bnxt_hwrm_vnic_alloc(bp, vnic);
        if (rc) {
                PMD_DRV_LOG(ERR, "HWRM vnic alloc failure rc: %x\n", rc);
                goto ret;
        }
        bp->nr_vnics++;

        /* RSS context is required only when there is more than one RSS ring */
        if (vnic->rx_queue_cnt > 1) {
                rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0 /* ctx_idx 0 */);
                if (rc) {
                        PMD_DRV_LOG(ERR,
                                    "HWRM vnic ctx alloc failure: %x\n", rc);
                        goto ret;
                }
        } else {
                PMD_DRV_LOG(DEBUG, "No RSS context required\n");
        }

        if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                vnic->vlan_strip = true;
        else
                vnic->vlan_strip = false;

        rc = bnxt_hwrm_vnic_cfg(bp, vnic);
        if (rc)
                goto ret;

        bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

ret:
        return rc;
}

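/* Parse the flow's actions and bind the parsed filter to its target:
 * a VNIC queue, a drop, a counter, or a VF redirect.
 */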
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             const struct rte_flow_attr *attr,
                             struct rte_flow_error *error,
                             struct bnxt_filter_info *filter)
{
        const struct rte_flow_action *act =
                bnxt_flow_non_void_action(actions);
        struct bnxt *bp = dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_vf *act_vf;
        struct bnxt_vnic_info *vnic, *vnic0;
        struct bnxt_filter_info *filter1;
        struct bnxt_rx_queue *rxq = NULL;
        int dflt_vnic, vnic_id;
        uint32_t vf = 0;
        int rc;

        rc = bnxt_validate_and_parse_flow_type(bp, attr, pattern, error,
                                               filter);
        if (rc != 0)
                goto ret;

        rc = bnxt_flow_parse_attr(attr, error);
        if (rc != 0)
                goto ret;

        /* We support only the ingress attribute right now. */
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                /* Allow this flow. Redirect to a VNIC. */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                if (!act_q->index || act_q->index >= bp->rx_nr_rings) {
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Invalid queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

                vnic_id = attr->group;
                if (!vnic_id) {
                        PMD_DRV_LOG(DEBUG, "Group id is 0\n");
                        vnic_id = act_q->index;
                }

                vnic = &bp->vnic_info[vnic_id];
                if (vnic == NULL) {
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "No matching VNIC found.");
                        rc = -rte_errno;
                        goto ret;
                }
                if (vnic->rx_queue_cnt) {
                        if (vnic->start_grp_id != act_q->index) {
                                PMD_DRV_LOG(ERR,
                                            "VNIC already in use\n");
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   act,
                                                   "VNIC already in use");
                                rc = -rte_errno;
                                goto ret;
                        }
                        goto use_vnic;
                }

                rxq = bp->rx_queues[act_q->index];

                if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
                    vnic->fw_vnic_id != INVALID_HW_RING_ID)
                        goto use_vnic;

                if (!rxq ||
                    bp->vnic_info[0].fw_grp_ids[act_q->index] !=
                    INVALID_HW_RING_ID ||
                    !rxq->rx_deferred_start) {
                        PMD_DRV_LOG(ERR,
                                    "Queue invalid or used with other VNIC\n");
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Queue invalid or in use");
                        rc = -rte_errno;
                        goto ret;
                }

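                /* Attach the queue to this VNIC as a single-queue group. */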
use_vnic:
                rxq->vnic = vnic;
                vnic->rx_queue_cnt++;
                vnic->start_grp_id = act_q->index;
                vnic->end_grp_id = act_q->index;
                vnic->func_default = 0;	/* This is not a default VNIC. */

                PMD_DRV_LOG(DEBUG, "VNIC found\n");

                rc = bnxt_vnic_prep(bp, vnic);
                if (rc)
                        goto ret;

                PMD_DRV_LOG(DEBUG,
                            "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
                            act_q->index, vnic, vnic->fw_grp_ids);

                vnic->ff_pool_idx = vnic_id;
                PMD_DRV_LOG(DEBUG,
                            "Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
                filter->dst_id = vnic->fw_vnic_id;
                filter1 = bnxt_get_l2_filter(bp, filter, vnic);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }

                if (!(filter->valid_flags &
                      ~(BNXT_FLOW_L2_DST_VALID_FLAG |
                        BNXT_FLOW_L2_SRC_VALID_FLAG |
                        BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
                        BNXT_FLOW_L2_INNER_DST_VALID_FLAG))) {
                        PMD_DRV_LOG(DEBUG, "L2 filter created\n");
                        filter->flags = filter1->flags;
                        filter->enables = filter1->enables;
                        filter->filter_type = HWRM_CFA_L2_FILTER;
                        memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
                        filter->pri_hint = filter1->pri_hint;
                        filter->l2_filter_id_hint = filter1->l2_filter_id_hint;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                break;
        case RTE_FLOW_ACTION_TYPE_DROP:
                vnic0 = &bp->vnic_info[0];
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }

                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        filter->flags =
                                HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
                else
                        filter->flags =
                                HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
                break;
        case RTE_FLOW_ACTION_TYPE_COUNT:
                vnic0 = &bp->vnic_info[0];
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }

                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
                break;
        case RTE_FLOW_ACTION_TYPE_VF:
                act_vf = (const struct rte_flow_action_vf *)act->conf;
                vf = act_vf->id;

                if (filter->tunnel_type ==
                    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
                    filter->tunnel_type ==
                    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) {
                        /* If issued on a VF, ensure id is 0 and is trusted */
                        if (BNXT_VF(bp)) {
                                if (!BNXT_VF_IS_TRUSTED(bp) || vf) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                act,
                                                "Incorrect VF");
                                        rc = -rte_errno;
                                        goto ret;
                                }
                        }

                        filter->enables |= filter->tunnel_type;
                        filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER;
                        goto done;
                }

                if (vf >= bp->pdev->max_vfs) {
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Incorrect VF id!");
                        rc = -rte_errno;
                        goto ret;
                }

                filter->mirror_vnic_id =
                dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                if (dflt_vnic < 0) {
                        /* This simply indicates there's no driver loaded.
                         * This is not an error.
                         */
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Unable to get default VNIC for VF");
                        rc = -rte_errno;
                        goto ret;
                }

                filter->mirror_vnic_id = dflt_vnic;
                filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

                vnic0 = &bp->vnic_info[0];
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }

                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                break;
        case RTE_FLOW_ACTION_TYPE_RSS:
                rte_flow_error_set(error,
                                   ENOTSUP,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "This action is not supported right now.");
                rc = -rte_errno;
                goto ret;
        default:
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }

        if (filter1) {
                bnxt_free_filter(bp, filter1);
                filter1->fw_l2_filter_id = -1;
        }
done:
        act = bnxt_flow_non_void_action(++act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }
ret:
        return rc;
}

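/* Validate a flow by fully parsing it, then discard the scratch filter. */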
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct bnxt *bp = dev->data->dev_private;
        struct bnxt_filter_info *filter;
        int ret = 0;

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0)
                return ret;

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                return -ENOMEM;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        /* No need to hold on to this filter if we are just validating flow */
        filter->fw_l2_filter_id = UINT64_MAX;
        bnxt_free_filter(bp, filter);

        return ret;
}

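/* Look for an existing flow with the same match keys.  Returns -EEXIST
 * when an identical flow already targets the same destination, or -EXDEV
 * when the same flow is being moved to a new queue, in which case the old
 * hardware filter is cleared and its L2 filter ID is reused.
 */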
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
        struct bnxt_filter_info *mf;
        struct rte_flow *flow;
        int i;

        for (i = bp->max_vnics - 1; i >= 0; i--) {
1145                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1146
1147                 if (vnic->fw_vnic_id == INVALID_VNIC_ID)
1148                         continue;
1149
1150                 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1151                         mf = flow->filter;
1152
1153                         if (mf->filter_type == nf->filter_type &&
1154                             mf->flags == nf->flags &&
1155                             mf->src_port == nf->src_port &&
1156                             mf->src_port_mask == nf->src_port_mask &&
1157                             mf->dst_port == nf->dst_port &&
1158                             mf->dst_port_mask == nf->dst_port_mask &&
1159                             mf->ip_protocol == nf->ip_protocol &&
1160                             mf->ip_addr_type == nf->ip_addr_type &&
1161                             mf->ethertype == nf->ethertype &&
1162                             mf->vni == nf->vni &&
1163                             mf->tunnel_type == nf->tunnel_type &&
1164                             mf->l2_ovlan == nf->l2_ovlan &&
1165                             mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
1166                             mf->l2_ivlan == nf->l2_ivlan &&
1167                             mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
1168                             !memcmp(mf->l2_addr, nf->l2_addr,
1169                                     RTE_ETHER_ADDR_LEN) &&
1170                             !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
1171                                     RTE_ETHER_ADDR_LEN) &&
1172                             !memcmp(mf->src_macaddr, nf->src_macaddr,
1173                                     RTE_ETHER_ADDR_LEN) &&
1174                             !memcmp(mf->dst_macaddr, nf->dst_macaddr,
1175                                     RTE_ETHER_ADDR_LEN) &&
1176                             !memcmp(mf->src_ipaddr, nf->src_ipaddr,
1177                                     sizeof(nf->src_ipaddr)) &&
1178                             !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
1179                                     sizeof(nf->src_ipaddr_mask)) &&
1180                             !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
1181                                     sizeof(nf->dst_ipaddr)) &&
1182                             !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
1183                                     sizeof(nf->dst_ipaddr_mask))) {
1184                                 if (mf->dst_id == nf->dst_id)
1185                                         return -EEXIST;
1186                                 /* Clear the new L2 filter that was created
1187                                  * earlier in bnxt_validate_and_parse_flow.
1188                                  */
1189                                 bnxt_hwrm_clear_l2_filter(bp, nf);
1190                                 /*
1191                                  * Same Flow, Different queue
1192                                  * Clear the old ntuple filter
1193                                  * Reuse the matching L2 filter
1194                                  * ID for the new filter
1195                                  */
1196                                 nf->fw_l2_filter_id = mf->fw_l2_filter_id;
1197                                 if (nf->filter_type == HWRM_CFA_EM_FILTER)
1198                                         bnxt_hwrm_clear_em_filter(bp, mf);
1199                                 if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
1200                                         bnxt_hwrm_clear_ntuple_filter(bp, mf);
1201                                 /* Free the old filter, update flow
1202                                  * with new filter
1203                                  */
1204                                 bnxt_free_filter(bp, mf);
1205                                 flow->filter = nf;
1206                                 return -EXDEV;
1207                         }
1208                 }
1209         }
1210         return 0;
1211 }
1212
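/*
 * rte_flow .create op. Parse the flow into a filter, program it through
 * HWRM (EM, ntuple, or tunnel-redirect as appropriate), and attach the
 * new rte_flow to the flow list of the destination VNIC. A flow that
 * matches an existing one but targets a different queue is handled as an
 * update via the -EXDEV path of bnxt_match_filter().
 */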
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_filter_info *filter;
	bool update_flow = false;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;
	uint32_t tun_type;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow, not a trusted VF!");
		return NULL;
	}

	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Not a valid flow.\n");
		goto free_flow;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		ret = -ENOMEM;
		goto free_flow;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	if (ret != 0)
		goto free_filter;

	ret = bnxt_match_filter(bp, filter);
	if (ret == -EEXIST) {
		PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
		/* Clear the filter that was created as part of
		 * validate_and_parse_flow() above.
		 */
		bnxt_hwrm_clear_l2_filter(bp, filter);
		goto free_filter;
	} else if (ret == -EXDEV) {
		PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
		PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
		update_flow = true;
	}

	/* If tunnel redirection to a VF/PF is specified, then only
	 * tunnel_type is set and enables is set to the tunnel type.
	 * Issue the HWRM cmd directly in such a case.
	 */
	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Unable to query tunnel to VF");
			goto free_filter;
		}
		if (tun_type == (1U << filter->tunnel_type)) {
			ret =
			bnxt_hwrm_tunnel_redirect_free(bp,
						       filter->tunnel_type);
			if (ret) {
				PMD_DRV_LOG(ERR,
					    "Unable to free existing tunnel\n");
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Unable to free preexisting "
						   "tunnel on VF");
				goto free_filter;
			}
		}
		ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Unable to redirect tunnel to VF");
			goto free_filter;
		}
		vnic = &bp->vnic_info[0];
		goto done;
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
		filter->enables |=
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
	}

	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
		filter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
	}

	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic->fw_vnic_id != INVALID_VNIC_ID &&
		    filter->dst_id == vnic->fw_vnic_id) {
			PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n",
				    vnic->ff_pool_idx);
			break;
		}
	}
done:
	if (!ret) {
		flow->filter = filter;
		flow->vnic = vnic;
		/* VNIC is set only in case of queue or RSS action */
		if (vnic) {
			/* RxQ0 is not used for flow filters. */
			if (update_flow) {
				ret = -EXDEV;
				goto free_flow;
			}
			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
		}
		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
		return flow;
	}
free_filter:
	bnxt_free_filter(bp, filter);
free_flow:
	if (ret == -EEXIST)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Matching Flow exists.");
	else if (ret == -EXDEV)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Flow with pattern exists, updating destination queue");
	else if (!rte_errno)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");
	rte_free(flow);
	return NULL;
}

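/*
 * Tear down a tunnel-redirect filter. The redirect is freed through HWRM
 * only when the queried tunnel destination maps back to this function's
 * fw_fid; a redirect owned by a different VF is only dropped from the
 * driver's bookkeeping.
 */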
static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
					       struct bnxt_filter_info *filter,
					       struct rte_flow_error *error)
{
	uint16_t tun_dst_fid;
	uint32_t tun_type;
	int ret = 0;

	ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Unable to query tunnel to VF");
		return ret;
	}
	if (tun_type == (1U << filter->tunnel_type)) {
		ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
						     &tun_dst_fid);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   NULL,
					   "tunnel_redirect info cmd fail");
			return ret;
		}
		PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
			    tun_dst_fid + bp->first_vf_id, bp->fw_fid);

		/* The tunnel doesn't belong to this VF, so don't send an HWRM
		 * cmd; just delete the flow from the driver.
		 */
		if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id))
			PMD_DRV_LOG(ERR,
				    "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
		else
			ret = bnxt_hwrm_tunnel_redirect_free(bp,
							filter->tunnel_type);
	}
	return ret;
}

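/*
 * rte_flow .destroy op. Clear the HW filter backing the flow, then unlink
 * the flow from its VNIC's flow list and free both the filter and the
 * rte_flow object.
 */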
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_filter_info *filter = flow->filter;
	struct bnxt_vnic_info *vnic = flow->vnic;
	int ret = 0;

	if (!filter) {
		ret = -EINVAL;
		goto done;
	}

	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_handle_tunnel_redirect_destroy(bp,
							  filter,
							  error);
		if (!ret)
			goto done;
		else
			return ret;
	}

	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		PMD_DRV_LOG(ERR, "Could not find matching flow\n");
	/* The dangling else below pairs with the ntuple check, so the L2
	 * filter is also cleared for EM and plain L2 filter types.
	 */
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
	else
		ret = bnxt_hwrm_clear_l2_filter(bp, filter);

done:
	if (!ret) {
		bnxt_free_filter(bp, filter);
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}

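/*
 * rte_flow .flush op. Walk every active VNIC and destroy all flows on its
 * flow list, stopping at the first HWRM failure.
 */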
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		/* Always pop the head of the list so that an entry freed
		 * below is never dereferenced to advance the iteration.
		 */
		while (!STAILQ_EMPTY(&vnic->flow_list)) {
			struct bnxt_filter_info *filter;

			flow = STAILQ_FIRST(&vnic->flow_list);
			filter = flow->filter;

			if (filter->filter_type ==
			    HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
			    filter->enables == filter->tunnel_type) {
				ret =
				bnxt_handle_tunnel_redirect_destroy(bp,
								    filter,
								    error);
				if (!ret)
					goto done;
				else
					return ret;
			}

			if (filter->filter_type == HWRM_CFA_EM_FILTER)
				ret = bnxt_hwrm_clear_em_filter(bp, filter);
			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
				ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

			if (ret) {
				rte_flow_error_set
					(error,
					 -ret,
					 RTE_FLOW_ERROR_TYPE_HANDLE,
					 NULL,
					 "Failed to flush flow in HW.");
				return -rte_errno;
			}
done:
			bnxt_free_filter(bp, filter);
			STAILQ_REMOVE(&vnic->flow_list, flow,
				      rte_flow, next);
			rte_free(flow);
		}
	}

	return ret;
}

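/*
 * Flow ops handed to the generic rte_flow layer (via the driver's
 * filter_ctrl callback for RTE_ETH_FILTER_GENERIC); applications reach
 * them through the rte_flow_* API rather than calling them directly.
 */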
const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};
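
/*
 * Usage sketch (illustrative only, not part of the driver): a minimal
 * rte_flow request that these ops would service, steering IPv4 traffic
 * for one destination address to an Rx queue. The port ID, queue index,
 * and address values are assumptions made up for the example.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */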