4 * Copyright(c) Broadcom Limited.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Broadcom Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
37 #include <rte_malloc.h>
39 #include <rte_flow_driver.h>
40 #include <rte_tailq.h>
43 #include "bnxt_filter.h"
44 #include "bnxt_hwrm.h"
45 #include "bnxt_vnic.h"
46 #include "hsi_struct_def_dpdk.h"
/*
 * Allocate an L2 filter: pop the first entry from bp->free_filter_list
 * and pre-initialize it as an RX-path L2 MAC-address filter using the
 * port's primary MAC address with an all-ones address mask.
 * NOTE(review): some interleaved lines (braces/returns/error paths) are
 * not visible in this chunk; code is left byte-identical.
 */
52 struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
54 struct bnxt_filter_info *filter;
56 /* Find the 1st unused filter from the free_filter_list pool*/
57 filter = STAILQ_FIRST(&bp->free_filter_list);
/* Pool exhausted — log; presumably returns NULL to the caller (TODO confirm) */
59 PMD_DRV_LOG(ERR, "No more free filter resources\n");
62 STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
64 /* Default to L2 MAC Addr filter */
65 filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
66 filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
67 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
/* Seed match address from the port's primary MAC; match all 6 bytes */
68 memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
70 memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
/*
 * Allocate a per-VF filter from the heap (rte_zmalloc), mark its firmware
 * L2 filter id as unset (UINT64_MAX), and append it to the VF's filter list.
 */
74 struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
76 struct bnxt_filter_info *filter;
78 filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
/* Allocation failure path: log which VF could not get a filter */
80 PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
/* UINT64_MAX == "no firmware filter allocated yet" sentinel */
85 filter->fw_l2_filter_id = UINT64_MAX;
86 STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
/*
 * Initialize the free-filter pool: reset every entry of the preallocated
 * bp->filter_info array (all firmware ids set to the -1 "unset" sentinel)
 * and chain them onto bp->free_filter_list.
 */
90 void bnxt_init_filters(struct bnxt *bp)
92 struct bnxt_filter_info *filter;
/* Pool size is bounded by the device's max L2 contexts */
95 max_filters = bp->max_l2_ctx;
96 STAILQ_INIT(&bp->free_filter_list);
97 for (i = 0; i < max_filters; i++) {
98 filter = &bp->filter_info[i];
/* -1 (all ones) marks each HWRM filter id as not yet allocated */
99 filter->fw_l2_filter_id = -1;
100 filter->fw_em_filter_id = -1;
101 filter->fw_ntuple_filter_id = -1;
102 STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
/*
 * Return every filter attached to any VNIC back to the free pool, then
 * clear all VF L2 filters in firmware.  Does not free memory; filters are
 * recycled onto bp->free_filter_list.
 */
106 void bnxt_free_all_filters(struct bnxt *bp)
108 struct bnxt_vnic_info *vnic;
109 struct bnxt_filter_info *filter, *temp_filter;
/* Walk every function-pool's VNIC list and drain each VNIC's filter list */
112 for (i = 0; i < MAX_FF_POOLS; i++) {
113 STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
114 filter = STAILQ_FIRST(&vnic->filter);
/* Save the successor before unlinking the current filter */
116 temp_filter = STAILQ_NEXT(filter, next);
117 STAILQ_REMOVE(&vnic->filter, filter,
118 bnxt_filter_info, next);
119 STAILQ_INSERT_TAIL(&bp->free_filter_list,
121 filter = temp_filter;
123 STAILQ_INIT(&vnic->filter);
/* Ask firmware to drop each VF's L2 filters */
127 for (i = 0; i < bp->pf.max_vfs; i++) {
128 STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
129 bnxt_hwrm_clear_l2_filter(bp, filter);
/*
 * Release the filter_info array.  Before freeing, sweep the pool for any
 * filter whose firmware L2 id is still set and retry the HWRM free so no
 * firmware resource is leaked.
 */
134 void bnxt_free_filter_mem(struct bnxt *bp)
136 struct bnxt_filter_info *filter;
137 uint16_t max_filters, i;
/* Nothing to do if the pool was never allocated */
140 if (bp->filter_info == NULL)
143 /* Ensure that all filters are freed */
144 max_filters = bp->max_l2_ctx;
145 for (i = 0; i < max_filters; i++) {
146 filter = &bp->filter_info[i];
/* fw_l2_filter_id != -1 means firmware still holds this filter */
147 if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
148 PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n");
149 /* Call HWRM to try to free filter again */
150 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
153 "HWRM filter cannot be freed rc = %d\n",
/* Mark as unset regardless so the slot is reusable */
156 filter->fw_l2_filter_id = UINT64_MAX;
158 STAILQ_INIT(&bp->free_filter_list);
160 rte_free(bp->filter_info);
161 bp->filter_info = NULL;
/*
 * Allocate the zeroed filter_info array (one bnxt_filter_info per L2
 * context the device supports) and store it in bp->filter_info.
 * Returns an int status; error path logs and presumably returns
 * -ENOMEM (TODO confirm — return lines not visible in this chunk).
 */
164 int bnxt_alloc_filter_mem(struct bnxt *bp)
166 struct bnxt_filter_info *filter_mem;
167 uint16_t max_filters;
169 max_filters = bp->max_l2_ctx;
170 /* Allocate memory for VNIC pool and filter pool */
171 filter_mem = rte_zmalloc("bnxt_filter_info",
172 max_filters * sizeof(struct bnxt_filter_info),
174 if (filter_mem == NULL) {
175 PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters",
179 bp->filter_info = filter_mem;
/*
 * Pop and return the first filter from the free pool WITHOUT initializing
 * it (unlike bnxt_alloc_filter, which also seeds L2 defaults).
 */
183 struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
185 struct bnxt_filter_info *filter;
187 /* Find the 1st unused filter from the free_filter_list pool*/
188 filter = STAILQ_FIRST(&bp->free_filter_list);
/* Pool exhausted — log; presumably returns NULL (TODO confirm) */
190 PMD_DRV_LOG(ERR, "No more free filter resources\n");
193 STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
/* Return a filter to the free pool.  No firmware interaction here; the
 * caller is responsible for clearing HWRM state first.
 */
198 void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
200 STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
/*
 * Sanity-check the rte_flow arguments: pattern, actions and attr must all
 * be non-NULL.  On a NULL argument, sets a descriptive rte_flow_error.
 * (Name keeps the existing "agrs" spelling — part of the public symbol.)
 */
204 bnxt_flow_agrs_validate(const struct rte_flow_attr *attr,
205 const struct rte_flow_item pattern[],
206 const struct rte_flow_action actions[],
207 struct rte_flow_error *error)
/* NULL pattern array */
210 rte_flow_error_set(error, EINVAL,
211 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
212 NULL, "NULL pattern.");
/* NULL action array */
217 rte_flow_error_set(error, EINVAL,
218 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
219 NULL, "NULL action.");
/* NULL attribute struct */
224 rte_flow_error_set(error, EINVAL,
225 RTE_FLOW_ERROR_TYPE_ATTR,
226 NULL, "NULL attribute.");
/* Skip leading VOID items and return the first meaningful pattern item. */
233 static const struct rte_flow_item *
234 nxt_non_void_pattern(const struct rte_flow_item *cur)
237 if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
/* Skip leading VOID actions and return the first meaningful action. */
243 static const struct rte_flow_action *
244 nxt_non_void_action(const struct rte_flow_action *cur)
247 if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
/* Scan 'len' bytes; used to decide whether a mask is all-zero.
 * Returns an int truth value (return statements not visible in this chunk).
 */
253 int check_zero_bytes(const uint8_t *bytes, int len)
256 for (i = 0; i < len; i++)
257 if (bytes[i] != 0x00)
/*
 * Classify a pattern as needing an NTUPLE filter or an exact-match (EM)
 * filter.  L3/L4 items (IPV4/IPV6/TCP/UDP) imply ntuple matching; a VLAN
 * item combined with ntuple matching is rejected as invalid.
 */
263 bnxt_filter_type_check(const struct rte_flow_item pattern[],
264 struct rte_flow_error *error __rte_unused)
266 const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
269 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
270 switch (item->type) {
271 case RTE_FLOW_ITEM_TYPE_ETH:
274 case RTE_FLOW_ITEM_TYPE_VLAN:
/* L3/L4 items force ntuple-style matching */
277 case RTE_FLOW_ITEM_TYPE_IPV4:
278 case RTE_FLOW_ITEM_TYPE_IPV6:
279 case RTE_FLOW_ITEM_TYPE_TCP:
280 case RTE_FLOW_ITEM_TYPE_UDP:
282 /* need ntuple match, reset exact match */
/* VLAN + ntuple is an unsupported combination */
285 "VLAN flow cannot use NTUPLE filter\n");
286 rte_flow_error_set(error, EINVAL,
287 RTE_FLOW_ERROR_TYPE_ITEM,
289 "Cannot use VLAN with NTUPLE");
295 PMD_DRV_LOG(ERR, "Unknown Flow type");
/*
 * Walk the rte_flow pattern and translate each item (ETH/VLAN/IPV4/IPV6/
 * TCP/UDP/VXLAN/NVGRE/VF) into the HWRM filter fields of 'filter',
 * accumulating the "enables" bitmap in 'en'.  The filter type (NTUPLE vs
 * exact-match) is chosen up front via bnxt_filter_type_check(); many
 * enable flags differ between the two variants, hence the repeated
 * use_ntuple ? NTUPLE_* : EM_* selections below.
 * NOTE(review): numerous interleaved lines (braces, returns, some
 * conditions) are missing from this chunk; code left byte-identical.
 */
304 bnxt_validate_and_parse_flow_type(struct bnxt *bp,
305 const struct rte_flow_item pattern[],
306 struct rte_flow_error *error,
307 struct bnxt_filter_info *filter)
309 const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
310 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
311 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
312 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
313 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
314 const struct rte_flow_item_udp *udp_spec, *udp_mask;
315 const struct rte_flow_item_eth *eth_spec, *eth_mask;
316 const struct rte_flow_item_nvgre *nvgre_spec;
317 const struct rte_flow_item_nvgre *nvgre_mask;
318 const struct rte_flow_item_vxlan *vxlan_spec;
319 const struct rte_flow_item_vxlan *vxlan_mask;
/* All-ones 24-bit masks: VXLAN VNI and NVGRE TNI must be fully matched */
320 uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
321 uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
322 const struct rte_flow_item_vf *vf_spec;
323 uint32_t tenant_id_be = 0;
/* Decide NTUPLE vs EM once; drives every enable-flag choice below */
331 use_ntuple = bnxt_filter_type_check(pattern, error);
332 PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
336 filter->filter_type = use_ntuple ?
337 HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
339 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
341 /* last or range is NOT supported as match criteria */
342 rte_flow_error_set(error, EINVAL,
343 RTE_FLOW_ERROR_TYPE_ITEM,
345 "No support for range");
/* Every item must carry both spec and mask */
348 if (!item->spec || !item->mask) {
349 rte_flow_error_set(error, EINVAL,
350 RTE_FLOW_ERROR_TYPE_ITEM,
352 "spec/mask is NULL");
355 switch (item->type) {
356 case RTE_FLOW_ITEM_TYPE_ETH:
357 eth_spec = item->spec;
358 eth_mask = item->mask;
360 /* Source MAC address mask cannot be partially set.
361 * Should be All 0's or all 1's.
362 * Destination MAC address mask must not be partially
363 * set. Should be all 1's or all 0's.
365 if ((!is_zero_ether_addr(&eth_mask->src) &&
366 !is_broadcast_ether_addr(&eth_mask->src)) ||
367 (!is_zero_ether_addr(&eth_mask->dst) &&
368 !is_broadcast_ether_addr(&eth_mask->dst))) {
369 rte_flow_error_set(error, EINVAL,
370 RTE_FLOW_ERROR_TYPE_ITEM,
372 "MAC_addr mask not valid");
376 /* Mask is not allowed. Only exact matches are */
377 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
378 rte_flow_error_set(error, EINVAL,
379 RTE_FLOW_ERROR_TYPE_ITEM,
381 "ethertype mask not valid");
/* All-ones dst mask -> exact-match on destination MAC */
385 if (is_broadcast_ether_addr(&eth_mask->dst)) {
386 rte_memcpy(filter->dst_macaddr,
389 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
390 EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
/* All-ones src mask -> exact-match on source MAC */
392 if (is_broadcast_ether_addr(&eth_mask->src)) {
393 rte_memcpy(filter->src_macaddr,
396 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
397 EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
400 * RTE_LOG(ERR, PMD, "Handle this condition\n");
/* Non-zero ethertype spec: match on it */
403 if (eth_spec->type) {
405 rte_be_to_cpu_16(eth_spec->type);
407 NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
408 EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
412 case RTE_FLOW_ITEM_TYPE_VLAN:
413 vlan_spec = item->spec;
414 vlan_mask = item->mask;
415 if (vlan_mask->tci & 0xFFFF && !vlan_mask->tpid) {
416 /* Only the VLAN ID can be matched. */
418 rte_be_to_cpu_16(vlan_spec->tci &
420 en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
422 rte_flow_error_set(error, EINVAL,
423 RTE_FLOW_ERROR_TYPE_ITEM,
425 "VLAN mask is invalid");
430 case RTE_FLOW_ITEM_TYPE_IPV4:
431 /* If mask is not involved, we could use EM filters. */
432 ipv4_spec = item->spec;
433 ipv4_mask = item->mask;
434 /* Only IP DST and SRC fields are maskable. */
435 if (ipv4_mask->hdr.version_ihl ||
436 ipv4_mask->hdr.type_of_service ||
437 ipv4_mask->hdr.total_length ||
438 ipv4_mask->hdr.packet_id ||
439 ipv4_mask->hdr.fragment_offset ||
440 ipv4_mask->hdr.time_to_live ||
441 ipv4_mask->hdr.next_proto_id ||
442 ipv4_mask->hdr.hdr_checksum) {
443 rte_flow_error_set(error, EINVAL,
444 RTE_FLOW_ERROR_TYPE_ITEM,
446 "Invalid IPv4 mask.");
449 filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
450 filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
452 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
453 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
455 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
456 EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
/* Address masks are only meaningful for ntuple filters */
457 if (ipv4_mask->hdr.src_addr) {
458 filter->src_ipaddr_mask[0] =
459 ipv4_mask->hdr.src_addr;
460 en |= !use_ntuple ? 0 :
461 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
463 if (ipv4_mask->hdr.dst_addr) {
464 filter->dst_ipaddr_mask[0] =
465 ipv4_mask->hdr.dst_addr;
466 en |= !use_ntuple ? 0 :
467 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
469 filter->ip_addr_type = use_ntuple ?
470 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
471 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
472 if (ipv4_spec->hdr.next_proto_id) {
473 filter->ip_protocol =
474 ipv4_spec->hdr.next_proto_id;
476 en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
478 en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
481 case RTE_FLOW_ITEM_TYPE_IPV6:
482 ipv6_spec = item->spec;
483 ipv6_mask = item->mask;
485 /* Only IP DST and SRC fields are maskable. */
486 if (ipv6_mask->hdr.vtc_flow ||
487 ipv6_mask->hdr.payload_len ||
488 ipv6_mask->hdr.proto ||
489 ipv6_mask->hdr.hop_limits) {
490 rte_flow_error_set(error, EINVAL,
491 RTE_FLOW_ERROR_TYPE_ITEM,
493 "Invalid IPv6 mask.");
498 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
499 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
501 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
502 EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
/* 16-byte IPv6 addresses copied verbatim */
503 rte_memcpy(filter->src_ipaddr,
504 ipv6_spec->hdr.src_addr, 16);
505 rte_memcpy(filter->dst_ipaddr,
506 ipv6_spec->hdr.dst_addr, 16);
507 if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
508 rte_memcpy(filter->src_ipaddr_mask,
509 ipv6_mask->hdr.src_addr, 16);
510 en |= !use_ntuple ? 0 :
511 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
513 if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
514 rte_memcpy(filter->dst_ipaddr_mask,
515 ipv6_mask->hdr.dst_addr, 16);
516 en |= !use_ntuple ? 0 :
517 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
519 filter->ip_addr_type = use_ntuple ?
520 NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
521 EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
523 case RTE_FLOW_ITEM_TYPE_TCP:
524 tcp_spec = item->spec;
525 tcp_mask = item->mask;
527 /* Check TCP mask. Only DST & SRC ports are maskable */
528 if (tcp_mask->hdr.sent_seq ||
529 tcp_mask->hdr.recv_ack ||
530 tcp_mask->hdr.data_off ||
531 tcp_mask->hdr.tcp_flags ||
532 tcp_mask->hdr.rx_win ||
533 tcp_mask->hdr.cksum ||
534 tcp_mask->hdr.tcp_urp) {
535 rte_flow_error_set(error, EINVAL,
536 RTE_FLOW_ERROR_TYPE_ITEM,
541 filter->src_port = tcp_spec->hdr.src_port;
542 filter->dst_port = tcp_spec->hdr.dst_port;
544 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
545 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
547 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
548 EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
549 if (tcp_mask->hdr.dst_port) {
550 filter->dst_port_mask = tcp_mask->hdr.dst_port;
551 en |= !use_ntuple ? 0 :
552 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
554 if (tcp_mask->hdr.src_port) {
555 filter->src_port_mask = tcp_mask->hdr.src_port;
556 en |= !use_ntuple ? 0 :
557 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
560 case RTE_FLOW_ITEM_TYPE_UDP:
561 udp_spec = item->spec;
562 udp_mask = item->mask;
/* Only the UDP ports are maskable; len/cksum masks rejected */
564 if (udp_mask->hdr.dgram_len ||
565 udp_mask->hdr.dgram_cksum) {
566 rte_flow_error_set(error, EINVAL,
567 RTE_FLOW_ERROR_TYPE_ITEM,
573 filter->src_port = udp_spec->hdr.src_port;
574 filter->dst_port = udp_spec->hdr.dst_port;
576 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
577 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
579 en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
580 EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
582 if (udp_mask->hdr.dst_port) {
583 filter->dst_port_mask = udp_mask->hdr.dst_port;
584 en |= !use_ntuple ? 0 :
585 NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
587 if (udp_mask->hdr.src_port) {
588 filter->src_port_mask = udp_mask->hdr.src_port;
589 en |= !use_ntuple ? 0 :
590 NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
593 case RTE_FLOW_ITEM_TYPE_VXLAN:
594 vxlan_spec = item->spec;
595 vxlan_mask = item->mask;
596 /* Check if VXLAN item is used to describe protocol.
597 * If yes, both spec and mask should be NULL.
598 * If no, both spec and mask shouldn't be NULL.
600 if ((!vxlan_spec && vxlan_mask) ||
601 (vxlan_spec && !vxlan_mask)) {
602 rte_flow_error_set(error, EINVAL,
603 RTE_FLOW_ERROR_TYPE_ITEM,
605 "Invalid VXLAN item");
/* Reserved fields must be 0 and flags must be 0x8 per VXLAN spec */
609 if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
610 vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
611 vxlan_spec->flags != 0x8) {
612 rte_flow_error_set(error, EINVAL,
613 RTE_FLOW_ERROR_TYPE_ITEM,
615 "Invalid VXLAN item");
619 /* Check if VNI is masked. */
620 if (vxlan_spec && vxlan_mask) {
622 !!memcmp(vxlan_mask->vni, vni_mask,
625 rte_flow_error_set(error, EINVAL,
626 RTE_FLOW_ERROR_TYPE_ITEM,
/* Pack 24-bit VNI into the upper bytes of a big-endian u32 */
632 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
635 rte_be_to_cpu_32(tenant_id_be);
636 filter->tunnel_type =
637 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
640 case RTE_FLOW_ITEM_TYPE_NVGRE:
641 nvgre_spec = item->spec;
642 nvgre_mask = item->mask;
643 /* Check if NVGRE item is used to describe protocol.
644 * If yes, both spec and mask should be NULL.
645 * If no, both spec and mask shouldn't be NULL.
647 if ((!nvgre_spec && nvgre_mask) ||
648 (nvgre_spec && !nvgre_mask)) {
649 rte_flow_error_set(error, EINVAL,
650 RTE_FLOW_ERROR_TYPE_ITEM,
652 "Invalid NVGRE item");
/* NVGRE: key-present header (0x2000) and TEB protocol (0x6558) required */
656 if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
657 nvgre_spec->protocol != 0x6558) {
658 rte_flow_error_set(error, EINVAL,
659 RTE_FLOW_ERROR_TYPE_ITEM,
661 "Invalid NVGRE item");
665 if (nvgre_spec && nvgre_mask) {
667 !!memcmp(nvgre_mask->tni, tni_mask,
670 rte_flow_error_set(error, EINVAL,
671 RTE_FLOW_ERROR_TYPE_ITEM,
/* Pack 24-bit TNI like the VXLAN VNI above */
676 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
679 rte_be_to_cpu_32(tenant_id_be);
680 filter->tunnel_type =
681 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
684 case RTE_FLOW_ITEM_TYPE_VF:
685 vf_spec = item->spec;
/* VF item is only meaningful on a PF; reject on a VF */
688 rte_flow_error_set(error, EINVAL,
689 RTE_FLOW_ERROR_TYPE_ITEM,
691 "Configuring on a VF!");
695 if (vf >= bp->pdev->max_vfs) {
696 rte_flow_error_set(error, EINVAL,
697 RTE_FLOW_ERROR_TYPE_ITEM,
703 filter->mirror_vnic_id =
704 dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
706 /* This simply indicates there's no driver
707 * loaded. This is not an error.
709 rte_flow_error_set(error, EINVAL,
710 RTE_FLOW_ERROR_TYPE_ITEM,
712 "Unable to get default VNIC for VF");
715 filter->mirror_vnic_id = dflt_vnic;
716 en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
/* Commit the accumulated enables bitmap to the filter */
723 filter->enables = en;
728 /* Parse attributes */
/*
 * Validate flow attributes: only plain ingress flows are supported.
 * Egress, non-zero priority and non-zero group are all rejected with a
 * specific rte_flow_error.
 */
730 bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
731 struct rte_flow_error *error)
733 /* Must be input direction */
734 if (!attr->ingress) {
735 rte_flow_error_set(error, EINVAL,
736 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
737 attr, "Only support ingress.");
/* Egress not supported */
743 rte_flow_error_set(error, EINVAL,
744 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
745 attr, "No support for egress.");
750 if (attr->priority) {
751 rte_flow_error_set(error, EINVAL,
752 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
753 attr, "No support for priority.");
/* Groups not supported */
759 rte_flow_error_set(error, EINVAL,
760 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
761 attr, "No support for group.");
/*
 * Find or create the L2 filter backing flow 'nf' on 'vnic'.  If the
 * flow's destination MAC equals the port's default L2 filter MAC, reuse
 * that filter; otherwise allocate a new L2 filter for the DST MAC and
 * program it in firmware via bnxt_hwrm_set_l2_filter().
 */
768 struct bnxt_filter_info *
769 bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
770 struct bnxt_vnic_info *vnic)
772 struct bnxt_filter_info *filter1, *f0;
773 struct bnxt_vnic_info *vnic0;
/* f0 is the port's default L2 filter on the first VNIC */
776 vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
777 f0 = STAILQ_FIRST(&vnic0->filter);
779 //This flow has same DST MAC as the port/l2 filter.
780 if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
783 //This flow needs DST MAC which is not same as port/l2
784 PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
785 filter1 = bnxt_get_unused_filter(bp);
788 filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
789 filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
790 L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
/* Exact-match the flow's destination MAC (all-ones mask) */
791 memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
792 memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
793 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
/* On HWRM failure, return the filter to the free pool */
796 bnxt_free_filter(bp, filter1);
/*
 * Top-level flow parser: validate pattern/attr, then translate the action
 * list (QUEUE/DROP/COUNT/VF) into filter destination, flags and L2 filter
 * linkage.  Exactly one non-VOID action is accepted; a second action ends
 * in an "Invalid action" error.  Flows are rejected entirely when RSS
 * mq_mode is active.
 * NOTE(review): interleaved lines (braces, returns, gotos) are missing
 * from this chunk; code left byte-identical.
 */
803 bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
804 const struct rte_flow_item pattern[],
805 const struct rte_flow_action actions[],
806 const struct rte_flow_attr *attr,
807 struct rte_flow_error *error,
808 struct bnxt_filter_info *filter)
810 const struct rte_flow_action *act = nxt_non_void_action(actions);
811 struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
812 const struct rte_flow_action_queue *act_q;
813 const struct rte_flow_action_vf *act_vf;
814 struct bnxt_vnic_info *vnic, *vnic0;
815 struct bnxt_filter_info *filter1;
/* rte_flow is incompatible with RSS queue distribution on this PMD */
820 if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
821 PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
822 rte_flow_error_set(error, EINVAL,
823 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
824 "Cannot create flow on RSS queues");
/* Parse the pattern into 'filter', then validate the attributes */
829 rc = bnxt_validate_and_parse_flow_type(bp, pattern, error, filter);
833 rc = bnxt_flow_parse_attr(attr, error);
836 //Since we support ingress attribute only - right now.
837 filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;
840 case RTE_FLOW_ACTION_TYPE_QUEUE:
841 /* Allow this flow. Redirect to a VNIC. */
842 act_q = (const struct rte_flow_action_queue *)act->conf;
843 if (act_q->index >= bp->rx_nr_rings) {
844 rte_flow_error_set(error, EINVAL,
845 RTE_FLOW_ERROR_TYPE_ACTION, act,
846 "Invalid queue ID.");
850 PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
/* Map queue index to its VNIC; first VNIC holds the default filter */
852 vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
853 vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
855 rte_flow_error_set(error, EINVAL,
856 RTE_FLOW_ERROR_TYPE_ACTION, act,
857 "No matching VNIC for queue ID.");
861 filter->dst_id = vnic->fw_vnic_id;
862 filter1 = bnxt_get_l2_filter(bp, filter, vnic);
863 if (filter1 == NULL) {
867 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
868 PMD_DRV_LOG(DEBUG, "VNIC found\n");
870 case RTE_FLOW_ACTION_TYPE_DROP:
871 vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
872 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
873 if (filter1 == NULL) {
877 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
/* DROP flag differs between EM and NTUPLE filter types */
878 if (filter->filter_type == HWRM_CFA_EM_FILTER)
880 HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
883 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
885 case RTE_FLOW_ACTION_TYPE_COUNT:
886 vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
887 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
888 if (filter1 == NULL) {
892 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
893 filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
895 case RTE_FLOW_ACTION_TYPE_VF:
896 act_vf = (const struct rte_flow_action_vf *)act->conf;
/* VF redirect only valid when running as a PF */
899 rte_flow_error_set(error, EINVAL,
900 RTE_FLOW_ERROR_TYPE_ACTION,
902 "Configuring on a VF!");
907 if (vf >= bp->pdev->max_vfs) {
908 rte_flow_error_set(error, EINVAL,
909 RTE_FLOW_ERROR_TYPE_ACTION,
/* Redirect by mirroring to the VF's default VNIC */
916 filter->mirror_vnic_id =
917 dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
919 /* This simply indicates there's no driver loaded.
920 * This is not an error.
922 rte_flow_error_set(error, EINVAL,
923 RTE_FLOW_ERROR_TYPE_ACTION,
925 "Unable to get default VNIC for VF");
929 filter->mirror_vnic_id = dflt_vnic;
930 filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
932 vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
933 filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
934 if (filter1 == NULL) {
938 filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
/* Unknown action type: reject */
942 rte_flow_error_set(error, EINVAL,
943 RTE_FLOW_ERROR_TYPE_ACTION, act,
/* Error path: recycle the L2 filter and clear its fw id sentinel */
950 bnxt_free_filter(bp, filter1);
951 filter1->fw_l2_filter_id = -1;
/* Only a single action is supported; next must be END */
954 act = nxt_non_void_action(++act);
955 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
956 rte_flow_error_set(error, EINVAL,
957 RTE_FLOW_ERROR_TYPE_ACTION,
958 act, "Invalid action.");
/*
 * rte_flow_ops.validate callback: run the full parse on a scratch filter
 * from the free pool, then return the filter without programming anything.
 */
967 bnxt_flow_validate(struct rte_eth_dev *dev,
968 const struct rte_flow_attr *attr,
969 const struct rte_flow_item pattern[],
970 const struct rte_flow_action actions[],
971 struct rte_flow_error *error)
973 struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
974 struct bnxt_filter_info *filter;
977 ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
/* Borrow a scratch filter for the duration of validation */
981 filter = bnxt_get_unused_filter(bp);
982 if (filter == NULL) {
983 PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
987 ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
989 /* No need to hold on to this filter if we are just validating flow */
990 filter->fw_l2_filter_id = -1;
991 bnxt_free_filter(bp, filter);
/*
 * Scan every VNIC's flow list for a filter whose match fields are
 * identical to 'nf'.  Same match + same dst_id means a duplicate flow;
 * same match + different dst_id means "same flow, different queue", in
 * which case the old EM/ntuple firmware filter is cleared and the old
 * software filter freed so the caller can redirect the flow.
 */
997 bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
999 struct bnxt_filter_info *mf;
1000 struct rte_flow *flow;
/* Iterate VNICs in reverse order */
1003 for (i = bp->nr_vnics - 1; i >= 0; i--) {
1004 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1006 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
/* Exhaustive field-by-field match comparison */
1009 if (mf->filter_type == nf->filter_type &&
1010 mf->flags == nf->flags &&
1011 mf->src_port == nf->src_port &&
1012 mf->src_port_mask == nf->src_port_mask &&
1013 mf->dst_port == nf->dst_port &&
1014 mf->dst_port_mask == nf->dst_port_mask &&
1015 mf->ip_protocol == nf->ip_protocol &&
1016 mf->ip_addr_type == nf->ip_addr_type &&
1017 mf->ethertype == nf->ethertype &&
1018 mf->vni == nf->vni &&
1019 mf->tunnel_type == nf->tunnel_type &&
1020 mf->l2_ovlan == nf->l2_ovlan &&
1021 mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
1022 mf->l2_ivlan == nf->l2_ivlan &&
1023 mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
1024 !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
1025 !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
1027 !memcmp(mf->src_macaddr, nf->src_macaddr,
1029 !memcmp(mf->dst_macaddr, nf->dst_macaddr,
1031 !memcmp(mf->src_ipaddr, nf->src_ipaddr,
1032 sizeof(nf->src_ipaddr)) &&
1033 !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
1034 sizeof(nf->src_ipaddr_mask)) &&
1035 !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
1036 sizeof(nf->dst_ipaddr)) &&
1037 !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
1038 sizeof(nf->dst_ipaddr_mask))) {
/* Exact duplicate (same destination) */
1039 if (mf->dst_id == nf->dst_id)
1041 /* Same Flow, Different queue
1042 * Clear the old ntuple filter
1044 if (nf->filter_type == HWRM_CFA_EM_FILTER)
1045 bnxt_hwrm_clear_em_filter(bp, mf);
1046 if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
1047 bnxt_hwrm_clear_ntuple_filter(bp, mf);
1048 /* Free the old filter, update flow
1051 bnxt_free_filter(bp, mf);
/*
 * rte_flow_ops.create callback: allocate a flow object and a filter,
 * parse the request, handle duplicate/redirect cases reported by
 * bnxt_match_filter() (-EEXIST / -EXDEV), program the EM or ntuple
 * filter in firmware, and link the new flow onto its VNIC's flow list.
 * NOTE(review): interleaved lines (braces, returns, gotos, some labels)
 * are missing from this chunk; code left byte-identical.
 */
1060 static struct rte_flow *
1061 bnxt_flow_create(struct rte_eth_dev *dev,
1062 const struct rte_flow_attr *attr,
1063 const struct rte_flow_item pattern[],
1064 const struct rte_flow_action actions[],
1065 struct rte_flow_error *error)
1067 struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1068 struct bnxt_filter_info *filter;
1069 struct bnxt_vnic_info *vnic = NULL;
1070 bool update_flow = false;
1071 struct rte_flow *flow;
1075 flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
1077 rte_flow_error_set(error, ENOMEM,
1078 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1079 "Failed to allocate memory");
1083 ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
1085 PMD_DRV_LOG(ERR, "Not a validate flow.\n");
1089 filter = bnxt_get_unused_filter(bp);
1090 if (filter == NULL) {
1091 PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
1095 ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
/* Duplicate detection / redirect handling */
1100 ret = bnxt_match_filter(bp, filter);
1101 if (ret == -EEXIST) {
1102 PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
1103 /* Clear the filter that was created as part of
1104 * validate_and_parse_flow() above
1106 bnxt_hwrm_clear_l2_filter(bp, filter);
1108 } else if (ret == -EXDEV) {
1109 PMD_DRV_LOG(DEBUG, "Flow with same pattern exists");
1110 PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
/* Program the parsed filter in firmware (EM or ntuple variant) */
1114 if (filter->filter_type == HWRM_CFA_EM_FILTER) {
1116 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1117 ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
1119 if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
1121 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1122 ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
/* Locate the VNIC owning this filter's destination */
1125 for (i = 0; i < bp->nr_vnics; i++) {
1126 vnic = &bp->vnic_info[i];
1127 if (filter->dst_id == vnic->fw_vnic_id)
1132 flow->filter = filter;
/* NOTE(review): ERR level used for a success message in the original */
1138 PMD_DRV_LOG(ERR, "Successfully created flow.\n");
1139 STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
/* Failure path: recycle filter and map ret to an rte_flow_error */
1143 bnxt_free_filter(bp, filter);
1146 rte_flow_error_set(error, ret,
1147 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1148 "Matching Flow exists.");
1149 else if (ret == -EXDEV)
1150 rte_flow_error_set(error, ret,
1151 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1152 "Flow with pattern exists, updating destination queue");
1154 rte_flow_error_set(error, -ret,
1155 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1156 "Failed to create flow.");
/*
 * rte_flow_ops.destroy callback: clear the flow's EM/ntuple and L2
 * firmware filters, unlink the flow from its VNIC's flow list, and set
 * an rte_flow_error if any HWRM clear failed.
 */
1163 bnxt_flow_destroy(struct rte_eth_dev *dev,
1164 struct rte_flow *flow,
1165 struct rte_flow_error *error)
1167 struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1168 struct bnxt_filter_info *filter = flow->filter;
1169 struct bnxt_vnic_info *vnic = flow->vnic;
/* Verify the flow is actually known before tearing down */
1172 ret = bnxt_match_filter(bp, filter);
1174 PMD_DRV_LOG(ERR, "Could not find matching flow\n");
1175 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1176 ret = bnxt_hwrm_clear_em_filter(bp, filter);
1177 if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1178 ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1180 bnxt_hwrm_clear_l2_filter(bp, filter);
1182 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1185 rte_flow_error_set(error, -ret,
1186 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1187 "Failed to destroy flow.");
/*
 * rte_flow_ops.flush callback: tear down every flow on every VNIC,
 * clearing the corresponding EM/ntuple firmware filters and unlinking
 * the flows.  Reports the first HW failure via rte_flow_error.
 */
1194 bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1196 struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
1197 struct bnxt_vnic_info *vnic;
1198 struct rte_flow *flow;
1202 for (i = 0; i < bp->nr_vnics; i++) {
1203 vnic = &bp->vnic_info[i];
1204 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1205 struct bnxt_filter_info *filter = flow->filter;
/* Clear whichever firmware filter type backs this flow */
1207 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1208 ret = bnxt_hwrm_clear_em_filter(bp, filter);
1209 if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1210 ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1213 rte_flow_error_set(error, -ret,
1214 RTE_FLOW_ERROR_TYPE_HANDLE,
1216 "Failed to flush flow in HW.");
1220 STAILQ_REMOVE(&vnic->flow_list, flow,
/* rte_flow driver ops table exported to the ethdev generic flow API. */
1229 const struct rte_flow_ops bnxt_flow_ops = {
1230 .validate = bnxt_flow_validate,
1231 .create = bnxt_flow_create,
1232 .destroy = bnxt_flow_destroy,
1233 .flush = bnxt_flow_flush,