/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 */

#include <rte_malloc.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

bnxt_flow_args_validate(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
	rte_flow_error_set(error,
			   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
	rte_flow_error_set(error,
			   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
	rte_flow_error_set(error,
			   RTE_FLOW_ERROR_TYPE_ATTR,

static const struct rte_flow_item *
bnxt_flow_non_void_item(const struct rte_flow_item *cur)
	if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)

static const struct rte_flow_action *
bnxt_flow_non_void_action(const struct rte_flow_action *cur)
	if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)

bnxt_filter_type_check(const struct rte_flow_item pattern[],
		       struct rte_flow_error *error)
	const struct rte_flow_item *item =
		bnxt_flow_non_void_item(pattern);

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		case RTE_FLOW_ITEM_TYPE_ANY:
		case RTE_FLOW_ITEM_TYPE_ETH:
		case RTE_FLOW_ITEM_TYPE_VLAN:
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_TCP:
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* need ntuple match, reset exact match */
			PMD_DRV_LOG(DEBUG, "Unknown Flow type\n");

	if (has_vlan && use_ntuple) {
			    "VLAN flow cannot use NTUPLE filter\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   "Cannot use VLAN with NTUPLE");

bnxt_validate_and_parse_flow_type(struct bnxt *bp,
				  const struct rte_flow_attr *attr,
				  const struct rte_flow_item pattern[],
				  struct rte_flow_error *error,
				  struct bnxt_filter_info *filter)
	const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_ether_addr *dst, *src;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_gre *gre_spec;
	const struct rte_flow_item_gre *gre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	const struct rte_flow_item_vf *vf_spec;
	uint32_t tenant_id_be = 0, valid_flags = 0;
	uint32_t en_ethertype;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_L2_FILTER;
	en_ethertype = use_ntuple ?
		NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
		EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
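	/* Note (added for clarity): ntuple (wildcard) filters permit the
	 * masked address/port matches enabled via the *_MASK flags below,
	 * while L2/exact-match (EM) filters accept exact values only, as
	 * the mask checks in this parser enforce.
	 */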
	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
			/* last or range is NOT supported as match criteria */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   "No support for range");

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ANY:
				((const struct rte_flow_item_any *)item->spec)->num > 3;
				PMD_DRV_LOG(DEBUG, "Parse inner header\n");
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
				eth_mask = item->mask;
				eth_mask = &rte_flow_item_eth_mask;

			/* Source and destination MAC address masks must not
			 * be partially set: each must be all 0's or all 1's.
			 */
			if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
			    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "MAC_addr mask not valid");

			/* Mask is not allowed. Only exact matches are */
			if (eth_mask->type &&
			    eth_mask->type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "ethertype mask not valid");

			if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
				dst = &eth_spec->dst;
				if (!rte_is_valid_assigned_ether_addr(dst)) {
					rte_flow_error_set(error,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   "DMAC is invalid!\n");
				rte_memcpy(filter->dst_macaddr,
					   &eth_spec->dst, RTE_ETHER_ADDR_LEN);
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
				valid_flags |= inner ?
					BNXT_FLOW_L2_INNER_DST_VALID_FLAG :
					BNXT_FLOW_L2_DST_VALID_FLAG;
				filter->priority = attr->priority;
					    "Creating a priority flow\n");
			if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
				src = &eth_spec->src;
				if (!rte_is_valid_assigned_ether_addr(src)) {
					rte_flow_error_set(error,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   "SMAC is invalid!\n");
				rte_memcpy(filter->src_macaddr,
					   &eth_spec->src, RTE_ETHER_ADDR_LEN);
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
				valid_flags |= inner ?
					BNXT_FLOW_L2_INNER_SRC_VALID_FLAG :
					BNXT_FLOW_L2_SRC_VALID_FLAG;
				/*
				 * PMD_DRV_LOG(ERR, "Handle this condition\n");
				 */

			if (eth_mask->type) {
					rte_be_to_cpu_16(eth_spec->type);
				valid_flags |= BNXT_FLOW_PARSE_INNER_FLAG;

		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
				vlan_mask = item->mask;
				vlan_mask = &rte_flow_item_vlan_mask;
			if (en & en_ethertype) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "VLAN TPID matching is not"
			if (vlan_mask->tci &&
			    vlan_mask->tci == RTE_BE16(0x0fff)) {
				/* Only the VLAN ID can be matched. */
					rte_be_to_cpu_16(vlan_spec->tci &
				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "VLAN mask is invalid");

			if (vlan_mask->inner_type &&
			    vlan_mask->inner_type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "inner ethertype mask not"
			if (vlan_mask->inner_type) {
					rte_be_to_cpu_16(vlan_spec->inner_type);

		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* If mask is not involved, we could use EM filters. */
			ipv4_spec = item->spec;
				ipv4_mask = item->mask;
				ipv4_mask = &rte_flow_item_ipv4_mask;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.type_of_service ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.time_to_live ||
			    ipv4_mask->hdr.next_proto_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid IPv4 mask.");

			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			if (ipv4_mask->hdr.src_addr) {
				filter->src_ipaddr_mask[0] =
					ipv4_mask->hdr.src_addr;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;

			if (ipv4_mask->hdr.dst_addr) {
				filter->dst_ipaddr_mask[0] =
					ipv4_mask->hdr.dst_addr;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;

			filter->ip_addr_type = use_ntuple ?
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
			if (ipv4_spec->hdr.next_proto_id) {
				filter->ip_protocol =
					ipv4_spec->hdr.next_proto_id;
					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;

		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
				ipv6_mask = item->mask;
				ipv6_mask = &rte_flow_item_ipv6_mask;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv6_mask->hdr.vtc_flow ||
			    ipv6_mask->hdr.payload_len ||
			    ipv6_mask->hdr.proto ||
			    ipv6_mask->hdr.hop_limits) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid IPv6 mask.");

				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			rte_memcpy(filter->src_ipaddr,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->dst_ipaddr,
				   ipv6_spec->hdr.dst_addr, 16);
			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
				rte_memcpy(filter->src_ipaddr_mask,
					   ipv6_mask->hdr.src_addr, 16);
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;

			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
				rte_memcpy(filter->dst_ipaddr_mask,
					   ipv6_mask->hdr.dst_addr, 16);
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;

			filter->ip_addr_type = use_ntuple ?
				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
				tcp_mask = item->mask;
				tcp_mask = &rte_flow_item_tcp_mask;

			/* Check TCP mask. Only DST & SRC ports are maskable */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,

			filter->src_port = tcp_spec->hdr.src_port;
			filter->dst_port = tcp_spec->hdr.dst_port;
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
			if (tcp_mask->hdr.dst_port) {
				filter->dst_port_mask = tcp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;

			if (tcp_mask->hdr.src_port) {
				filter->src_port_mask = tcp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;

		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
				udp_mask = item->mask;
				udp_mask = &rte_flow_item_udp_mask;

			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,

			filter->src_port = udp_spec->hdr.src_port;
			filter->dst_port = udp_spec->hdr.dst_port;
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
			if (udp_mask->hdr.dst_port) {
				filter->dst_port_mask = udp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;

			if (udp_mask->hdr.src_port) {
				filter->src_port_mask = udp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid VXLAN item");

			if (!vxlan_spec && !vxlan_mask) {
				filter->tunnel_type =
					CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;

			if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
			    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
			    vxlan_spec->flags != 0x8) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid VXLAN item");

			/* Check if VNI is masked. */
			if (vxlan_mask != NULL) {
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_FLOW_ERROR_TYPE_ITEM,

				/* Copy the 24-bit VNI into the low three
				 * bytes of a big-endian 32-bit word before
				 * converting it to host order.
				 */
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
					CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
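			/* Illustrative example (not from the original
			 * source): to match VNI 42 an application would
			 * pass
			 *     spec.vni = { 0x00, 0x00, 0x2a }
			 *     mask.vni = { 0xff, 0xff, 0xff }
			 * while passing neither spec nor mask matches the
			 * VXLAN tunnel type only.
			 */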
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid NVGRE item");

			if (!nvgre_spec && !nvgre_mask) {
				filter->tunnel_type =
					CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;

			if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
			    nvgre_spec->protocol != 0x6558) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid NVGRE item");

			if (nvgre_spec && nvgre_mask) {
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_FLOW_ERROR_TYPE_ITEM,
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
					CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;

		case RTE_FLOW_ITEM_TYPE_GRE:
			gre_spec = (const struct rte_flow_item_gre *)item->spec;
			gre_mask = (const struct rte_flow_item_gre *)item->mask;

			/* Check if GRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if (!!gre_spec ^ !!gre_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,

			if (!gre_spec && !gre_mask) {
				filter->tunnel_type =
					CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE;

		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = item->spec;
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Configuring on a VF!");

			if (vf >= bp->pdev->max_vfs) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,

			if (!attr->transfer) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Matching VF traffic without"
						   " affecting it (transfer attribute)"

			filter->mirror_vnic_id =
			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
				/* This simply indicates there's no driver
				 * loaded. This is not an error.
				 */
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Unable to get default VNIC for VF");

			filter->mirror_vnic_id = dflt_vnic;
			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

	filter->enables = en;
	filter->valid_flags = valid_flags;

	/* Items parsed but no filter to create in HW. */
	if (filter->enables == 0 && filter->valid_flags == 0)
		filter->filter_type = HWRM_CFA_CONFIG;

/* Parse attributes */
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   "Only support ingress.");
		rte_flow_error_set(error,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   "No support for egress.");

static struct bnxt_filter_info *
bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
	struct bnxt_filter_info *mf, *f0;
	struct bnxt_vnic_info *vnic0;

	vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
	f0 = STAILQ_FIRST(&vnic0->filter);

	/* This flow has same DST MAC as the port/l2 filter. */
	if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)

	for (i = bp->max_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		if (vnic->fw_vnic_id == INVALID_VNIC_ID)

		STAILQ_FOREACH(mf, &vnic->filter, next) {
			if (mf->matching_l2_fltr_ptr)

			if (mf->ethertype == nf->ethertype &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,

static struct bnxt_filter_info *
bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		      struct bnxt_vnic_info *vnic)
	struct bnxt_filter_info *filter1;

	/* Alloc new L2 filter.
	 * This flow needs MAC filter which does not match any existing
	 */
	filter1 = bnxt_get_unused_filter(bp);

	memcpy(filter1, nf, sizeof(*filter1));

	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE;
	filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
	    nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) {
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
		PMD_DRV_LOG(DEBUG, "Create Outer filter\n");

	if (nf->filter_type == HWRM_CFA_L2_FILTER &&
	    (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
	     nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) {
		PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n");
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID;
		memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN);
		PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
		memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);

	    (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG ||
	     nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
		/* Tell the FW where to place the filter in the table. */
		if (nf->priority > 65535) {
				HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER;
			/* This will place the filter in TCAM */
			filter1->l2_filter_id_hint = (uint64_t)-1;

	if (nf->valid_flags & (BNXT_FLOW_L2_DST_VALID_FLAG |
			       BNXT_FLOW_L2_SRC_VALID_FLAG |
			       BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
			       BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
		memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);

	if (nf->valid_flags & BNXT_FLOW_L2_DROP_FLAG) {
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP;
		if (nf->ethertype == RTE_ETHER_TYPE_IPV4) {
			/* Num VLANs for drop filter will/should be 0.
			 * If the req is memset to 0, then the count will
			 * be automatically set to 0.
			 */
			if (nf->valid_flags & BNXT_FLOW_PARSE_INNER_FLAG) {
					L2_FILTER_ALLOC_INPUT_EN_T_NUM_VLANS;
					L2_FILTER_ALLOC_INPUT_EN_NUM_VLANS;
				HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;

	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
		bnxt_free_filter(bp, filter1);

struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		   struct bnxt_vnic_info *vnic)
	struct bnxt_filter_info *l2_filter = NULL;

	l2_filter = bnxt_find_matching_l2_filter(bp, nf);
		l2_filter->l2_ref_cnt++;
		l2_filter = bnxt_create_l2_filter(bp, nf, vnic);
			STAILQ_INSERT_TAIL(&vnic->filter, l2_filter, next);
			l2_filter->vnic = vnic;

	nf->matching_l2_fltr_ptr = l2_filter;

static void bnxt_vnic_cleanup(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	if (vnic->rx_queue_cnt > 1)
		bnxt_hwrm_vnic_ctx_free(bp, vnic);

	bnxt_hwrm_vnic_free(bp, vnic);

	rte_free(vnic->fw_grp_ids);
	vnic->fw_grp_ids = NULL;

	vnic->rx_queue_cnt = 0;
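/* bnxt_vnic_prep() below provisions a VNIC through a fixed HWRM sequence:
 * allocate the ring group, allocate the VNIC, allocate an RSS context
 * when more than one Rx ring is used, then configure the VNIC and its
 * placement mode. Any failure unwinds through bnxt_vnic_cleanup().
 */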
static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic,
			  const struct rte_flow_action *act,
			  struct rte_flow_error *error)
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	uint64_t rx_offloads = dev_conf->rxmode.offloads;

	if (bp->nr_vnics > bp->max_vnics - 1)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  "Group id is invalid");

	rc = bnxt_vnic_grp_alloc(bp, vnic);
		return rte_flow_error_set(error, -rc,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  "Failed to alloc VNIC group");

	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
		rte_flow_error_set(error, -rc,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   "Failed to alloc VNIC");

	/* RSS context is required only when there is more than one RSS ring */
	if (vnic->rx_queue_cnt > 1) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
			rte_flow_error_set(error, -rc,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   "Failed to alloc VNIC context");

	if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
		vnic->vlan_strip = true;
		vnic->vlan_strip = false;

	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
		rte_flow_error_set(error, -rc,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   "Failed to configure VNIC");

	rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
		rte_flow_error_set(error, -rc,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   "Failed to configure VNIC plcmode");

	bnxt_vnic_cleanup(bp, vnic);

static int match_vnic_rss_cfg(struct bnxt *bp,
			      struct bnxt_vnic_info *vnic,
			      const struct rte_flow_action_rss *rss)
	unsigned int match = 0, i;

	if (vnic->rx_queue_cnt != rss->queue_num)

	for (i = 0; i < rss->queue_num; i++) {
		if (!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt &&
		    !bp->rx_queues[rss->queue[i]]->rx_started)

	for (i = 0; i < vnic->rx_queue_cnt; i++) {
		for (j = 0; j < vnic->rx_queue_cnt; j++) {
			if (bp->grp_info[rss->queue[i]].fw_grp_id ==
			    vnic->fw_grp_ids[j])

	if (match != vnic->rx_queue_cnt) {
			    "VNIC queue count %d vs queues matched %d\n",
			    match, vnic->rx_queue_cnt);

bnxt_update_filter_flags_en(struct bnxt_filter_info *filter,
			    struct bnxt_filter_info *filter1,
	    !(filter->valid_flags &
	      ~(BNXT_FLOW_L2_DST_VALID_FLAG |
		BNXT_FLOW_L2_SRC_VALID_FLAG |
		BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
		BNXT_FLOW_L2_INNER_DST_VALID_FLAG |
		BNXT_FLOW_L2_DROP_FLAG |
		BNXT_FLOW_PARSE_INNER_FLAG))) {
		filter->flags = filter1->flags;
		filter->enables = filter1->enables;
		filter->filter_type = HWRM_CFA_L2_FILTER;
		memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN);
		memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
		filter->pri_hint = filter1->pri_hint;
		filter->l2_filter_id_hint = filter1->l2_filter_id_hint;

	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
	filter->l2_ref_cnt = filter1->l2_ref_cnt;
	filter->flow_id = filter1->flow_id;
		    "l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n",
		    filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt);
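/* Illustrative example (not from the original source): an action list of
 *     { RTE_FLOW_ACTION_TYPE_RSS, RTE_FLOW_ACTION_TYPE_MARK,
 *       RTE_FLOW_ACTION_TYPE_COUNT, RTE_FLOW_ACTION_TYPE_END }
 * passes the RSS action validation below, while combining RSS with a
 * fate action such as QUEUE is rejected.
 */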
/* Valid actions supported along with RSS are count and mark. */
bnxt_validate_rss_action(const struct rte_flow_action actions[])
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
		case RTE_FLOW_ACTION_TYPE_RSS:

bnxt_get_vnic(struct bnxt *bp, uint32_t group)

	/* For legacy NS3 based implementations,
	 * group_id will be mapped to a VNIC ID.
	 */
	if (BNXT_STINGRAY(bp))

	/* Non NS3 cases, group_id will be ignored.
	 * Setting will be configured on default VNIC.
	 */
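/* Illustrative sketch (not from the original source): a minimal RSS
 * action accepted by the update path below could look like
 *     struct rte_flow_action_rss rss = {
 *             .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *             .level = 0,               // innermost RSS
 *             .types = RTE_ETH_RSS_IPV4,
 *             .key_len = 0,             // keep the current hash key
 *             .queue = queues,
 *             .queue_num = 4,
 *     };
 */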
bnxt_vnic_rss_cfg_update(struct bnxt *bp,
			 struct bnxt_vnic_info *vnic,
			 const struct rte_flow_action *act,
			 struct rte_flow_error *error)
	const struct rte_flow_action_rss *rss;
	unsigned int rss_idx, i;

	rss = (const struct rte_flow_action_rss *)act->conf;

	/* Currently only Toeplitz hash is supported. */
	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
	    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
		rte_flow_error_set(error,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   "Unsupported RSS hash function");

	/* key_len should match the hash key supported by hardware */
	if (rss->key_len != 0 && rss->key_len != HW_HASH_KEY_SIZE) {
		rte_flow_error_set(error,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   "Incorrect hash key parameters");

	/* Currently RSS hash on inner and outer headers are supported.
	 * 0 => Default (innermost RSS) setting
	 */
	if (rss->level > 1) {
		rte_flow_error_set(error,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   "Unsupported hash level");

	if ((rss->queue_num == 0 && rss->queue != NULL) ||
	    (rss->queue_num != 0 && rss->queue == NULL)) {
		rte_flow_error_set(error,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   "Invalid queue config specified");

	/* If RSS types is 0, use a best effort configuration */
	types = rss->types ? rss->types : RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6;

	hash_type = bnxt_rte_to_hwrm_hash_types(types);

	/* If requested types can't be supported, leave existing settings */
		vnic->hash_type = hash_type;

		bnxt_rte_to_hwrm_hash_level(bp, rss->types, rss->level);

	/* Update RSS key only if key_len != 0 */
	if (rss->key_len != 0)
		memcpy(vnic->rss_hash_key, rss->key, rss->key_len);

	if (rss->queue_num == 0)
		goto skip_rss_table;

	/* Validate Rx queues */
	for (i = 0; i < rss->queue_num; i++) {
		PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n", rss->queue[i]);

		if (rss->queue[i] >= bp->rx_nr_rings ||
		    !bp->rx_queues[rss->queue[i]]) {
			rte_flow_error_set(error,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   "Invalid queue ID for RSS");

	/* Prepare the indirection table */
	for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; rss_idx++) {
		struct bnxt_rx_queue *rxq;

		idx = rss->queue[rss_idx % rss->queue_num];

		if (BNXT_CHIP_P5(bp)) {
			rxq = bp->rx_queues[idx];
			vnic->rss_table[rss_idx * 2] =
				rxq->rx_ring->rx_ring_struct->fw_ring_id;
			vnic->rss_table[rss_idx * 2 + 1] =
				rxq->cp_ring->cp_ring_struct->fw_ring_id;
			vnic->rss_table[rss_idx] = vnic->fw_grp_ids[idx];
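		/* On P5 (Thor) devices each indirection table slot holds a
		 * (Rx ring id, completion ring id) pair, hence the doubled
		 * index in the branch above; other chips store one ring
		 * group id per slot.
		 */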
	rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		rte_flow_error_set(error,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   "VNIC RSS configure failed");

bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error,
			     struct bnxt_filter_info *filter)
	const struct rte_flow_action *act =
		bnxt_flow_non_void_action(actions);
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = NULL, *vnic0 = NULL;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	struct bnxt_filter_info *filter1 = NULL;
	const struct rte_flow_action_rss *rss;
	struct bnxt_rx_queue *rxq = NULL;
	int dflt_vnic, vnic_id;
	unsigned int rss_idx;

	bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter);

	rc = bnxt_flow_parse_attr(attr, error);

	/* Since we support ingress attribute only - right now. */
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

	use_ntuple = bnxt_filter_type_check(pattern, error);

	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		/* Allow this flow. Redirect to a VNIC. */
		act_q = (const struct rte_flow_action_queue *)act->conf;
		if (!act_q->index || act_q->index >= bp->rx_nr_rings) {
			rte_flow_error_set(error,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   "Invalid queue ID.");

		PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

		if (use_ntuple && !BNXT_RFS_NEEDS_VNIC(bp)) {
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DEST_RFS_RING_IDX;
			filter->dst_id = act_q->index;
			goto skip_vnic_alloc;

		vnic_id = attr->group;
			PMD_DRV_LOG(DEBUG, "Group id is 0\n");
			vnic_id = act_q->index;

		BNXT_VALID_VNIC_OR_RET(bp, vnic_id);

		vnic = &bp->vnic_info[vnic_id];
		if (vnic->rx_queue_cnt) {
			if (vnic->start_grp_id != act_q->index) {
					    "VNIC already in use\n");
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   "VNIC already in use");

		rxq = bp->rx_queues[act_q->index];

		if (!(dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) && rxq &&
		    vnic->fw_vnic_id != INVALID_HW_RING_ID)
				    "Queue invalid or used with other VNIC\n");
			rte_flow_error_set(error,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   "Queue invalid or in use");

		rxq->rx_started = 1;
		vnic->rx_queue_cnt++;
		vnic->start_grp_id = act_q->index;
		vnic->end_grp_id = act_q->index;
		vnic->func_default = 0;	/* This is not a default VNIC. */

		PMD_DRV_LOG(DEBUG, "VNIC found\n");

		rc = bnxt_vnic_prep(bp, vnic, act, error);

			    "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
			    act_q->index, vnic, vnic->fw_grp_ids);

		vnic->ff_pool_idx = vnic_id;
			    "Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
		filter->dst_id = vnic->fw_vnic_id;

		/* For ntuple filter, create the L2 filter with default VNIC.
		 * The user specified redirect queue will be set while creating
		 * the ntuple filter in hardware.
		 */
		vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
			filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
			filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   "Filter not available");

		PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n",
			    filter, filter1, filter1->l2_ref_cnt);
		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
	case RTE_FLOW_ACTION_TYPE_DROP:
		vnic0 = &bp->vnic_info[0];
		filter->dst_id = vnic0->fw_vnic_id;
		filter->valid_flags |= BNXT_FLOW_L2_DROP_FLAG;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   "Filter not available");

		if (filter->filter_type == HWRM_CFA_EM_FILTER)
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;

		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
	case RTE_FLOW_ACTION_TYPE_COUNT:
		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   "New filter not available");

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flow_id = filter1->flow_id;
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
	case RTE_FLOW_ACTION_TYPE_VF:
		act_vf = (const struct rte_flow_action_vf *)act->conf;

		if (filter->tunnel_type ==
		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
		    filter->tunnel_type ==
		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) {
			/* If issued on a VF, ensure id is 0 and is trusted */
			if (!BNXT_VF_IS_TRUSTED(bp) || vf) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,

			filter->enables |= filter->tunnel_type;
			filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER;

		if (vf >= bp->pdev->max_vfs) {
			rte_flow_error_set(error,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   "Incorrect VF id!");

		filter->mirror_vnic_id =
		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
		if (dflt_vnic < 0) {
			/* This simply indicates there's no driver loaded.
			 * This is not an error.
			 */
			rte_flow_error_set(error,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   "Unable to get default VNIC for VF");

		filter->mirror_vnic_id = dflt_vnic;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   "New filter not available");

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flow_id = filter1->flow_id;
	case RTE_FLOW_ACTION_TYPE_RSS:
		rc = bnxt_validate_rss_action(actions);
			rte_flow_error_set(error,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   "Invalid actions specified with RSS");

		rss = (const struct rte_flow_action_rss *)act->conf;

		vnic_id = bnxt_get_vnic(bp, attr->group);

		BNXT_VALID_VNIC_OR_RET(bp, vnic_id);
		vnic = &bp->vnic_info[vnic_id];

		/*
		 * For non NS3 cases, rte_flow_items will not be considered
		 */
		if (filter->filter_type == HWRM_CFA_CONFIG) {
			/* RSS config update requested */
			rc = bnxt_vnic_rss_cfg_update(bp, vnic, act, error);
			filter->dst_id = vnic->fw_vnic_id;

		/* Check if requested RSS config matches RSS config of VNIC
		 * only if it is not a fresh VNIC configuration.
		 * Otherwise the existing VNIC configuration can be used.
		 */
		if (vnic->rx_queue_cnt) {
			rc = match_vnic_rss_cfg(bp, vnic, rss);
				    "VNIC and RSS config mismatch\n");
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   "VNIC and RSS cfg mismatch");

		for (i = 0; i < rss->queue_num; i++) {
			PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n",

			if (!rss->queue[i] ||
			    rss->queue[i] >= bp->rx_nr_rings ||
			    !bp->rx_queues[rss->queue[i]]) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   "Invalid queue ID for RSS");

			rxq = bp->rx_queues[rss->queue[i]];

			if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] !=
			    INVALID_HW_RING_ID) {
					    "queue active with other VNIC\n");
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   "Invalid queue ID for RSS");

			rxq->rx_started = 1;
			vnic->rx_queue_cnt++;

		vnic->start_grp_id = rss->queue[0];
		vnic->end_grp_id = rss->queue[rss->queue_num - 1];
		vnic->func_default = 0;	/* This is not a default VNIC. */

		rc = bnxt_vnic_prep(bp, vnic, act, error);

			    "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
			    vnic_id, vnic, vnic->fw_grp_ids);

		vnic->ff_pool_idx = vnic_id;
			    "Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx);

		/* This can be done only after vnic_grp_alloc is done. */
		for (i = 0; i < vnic->rx_queue_cnt; i++) {
			vnic->fw_grp_ids[i] =
				bp->grp_info[rss->queue[i]].fw_grp_id;
			/* Make sure vnic0 does not use these rings. */
			bp->vnic_info[0].fw_grp_ids[rss->queue[i]] =

		for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ) {
			for (i = 0; i < vnic->rx_queue_cnt; i++)
				vnic->rss_table[rss_idx++] =
					vnic->fw_grp_ids[i];

		/* Configure RSS only if the queue count is > 1 */
		if (vnic->rx_queue_cnt > 1) {
				bnxt_rte_to_hwrm_hash_types(rss->types);
				bnxt_rte_to_hwrm_hash_level(bp, rss->types, rss->level);

			if (!rss->key_len) {
				/* If hash key has not been specified,
				 * use random hash key.
				 */
				bnxt_prandom_bytes(vnic->rss_hash_key,
				if (rss->key_len > HW_HASH_KEY_SIZE)
					memcpy(vnic->rss_hash_key,
				memcpy(vnic->rss_hash_key,

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
			PMD_DRV_LOG(DEBUG, "No RSS config required\n");

		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   "New filter not available");

		PMD_DRV_LOG(DEBUG, "L2 filter created\n");
		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
	case RTE_FLOW_ACTION_TYPE_MARK:
		if (bp->mark_table == NULL) {
			rte_flow_error_set(error,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   "Mark table not allocated.");

		if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) {
				    "Disabling vector processing for mark\n");
			bp->eth_dev->rx_pkt_burst = bnxt_recv_pkts;
			bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;

		filter->valid_flags |= BNXT_FLOW_MARK_FLAG;
		filter->mark = ((const struct rte_flow_action_mark *)
		PMD_DRV_LOG(DEBUG, "Mark the flow %d\n", filter->mark);
		rte_flow_error_set(error,
				   RTE_FLOW_ERROR_TYPE_ACTION,

	act = bnxt_flow_non_void_action(++act);
	while (act->type != RTE_FLOW_ACTION_TYPE_END)

	bnxt_hwrm_clear_l2_filter(bp, filter1);
	bnxt_free_filter(bp, filter1);

	if (vnic && STAILQ_EMPTY(&vnic->filter))
		vnic->rx_queue_cnt = 0;

	if (rxq && !vnic->rx_queue_cnt)
		rxq->vnic = &bp->vnic_info[0];

struct bnxt_vnic_info *find_matching_vnic(struct bnxt *bp,
					  struct bnxt_filter_info *filter)
	struct bnxt_vnic_info *vnic = NULL;

	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic->fw_vnic_id != INVALID_VNIC_ID &&
		    filter->dst_id == vnic->fw_vnic_id) {
			PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n",

bnxt_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_filter_info *filter;

	bnxt_acquire_flow_lock(bp);
	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
		bnxt_release_flow_lock(bp);

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		rte_flow_error_set(error, ENOSPC,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Not enough resources for a new flow");
		bnxt_release_flow_lock(bp);

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,

	vnic = find_matching_vnic(bp, filter);
		if (STAILQ_EMPTY(&vnic->filter)) {
			bnxt_vnic_cleanup(bp, vnic);
			PMD_DRV_LOG(DEBUG, "Free VNIC\n");

	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		bnxt_hwrm_clear_em_filter(bp, filter);
	else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		bnxt_hwrm_clear_ntuple_filter(bp, filter);
		bnxt_hwrm_clear_l2_filter(bp, filter);

	/* No need to hold on to this filter if we are just validating flow */
	bnxt_free_filter(bp, filter);
	bnxt_release_flow_lock(bp);

bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter,
		   struct bnxt_filter_info *new_filter)
	/* Clear the new L2 filter that was created in the previous step in
	 * bnxt_validate_and_parse_flow. For L2 filters, we will use the new
	 * filter which points to the new destination queue and so we clear
	 * the previous L2 filter. For ntuple filters, we are going to reuse
	 * the old L2 filter and create new NTUPLE filter with this new
	 * destination queue subsequently during bnxt_flow_create. So we
	 * decrement the ref cnt of the L2 filter that would've been bumped
	 * up previously in bnxt_validate_and_parse_flow as the old n-tuple
	 * filter that was referencing it will be deleted now.
	 */
	bnxt_hwrm_clear_l2_filter(bp, old_filter);
	if (new_filter->filter_type == HWRM_CFA_L2_FILTER) {
		bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter);
		if (new_filter->filter_type == HWRM_CFA_EM_FILTER)
			bnxt_hwrm_clear_em_filter(bp, old_filter);
		if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			bnxt_hwrm_clear_ntuple_filter(bp, old_filter);

bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
	struct bnxt_filter_info *mf;
	struct rte_flow *flow;

	for (i = bp->max_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		if (vnic->fw_vnic_id == INVALID_VNIC_ID)

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask))) {
				if (mf->dst_id == nf->dst_id)
				/* Free the old filter, update flow
				 */
				bnxt_update_filter(bp, mf, nf);
				STAILQ_REMOVE(&vnic->filter, mf,
					      bnxt_filter_info, next);
				STAILQ_INSERT_TAIL(&vnic->filter, nf, next);
				bnxt_free_filter(bp, mf);

bnxt_setup_flow_counter(struct bnxt *bp)
	if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
	    !(bp->flags & BNXT_FLAG_FC_THREAD) && BNXT_FLOW_XSTATS_EN(bp)) {
		rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER,
				  bnxt_flow_cnt_alarm_cb,
		bp->flags |= BNXT_FLAG_FC_THREAD;
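/* The callback below implements a self-rescheduling poll loop: each
 * invocation requests flow counters from the firmware and re-arms the
 * EAL alarm for BNXT_FC_TIMER seconds, unless counting is no longer
 * needed, in which case bnxt_cancel_fc_thread() stops the cycle.
 */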
void bnxt_flow_cnt_alarm_cb(void *arg)
	struct bnxt *bp = arg;

	if (!bp->flow_stat->rx_fc_out_tbl.va) {
		PMD_DRV_LOG(ERR, "bp->flow_stat->rx_fc_out_tbl.va is NULL?\n");
		bnxt_cancel_fc_thread(bp);

	if (!bp->flow_stat->flow_count) {
		bnxt_cancel_fc_thread(bp);

	if (!bp->eth_dev->data->dev_started) {
		bnxt_cancel_fc_thread(bp);

	rc = bnxt_flow_stats_req(bp);
		PMD_DRV_LOG(ERR, "Flow stat alarm not rescheduled.\n");

	rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER,
			  bnxt_flow_cnt_alarm_cb,

static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_filter_info *filter;
	bool update_flow = false;
	struct rte_flow *flow;
	uint32_t tun_type, flow_id;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow, Not a Trusted VF!");

	if (!dev->data->dev_started) {
		rte_flow_error_set(error,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   "Device must be started");

	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");

	bnxt_acquire_flow_lock(bp);
	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
		PMD_DRV_LOG(ERR, "Not a valid flow.\n");

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		rte_flow_error_set(error, ENOSPC,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Not enough resources for a new flow");

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,

	ret = bnxt_match_filter(bp, filter);
	if (ret == -EEXIST) {
		PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
		/* Clear the filter that was created as part of
		 * validate_and_parse_flow() above
		 */
		bnxt_hwrm_clear_l2_filter(bp, filter);
	} else if (ret == -EXDEV) {
		PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
		PMD_DRV_LOG(DEBUG, "Updating with different destination\n");

	/* If tunnel redirection to a VF/PF is specified then only tunnel_type
	 * is set and enable is set to the tunnel type. Issue hwrm cmd directly
	 */
	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Unable to query tunnel to VF");
		if (tun_type == (1U << filter->tunnel_type)) {
				bnxt_hwrm_tunnel_redirect_free(bp,
							       filter->tunnel_type);
					    "Unable to free existing tunnel\n");
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   "Unable to free preexisting "

		ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type);
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Unable to redirect tunnel to VF");
		vnic = &bp->vnic_info[0];

	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to create EM filter");

	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to create ntuple filter");

	if (BNXT_RFS_NEEDS_VNIC(bp))
		vnic = find_matching_vnic(bp, filter);
		vnic = BNXT_GET_DEFAULT_VNIC(bp);

	if (!ret || update_flow) {
		flow->filter = filter;

	if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
			    "Mark action: mark id 0x%x, flow id 0x%x\n",
			    filter->mark, filter->flow_id);

		/* TCAM and EM should be 16-bit only.
		 * Other modes not supported.
		 */
		flow_id = filter->flow_id & BNXT_FLOW_ID_MASK;
		if (bp->mark_table[flow_id].valid) {
			rte_flow_error_set(error, EEXIST,
					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   "Flow with mark id exists");
			bnxt_clear_one_vnic_filter(bp, filter);

		bp->mark_table[flow_id].valid = true;
		bp->mark_table[flow_id].mark_id = filter->mark;

	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
	STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);

	if (BNXT_FLOW_XSTATS_EN(bp))
		bp->flow_stat->flow_count++;
	bnxt_release_flow_lock(bp);
	bnxt_setup_flow_counter(bp);
	PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");

	bnxt_free_filter(bp, filter);
		rte_flow_error_set(error, ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Matching Flow exists.");
	else if (ret == -EXDEV)
		rte_flow_error_set(error, 0,
				   RTE_FLOW_ERROR_TYPE_NONE, NULL,
				   "Flow with pattern exists, updating destination queue");
	else if (!rte_errno)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");

	bnxt_release_flow_lock(bp);

static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
					       struct bnxt_filter_info *filter,
					       struct rte_flow_error *error)
	uint16_t tun_dst_fid;

	ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Unable to query tunnel to VF");

	if (tun_type == (1U << filter->tunnel_type)) {
		ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   "tunnel_redirect info cmd fail");

		PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
			    tun_dst_fid + bp->first_vf_id, bp->fw_fid);

		/* Tunnel doesn't belong to this VF, so don't send HWRM
		 * cmd, just delete the flow from driver
		 */
		if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id)) {
				    "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
			ret = bnxt_hwrm_tunnel_redirect_free(bp,
							     filter->tunnel_type);
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   "Unable to free tunnel redirection");

_bnxt_flow_destroy(struct bnxt *bp,
		   struct rte_flow *flow,
		   struct rte_flow_error *error)
	struct bnxt_filter_info *filter;
	struct bnxt_vnic_info *vnic;

	filter = flow->filter;
	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_handle_tunnel_redirect_destroy(bp, filter, error);

	/* For config type, there is no filter in HW. Finish cleanup here */
	if (filter->filter_type == HWRM_CFA_CONFIG)

	ret = bnxt_match_filter(bp, filter);
		PMD_DRV_LOG(ERR, "Could not find matching flow\n");

	if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
		flow_id = filter->flow_id & BNXT_FLOW_ID_MASK;
		memset(&bp->mark_table[flow_id], 0,
		       sizeof(bp->mark_table[flow_id]));
		filter->flow_id = 0;

	ret = bnxt_clear_one_vnic_filter(bp, filter);

	/* If it is a L2 drop filter, when the filter is created,
	 * the FW updates the BC/MC records.
	 * Once this filter is removed, issue the set_rx_mask command
	 * to reset the BC/MC records in the HW to the settings
	 * before the drop counter is created.
	 */
	if (filter->valid_flags & BNXT_FLOW_L2_DROP_FLAG)
		bnxt_set_rx_mask_no_vlan(bp, &bp->vnic_info[0]);

	STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
	bnxt_free_filter(bp, filter);
	STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);

	if (BNXT_FLOW_XSTATS_EN(bp))
		bp->flow_stat->flow_count--;

	/* If this was the last flow associated with this vnic,
	 * switch the queue back to RSS pool.
	 */
	if (vnic && !vnic->func_default &&
	    STAILQ_EMPTY(&vnic->flow_list)) {
		bnxt_vnic_cleanup(bp, vnic);

	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to destroy flow.");

bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
	struct bnxt *bp = dev->data->dev_private;

	bnxt_acquire_flow_lock(bp);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid flow: failed to destroy flow.");
		bnxt_release_flow_lock(bp);

	if (!flow->filter) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid flow: failed to destroy flow.");
		bnxt_release_flow_lock(bp);

	ret = _bnxt_flow_destroy(bp, flow, error);
	bnxt_release_flow_lock(bp);

void bnxt_cancel_fc_thread(struct bnxt *bp)
	bp->flags &= ~BNXT_FLAG_FC_THREAD;
	rte_eal_alarm_cancel(bnxt_flow_cnt_alarm_cb, (void *)bp);

bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;

	bnxt_acquire_flow_lock(bp);
	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID)

		while (!STAILQ_EMPTY(&vnic->flow_list)) {
			flow = STAILQ_FIRST(&vnic->flow_list);

			ret = _bnxt_flow_destroy(bp, flow, error);

	bnxt_cancel_fc_thread(bp);
	bnxt_release_flow_lock(bp);

const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};
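/* Usage sketch (illustrative, not part of the driver): applications reach
 * these callbacks through the generic rte_flow API, e.g.
 *
 *     struct rte_flow_error err;
 *     struct rte_flow *flow;
 *
 *     if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *             flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *     ...
 *     rte_flow_destroy(port_id, flow, &err);
 */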