/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	if (!pattern) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL,
				   "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL,
				   "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL,
				   "NULL attribute.");
		return -rte_errno;
	}

	return 0;
}

static const struct rte_flow_item *
bnxt_flow_non_void_item(const struct rte_flow_item *cur)
{
	while (true) {
		if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
			return cur;
		cur++;
	}
}

static const struct rte_flow_action *
bnxt_flow_non_void_action(const struct rte_flow_action *cur)
{
	while (true) {
		if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
			return cur;
		cur++;
	}
}
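
/*
 * Note: as with all rte_flow drivers, the two helpers above assume the
 * caller's pattern/action arrays are terminated by an *_TYPE_END entry,
 * so skipping VOID entries can never run past the end of the array.
 */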

static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
		       struct rte_flow_error *error __rte_unused)
{
	const struct rte_flow_item *item =
		bnxt_flow_non_void_item(pattern);
	int use_ntuple = 1;
	bool has_vlan = false;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ANY:
		case RTE_FLOW_ITEM_TYPE_ETH:
			use_ntuple = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			use_ntuple = 0;
			has_vlan = true;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_TCP:
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* FALLTHROUGH */
			/* need ntuple match, reset exact match */
			use_ntuple |= 1;
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Unknown Flow type\n");
		}
		item++;
	}

	if (has_vlan && use_ntuple) {
		PMD_DRV_LOG(ERR,
			    "VLAN flow cannot use NTUPLE filter\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Cannot use VLAN with NTUPLE");
		return -rte_errno;
	}

	return use_ntuple;
}
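
/*
 * Illustrative sketch (not compiled into the PMD): a pattern like the
 * one below makes bnxt_filter_type_check() return 1 and selects an
 * NTUPLE filter, while a pattern holding only ETH/VLAN items returns 0
 * and selects an L2/exact-match filter. The address is hypothetical.
 */
#if 0
	static const struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
	};
	static const struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
	};
	const struct rte_flow_item ntuple_pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
#endif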

static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
				  const struct rte_flow_attr *attr,
				  const struct rte_flow_item pattern[],
				  struct rte_flow_error *error,
				  struct bnxt_filter_info *filter)
{
	const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_ether_addr *dst, *src;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_gre *gre_spec;
	const struct rte_flow_item_gre *gre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	const struct rte_flow_item_vf *vf_spec;
	uint32_t tenant_id_be = 0, valid_flags = 0;
	bool vni_masked = false;
	bool tni_masked = false;
	uint32_t en_ethertype;
	uint8_t inner = 0;
	uint32_t vf = 0;
	uint32_t en = 0;
	int use_ntuple;
	int dflt_vnic;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	if (use_ntuple < 0)
		return use_ntuple;
	PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_L2_FILTER;
	en_ethertype = use_ntuple ?
		NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
		EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->last) {
			/* last or range is NOT supported as match criteria */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "No support for range");
			return -rte_errno;
		}

		if (!item->spec || !item->mask) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "spec/mask is NULL");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ANY:
			inner =
			((const struct rte_flow_item_any *)item->spec)->num > 3;
			if (inner)
				PMD_DRV_LOG(DEBUG, "Parse inner header\n");
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			if (!item->spec || !item->mask)
				break;

			eth_spec = item->spec;
			eth_mask = item->mask;

			/* Source MAC address mask cannot be partially set.
			 * Should be All 0's or all 1's.
			 * Destination MAC address mask must not be partially
			 * set. Should be all 1's or all 0's.
			 */
			if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
			    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "MAC_addr mask not valid");
				return -rte_errno;
			}

			/* Mask is not allowed. Only exact matches are */
			if (eth_mask->type &&
			    eth_mask->type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "ethertype mask not valid");
				return -rte_errno;
			}

			if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
				dst = &eth_spec->dst;
				if (!rte_is_valid_assigned_ether_addr(dst)) {
					rte_flow_error_set(error,
							   EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "DMAC is invalid");
					PMD_DRV_LOG(ERR,
						    "DMAC is invalid!\n");
					return -rte_errno;
				}
				rte_memcpy(filter->dst_macaddr,
					   &eth_spec->dst, RTE_ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
				valid_flags |= inner ?
					BNXT_FLOW_L2_INNER_DST_VALID_FLAG :
					BNXT_FLOW_L2_DST_VALID_FLAG;
				filter->priority = attr->priority;
				PMD_DRV_LOG(DEBUG,
					    "Creating a priority flow\n");
			}

			if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
				src = &eth_spec->src;
				if (!rte_is_valid_assigned_ether_addr(src)) {
					rte_flow_error_set(error,
							   EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "SMAC is invalid");
					PMD_DRV_LOG(ERR,
						    "SMAC is invalid!\n");
					return -rte_errno;
				}
				rte_memcpy(filter->src_macaddr,
					   &eth_spec->src, RTE_ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
				valid_flags |= inner ?
					BNXT_FLOW_L2_INNER_SRC_VALID_FLAG :
					BNXT_FLOW_L2_SRC_VALID_FLAG;
			} /*
			   * else {
			   *  PMD_DRV_LOG(ERR, "Handle this condition\n");
			   * }
			   */
			if (eth_mask->type) {
				filter->ethertype =
					rte_be_to_cpu_16(eth_spec->type);
				en |= en_ethertype;
			}
			if (inner)
				valid_flags |= BNXT_FLOW_PARSE_INNER_FLAG;

			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			if (en & en_ethertype) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN TPID matching is not"
						   " supported");
				return -rte_errno;
			}
			if (vlan_mask->tci &&
			    vlan_mask->tci == RTE_BE16(0x0fff)) {
				/* Only the VLAN ID can be matched. */
				filter->l2_ovlan =
					rte_be_to_cpu_16(vlan_spec->tci &
							 RTE_BE16(0x0fff));
				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
			} else {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN mask is invalid");
				return -rte_errno;
			}
			if (vlan_mask->inner_type &&
			    vlan_mask->inner_type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "inner ethertype mask not"
						   " valid");
				return -rte_errno;
			}
			if (vlan_mask->inner_type) {
				filter->ethertype =
					rte_be_to_cpu_16(vlan_spec->inner_type);
				en |= en_ethertype;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* If mask is not involved, we could use EM filters. */
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.type_of_service ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.time_to_live ||
			    ipv4_mask->hdr.next_proto_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return -rte_errno;
			}

			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

			if (ipv4_mask->hdr.src_addr) {
				filter->src_ipaddr_mask[0] =
					ipv4_mask->hdr.src_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}

			if (ipv4_mask->hdr.dst_addr) {
				filter->dst_ipaddr_mask[0] =
					ipv4_mask->hdr.dst_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}

			filter->ip_addr_type = use_ntuple ?
			 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
			 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;

			if (ipv4_spec->hdr.next_proto_id) {
				filter->ip_protocol =
					ipv4_spec->hdr.next_proto_id;
				if (use_ntuple)
					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
				else
					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv6_mask->hdr.vtc_flow ||
			    ipv6_mask->hdr.payload_len ||
			    ipv6_mask->hdr.proto ||
			    ipv6_mask->hdr.hop_limits) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask.");
				return -rte_errno;
			}

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

			rte_memcpy(filter->src_ipaddr,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->dst_ipaddr,
				   ipv6_spec->hdr.dst_addr, 16);

			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
						   16)) {
				rte_memcpy(filter->src_ipaddr_mask,
					   ipv6_mask->hdr.src_addr, 16);
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}

			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
						   16)) {
				rte_memcpy(filter->dst_ipaddr_mask,
					   ipv6_mask->hdr.dst_addr, 16);
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}

			filter->ip_addr_type = use_ntuple ?
				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Check TCP mask. Only DST & SRC ports are maskable */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return -rte_errno;
			}

			filter->src_port = tcp_spec->hdr.src_port;
			filter->dst_port = tcp_spec->hdr.dst_port;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (tcp_mask->hdr.dst_port) {
				filter->dst_port_mask = tcp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}

			if (tcp_mask->hdr.src_port) {
				filter->src_port_mask = tcp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return -rte_errno;
			}

			filter->src_port = udp_spec->hdr.src_port;
			filter->dst_port = udp_spec->hdr.dst_port;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (udp_mask->hdr.dst_port) {
				filter->dst_port_mask = udp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}

			if (udp_mask->hdr.src_port) {
				filter->src_port_mask = udp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			if (!vxlan_spec && !vxlan_mask) {
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
				break;
			}

			if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
			    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
			    vxlan_spec->flags != 0x8) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (vni_masked) {
					rte_flow_error_set
						(error,
						 EINVAL,
						 RTE_FLOW_ERROR_TYPE_ITEM,
						 item,
						 "Invalid VNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (!nvgre_spec && !nvgre_mask) {
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
				break;
			}

			/* Both fields are big-endian on the wire; compare
			 * against big-endian constants.
			 */
			if (nvgre_spec->c_k_s_rsvd0_ver != RTE_BE16(0x2000) ||
			    nvgre_spec->protocol != RTE_BE16(0x6558)) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec && nvgre_mask) {
				tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (tni_masked) {
					rte_flow_error_set
						(error,
						 EINVAL,
						 RTE_FLOW_ERROR_TYPE_ITEM,
						 item,
						 "Invalid TNI mask");
					return -rte_errno;
				}
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			gre_spec = (const struct rte_flow_item_gre *)item->spec;
			gre_mask = (const struct rte_flow_item_gre *)item->mask;

			/*
			 * Check if GRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if (!!gre_spec ^ !!gre_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GRE item");
				return -rte_errno;
			}

			if (!gre_spec && !gre_mask) {
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE;
				break;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = item->spec;
			vf = vf_spec->id;
			if (!BNXT_PF(bp)) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Configuring on a VF!");
				return -rte_errno;
			}

			if (vf >= bp->pdev->max_vfs) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Incorrect VF id!");
				return -rte_errno;
			}

			if (!attr->transfer) {
				rte_flow_error_set(error,
						   ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Matching VF traffic without"
						   " affecting it (transfer attribute)"
						   " is unsupported");
				return -rte_errno;
			}

			filter->mirror_vnic_id =
			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
			if (dflt_vnic < 0) {
				/* This simply indicates there's no driver
				 * loaded. This is not an error.
				 */
				rte_flow_error_set
					(error,
					 EINVAL,
					 RTE_FLOW_ERROR_TYPE_ITEM,
					 item,
					 "Unable to get default VNIC for VF");
				return -rte_errno;
			}

			filter->mirror_vnic_id = dflt_vnic;
			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
			break;
		default:
			break;
		}
		item++;
	}
	filter->enables = en;
	filter->valid_flags = valid_flags;

	return 0;
}
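
/*
 * Illustrative sketch (not compiled): a fully-masked VXLAN VNI item as
 * accepted by the parser above; the 24-bit VNI lands in filter->vni via
 * the tenant_id_be copy. The VNI value here is hypothetical.
 */
#if 0
	static const struct rte_flow_item_vxlan vxlan_spec_ex = {
		.flags = 0x08,			/* I flag: VNI is valid */
		.vni = { 0x00, 0x10, 0x00 },	/* VNI 4096, network order */
	};
	static const struct rte_flow_item_vxlan vxlan_mask_ex = {
		.vni = { 0xff, 0xff, 0xff },	/* VNI must be fully masked */
	};
#endif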

/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr,
				   "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr,
				   "No support for egress.");
		return -rte_errno;
	}

	return 0;
}
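
/*
 * Illustrative sketch (not compiled): the only attribute combination
 * bnxt_flow_parse_attr() accepts today.
 */
#if 0
	const struct rte_flow_attr bnxt_ok_attr = {
		.ingress = 1,	/* mandatory */
		.egress = 0,	/* rejected with RTE_FLOW_ERROR_TYPE_ATTR_EGRESS */
	};
#endif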

static struct bnxt_filter_info *
bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf, *f0;
	struct bnxt_vnic_info *vnic0;
	struct rte_flow *flow;
	int i;

	vnic0 = &bp->vnic_info[0];
	f0 = STAILQ_FIRST(&vnic0->filter);

	/* This flow has same DST MAC as the port/l2 filter. */
	if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
		return f0;

	for (i = bp->max_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;

			if (mf->matching_l2_fltr_ptr)
				continue;

			if (mf->ethertype == nf->ethertype &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    RTE_ETHER_ADDR_LEN))
				return mf;
		}
	}

	return NULL;
}

static struct bnxt_filter_info *
bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		      struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter1;
	int rc;

	/* Alloc new L2 filter.
	 * This flow needs MAC filter which does not match any existing
	 * L2 filters.
	 */
	filter1 = bnxt_get_unused_filter(bp);
	if (filter1 == NULL)
		return NULL;

	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE;
	filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
	    nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) {
		filter1->flags |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
		PMD_DRV_LOG(DEBUG, "Create Outer filter\n");
	}

	if (nf->filter_type == HWRM_CFA_L2_FILTER &&
	    (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
	     nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) {
		PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n");
		filter1->flags |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID;
		memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN);
	} else {
		PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
		memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
	}

	if (nf->priority &&
	    (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG ||
	     nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
		/* Tell the FW where to place the filter in the table. */
		if (nf->priority > 65535) {
			filter1->pri_hint =
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER;
			/* This will place the filter in TCAM */
			filter1->l2_filter_id_hint = (uint64_t)-1;
		}
	}

	if (nf->valid_flags & (BNXT_FLOW_L2_DST_VALID_FLAG |
			       BNXT_FLOW_L2_SRC_VALID_FLAG |
			       BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
			       BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
		filter1->enables =
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
		memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
	}

	if (nf->valid_flags & BNXT_FLOW_L2_DROP_FLAG) {
		filter1->flags |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP;
		if (nf->ethertype == RTE_ETHER_TYPE_IPV4) {
			/* Num VLANs for drop filter will/should be 0.
			 * If the req is memset to 0, then the count will
			 * be automatically set to 0.
			 */
			if (nf->valid_flags & BNXT_FLOW_PARSE_INNER_FLAG) {
				filter1->enables |=
					L2_FILTER_ALLOC_INPUT_EN_T_NUM_VLANS;
			} else {
				filter1->enables |=
					L2_FILTER_ALLOC_INPUT_EN_NUM_VLANS;
				filter1->flags |=
				HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
			}
		}
	}

	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
				     filter1);
	if (rc) {
		bnxt_free_filter(bp, filter1);
		return NULL;
	}
	filter1->l2_ref_cnt++;
	return filter1;
}

struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		   struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *l2_filter = NULL;

	l2_filter = bnxt_find_matching_l2_filter(bp, nf);
	if (l2_filter) {
		l2_filter->l2_ref_cnt++;
		nf->matching_l2_fltr_ptr = l2_filter;
	} else {
		l2_filter = bnxt_create_l2_filter(bp, nf, vnic);
		nf->matching_l2_fltr_ptr = NULL;
	}

	return l2_filter;
}
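
/*
 * Note: each flow holds one reference on its L2 filter (l2_ref_cnt);
 * the reuse path above bumps the count so the underlying HWRM L2
 * filter is torn down only when the last referencing flow goes away.
 */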

static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	uint64_t rx_offloads = dev_conf->rxmode.offloads;
	int rc;

	rc = bnxt_vnic_grp_alloc(bp, vnic);
	if (rc)
		goto ret;

	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM vnic alloc failure rc: %x\n", rc);
		goto ret;
	}

	/* RSS context is required only when there is more than one RSS ring */
	if (vnic->rx_queue_cnt > 1) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0 /* ctx_idx 0 */);
		if (rc) {
			PMD_DRV_LOG(ERR,
				    "HWRM vnic ctx alloc failure: %x\n", rc);
			goto ret;
		}
	} else {
		PMD_DRV_LOG(DEBUG, "No RSS context required\n");
	}

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vnic->vlan_strip = true;
	else
		vnic->vlan_strip = false;

	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
	if (rc)
		goto ret;

	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
ret:
	return rc;
}

static int match_vnic_rss_cfg(struct bnxt *bp,
			      struct bnxt_vnic_info *vnic,
			      const struct rte_flow_action_rss *rss)
{
	unsigned int match = 0, i;

	if (vnic->rx_queue_cnt != rss->queue_num)
		return -EINVAL;

	for (i = 0; i < rss->queue_num; i++) {
		if (!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt &&
		    !bp->rx_queues[rss->queue[i]]->rx_started)
			return -EINVAL;
	}

	for (i = 0; i < vnic->rx_queue_cnt; i++) {
		int j;

		for (j = 0; j < vnic->rx_queue_cnt; j++) {
			if (bp->grp_info[rss->queue[i]].fw_grp_id ==
			    vnic->fw_grp_ids[j])
				match++;
		}
	}

	if (match != vnic->rx_queue_cnt) {
		PMD_DRV_LOG(ERR,
			    "VNIC queue count %d vs queues matched %d\n",
			    vnic->rx_queue_cnt, match);
		return -EINVAL;
	}

	return 0;
}

static void
bnxt_update_filter_flags_en(struct bnxt_filter_info *filter,
			    struct bnxt_filter_info *filter1,
			    int use_ntuple)
{
	if (use_ntuple == 0 &&
	    !(filter->valid_flags &
	      ~(BNXT_FLOW_L2_DST_VALID_FLAG |
		BNXT_FLOW_L2_SRC_VALID_FLAG |
		BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
		BNXT_FLOW_L2_INNER_DST_VALID_FLAG |
		BNXT_FLOW_L2_DROP_FLAG |
		BNXT_FLOW_PARSE_INNER_FLAG))) {
		filter->flags = filter1->flags;
		filter->enables = filter1->enables;
		filter->filter_type = HWRM_CFA_L2_FILTER;
		memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN);
		memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
		filter->pri_hint = filter1->pri_hint;
		filter->l2_filter_id_hint = filter1->l2_filter_id_hint;
	}
	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
	filter->l2_ref_cnt = filter1->l2_ref_cnt;
	PMD_DRV_LOG(DEBUG,
		"l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n",
		filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt);
}
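
/*
 * Note: for a pure L2 flow (no ntuple match criteria), the helper above
 * collapses the flow onto the underlying L2 filter, so no separate
 * EM/NTUPLE HW entry is programmed for it later.
 */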

static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error,
			     struct bnxt_filter_info *filter)
{
	const struct rte_flow_action *act =
		bnxt_flow_non_void_action(actions);
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = NULL, *vnic0 = NULL;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	struct bnxt_filter_info *filter1 = NULL;
	const struct rte_flow_action_rss *rss;
	struct bnxt_rx_queue *rxq = NULL;
	int dflt_vnic, vnic_id;
	unsigned int rss_idx;
	uint32_t vf = 0, i;
	int rc, use_ntuple;

	rc =
	bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter);
	if (rc != 0)
		goto ret;

	rc = bnxt_flow_parse_attr(attr, error);
	if (rc != 0)
		goto ret;

	/* Since we support ingress attribute only - right now. */
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		/* Allow this flow. Redirect to a VNIC. */
		act_q = (const struct rte_flow_action_queue *)act->conf;
		if (!act_q->index || act_q->index >= bp->rx_nr_rings) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Invalid queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

		vnic_id = attr->group;
		if (!vnic_id) {
			PMD_DRV_LOG(DEBUG, "Group id is 0\n");
			vnic_id = act_q->index;
		}

		vnic = &bp->vnic_info[vnic_id];
		if (vnic == NULL) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "No matching VNIC found.");
			rc = -rte_errno;
			goto ret;
		}
		if (vnic->rx_queue_cnt) {
			if (vnic->start_grp_id != act_q->index) {
				PMD_DRV_LOG(ERR,
					    "VNIC already in use\n");
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "VNIC already in use");
				rc = -rte_errno;
				goto ret;
			}
			goto use_vnic;
		}

		rxq = bp->rx_queues[act_q->index];

		if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
		    vnic->fw_vnic_id != INVALID_HW_RING_ID)
			goto use_vnic;

		if (!rxq) {
			PMD_DRV_LOG(ERR,
				    "Queue invalid or used with other VNIC\n");
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Queue invalid queue or in use");
			rc = -rte_errno;
			goto ret;
		}

		rxq->vnic = vnic;
		rxq->rx_started = 1;
		vnic->rx_queue_cnt++;
		vnic->start_grp_id = act_q->index;
		vnic->end_grp_id = act_q->index;
		vnic->func_default = 0;	//This is not a default VNIC.

		PMD_DRV_LOG(DEBUG, "VNIC found\n");

		rc = bnxt_vnic_prep(bp, vnic);
		if (rc) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "VNIC prep fail");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG,
			    "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
			    act_q->index, vnic, vnic->fw_grp_ids);

use_vnic:
		vnic->ff_pool_idx = vnic_id;
		PMD_DRV_LOG(DEBUG,
			    "Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   ENOSPC,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Filter not available");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n",
			    filter, filter1, filter1->l2_ref_cnt);
		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		vnic0 = &bp->vnic_info[0];
		filter->dst_id = vnic0->fw_vnic_id;
		filter->valid_flags |= BNXT_FLOW_L2_DROP_FLAG;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   ENOSPC,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Filter not available");
			rc = -rte_errno;
			goto ret;
		}

		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			filter->flags =
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			filter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;

		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   ENOSPC,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "New filter not available");
			rc = -rte_errno;
			goto ret;
		}

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		vf = act_vf->id;

		if (filter->tunnel_type ==
		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
		    filter->tunnel_type ==
		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) {
			/* If issued on a VF, ensure id is 0 and is trusted */
			if (BNXT_VF(bp)) {
				if (!BNXT_VF_IS_TRUSTED(bp) || vf) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						act,
						"Incorrect VF");
					rc = -rte_errno;
					goto ret;
				}
			}

			filter->enables |= filter->tunnel_type;
			filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER;
			goto done;
		}

		if (vf >= bp->pdev->max_vfs) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Incorrect VF id!");
			rc = -rte_errno;
			goto ret;
		}

		filter->mirror_vnic_id =
		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
		if (dflt_vnic < 0) {
			/* This simply indicates there's no driver loaded.
			 * This is not an error.
			 */
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Unable to get default VNIC for VF");
			rc = -rte_errno;
			goto ret;
		}

		filter->mirror_vnic_id = dflt_vnic;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   ENOSPC,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "New filter not available");
			rc = -rte_errno;
			goto ret;
		}

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		break;
	case RTE_FLOW_ACTION_TYPE_RSS:
		rss = (const struct rte_flow_action_rss *)act->conf;

		vnic_id = attr->group;
		if (!vnic_id) {
			PMD_DRV_LOG(ERR, "Group id cannot be 0\n");
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ATTR,
					   NULL,
					   "Group id cannot be 0");
			rc = -rte_errno;
			goto ret;
		}

		vnic = &bp->vnic_info[vnic_id];
		if (vnic == NULL) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "No matching VNIC for RSS group.");
			rc = -rte_errno;
			goto ret;
		}
		PMD_DRV_LOG(DEBUG, "VNIC found\n");

		/* Check if requested RSS config matches RSS config of VNIC
		 * only if it is not a fresh VNIC configuration.
		 * Otherwise the existing VNIC configuration can be used.
		 */
		if (vnic->rx_queue_cnt) {
			rc = match_vnic_rss_cfg(bp, vnic, rss);
			if (rc) {
				PMD_DRV_LOG(ERR,
					    "VNIC and RSS config mismatch\n");
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "VNIC and RSS cfg mismatch");
				rc = -rte_errno;
				goto ret;
			}
			goto vnic_found;
		}

		for (i = 0; i < rss->queue_num; i++) {
			PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n",
				    rss->queue[i]);

			if (!rss->queue[i] ||
			    rss->queue[i] >= bp->rx_nr_rings ||
			    !bp->rx_queues[rss->queue[i]]) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "Invalid queue ID for RSS");
				rc = -rte_errno;
				goto ret;
			}
			rxq = bp->rx_queues[rss->queue[i]];

			if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] !=
			    INVALID_HW_RING_ID) {
				PMD_DRV_LOG(ERR,
					    "queue active with other VNIC\n");
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "Invalid queue ID for RSS");
				rc = -rte_errno;
				goto ret;
			}

			rxq->vnic = vnic;
			rxq->rx_started = 1;
			vnic->rx_queue_cnt++;
		}

		vnic->start_grp_id = rss->queue[0];
		vnic->end_grp_id = rss->queue[rss->queue_num - 1];
		vnic->func_default = 0;	//This is not a default VNIC.

		rc = bnxt_vnic_prep(bp, vnic);
		if (rc) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "VNIC prep fail");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG,
			    "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
			    vnic_id, vnic, vnic->fw_grp_ids);

		vnic->ff_pool_idx = vnic_id;
		PMD_DRV_LOG(DEBUG,
			    "Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx);

		/* This can be done only after vnic_grp_alloc is done. */
		for (i = 0; i < vnic->rx_queue_cnt; i++) {
			vnic->fw_grp_ids[i] =
				bp->grp_info[rss->queue[i]].fw_grp_id;
			/* Make sure vnic0 does not use these rings. */
			bp->vnic_info[0].fw_grp_ids[rss->queue[i]] =
				INVALID_HW_RING_ID;
		}

		for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ) {
			for (i = 0; i < vnic->rx_queue_cnt; i++)
				vnic->rss_table[rss_idx++] =
					vnic->fw_grp_ids[i];
		}

		/* Configure RSS only if the queue count is > 1 */
		if (vnic->rx_queue_cnt > 1) {
			vnic->hash_type =
				bnxt_rte_to_hwrm_hash_types(rss->types);

			if (!rss->key_len) {
				/* If hash key has not been specified,
				 * use random hash key.
				 */
				prandom_bytes(vnic->rss_hash_key,
					      HW_HASH_KEY_SIZE);
			} else {
				if (rss->key_len > HW_HASH_KEY_SIZE)
					memcpy(vnic->rss_hash_key,
					       rss->key,
					       HW_HASH_KEY_SIZE);
				else
					memcpy(vnic->rss_hash_key,
					       rss->key,
					       rss->key_len);
			}
			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		} else {
			PMD_DRV_LOG(DEBUG, "No RSS config required\n");
		}

vnic_found:
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   ENOSPC,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "New filter not available");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG, "L2 filter created\n");
		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
		break;
	default:
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

	if (filter1 && !filter->matching_l2_fltr_ptr) {
		bnxt_free_filter(bp, filter1);
		filter1->fw_l2_filter_id = -1;
	}

done:
	act = bnxt_flow_non_void_action(++act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

	return rc;

ret:
	//TODO: Cleanup according to ACTION TYPE.
	if (vnic && STAILQ_EMPTY(&vnic->filter))
		vnic->rx_queue_cnt = 0;

	if (rxq && !vnic->rx_queue_cnt)
		rxq->vnic = &bp->vnic_info[0];

	return -rte_errno;
}
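
/*
 * Illustrative sketch (not compiled): a minimal ingress action list the
 * parser above accepts for the QUEUE case. Queue index 1 is hypothetical;
 * index 0 is reserved for the default VNIC and is rejected.
 */
#if 0
	const struct rte_flow_action_queue act_queue = { .index = 1 };
	const struct rte_flow_action queue_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &act_queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
#endif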

struct bnxt_vnic_info *find_matching_vnic(struct bnxt *bp,
					  struct bnxt_filter_info *filter)
{
	struct bnxt_vnic_info *vnic = NULL;
	unsigned int i;

	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic->fw_vnic_id != INVALID_VNIC_ID &&
		    filter->dst_id == vnic->fw_vnic_id) {
			PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n",
				    vnic->ff_pool_idx);
			return vnic;
		}
	}
	return NULL;
}

static int
bnxt_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_filter_info *filter;
	int ret = 0;

	bnxt_acquire_flow_lock(bp);
	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0) {
		bnxt_release_flow_lock(bp);
		return ret;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		bnxt_release_flow_lock(bp);
		return -ENOMEM;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	if (ret)
		goto exit;

	vnic = find_matching_vnic(bp, filter);
	if (vnic) {
		if (STAILQ_EMPTY(&vnic->filter)) {
			rte_free(vnic->fw_grp_ids);
			bnxt_hwrm_vnic_ctx_free(bp, vnic);
			bnxt_hwrm_vnic_free(bp, vnic);
			vnic->rx_queue_cnt = 0;
			PMD_DRV_LOG(DEBUG, "Free VNIC\n");
		}
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		bnxt_hwrm_clear_em_filter(bp, filter);
	else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		bnxt_hwrm_clear_ntuple_filter(bp, filter);
	else
		bnxt_hwrm_clear_l2_filter(bp, filter);

exit:
	/* No need to hold on to this filter if we are just validating flow */
	filter->fw_l2_filter_id = UINT64_MAX;
	bnxt_free_filter(bp, filter);
	bnxt_release_flow_lock(bp);

	return ret;
}

static void
bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter,
		   struct bnxt_filter_info *new_filter)
{
	/* Clear the new L2 filter that was created in the previous step in
	 * bnxt_validate_and_parse_flow. For L2 filters, we will use the new
	 * filter which points to the new destination queue and so we clear
	 * the previous L2 filter. For ntuple filters, we are going to reuse
	 * the old L2 filter and create new NTUPLE filter with this new
	 * destination queue subsequently during bnxt_flow_create.
	 */
	if (new_filter->filter_type == HWRM_CFA_L2_FILTER) {
		bnxt_hwrm_clear_l2_filter(bp, old_filter);
		bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter);
	} else {
		if (new_filter->filter_type == HWRM_CFA_EM_FILTER)
			bnxt_hwrm_clear_em_filter(bp, old_filter);
		if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			bnxt_hwrm_clear_ntuple_filter(bp, old_filter);
	}
}

static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf;
	struct rte_flow *flow;
	int i;

	for (i = bp->max_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;

			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask))) {
				if (mf->dst_id == nf->dst_id)
					return -EEXIST;
				/* Free the old filter, update flow
				 * with new filter
				 */
				bnxt_update_filter(bp, mf, nf);
				STAILQ_REMOVE(&vnic->filter, mf,
					      bnxt_filter_info, next);
				STAILQ_INSERT_TAIL(&vnic->filter, nf, next);
				bnxt_free_filter(bp, mf);
				flow->filter = nf;
				return -EXDEV;
			}
		}
	}
	return 0;
}
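
/*
 * Note: callers rely on the distinction above: -EEXIST means an
 * identical flow (same match, same destination) already exists, while
 * -EXDEV means the same match now points at a different destination,
 * so the flow is updated in place rather than rejected.
 */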

static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_filter_info *filter;
	bool update_flow = false;
	struct rte_flow *flow;
	int ret = 0;
	uint32_t tun_type;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow, Not a Trusted VF!");
		return NULL;
	}

	if (!dev->data->dev_started) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Device must be started");
		return NULL;
	}

	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	bnxt_acquire_flow_lock(bp);
	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Not a valid flow.\n");
		goto free_flow;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		goto free_flow;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	if (ret != 0)
		goto free_filter;

	ret = bnxt_match_filter(bp, filter);
	if (ret == -EEXIST) {
		PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
		/* Clear the filter that was created as part of
		 * validate_and_parse_flow() above
		 */
		bnxt_hwrm_clear_l2_filter(bp, filter);
		goto free_filter;
	} else if (ret == -EXDEV) {
		PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
		PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
		update_flow = true;
	}

	/* If tunnel redirection to a VF/PF is specified then only tunnel_type
	 * is set and enable is set to the tunnel type. Issue hwrm cmd directly
	 * in such a case.
	 */
	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Unable to query tunnel to VF");
			goto free_filter;
		}
		if (tun_type == (1U << filter->tunnel_type)) {
			ret =
			bnxt_hwrm_tunnel_redirect_free(bp,
						       filter->tunnel_type);
			if (ret) {
				PMD_DRV_LOG(ERR,
					    "Unable to free existing tunnel\n");
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Unable to free preexisting "
						   "tunnel on VF");
				goto free_filter;
			}
		}
		ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Unable to redirect tunnel to VF");
			goto free_filter;
		}
		vnic = &bp->vnic_info[0];
		goto done;
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
		filter->enables |=
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
	}

	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
		filter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
	}

	vnic = find_matching_vnic(bp, filter);
done:
	if (!ret || update_flow) {
		flow->filter = filter;
		flow->vnic = vnic;
		if (update_flow) {
			ret = -EXDEV;
			goto free_flow;
		}

		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
		bnxt_release_flow_lock(bp);
		return flow;
	}

free_filter:
	bnxt_free_filter(bp, filter);
free_flow:
	if (ret == -EEXIST)
		rte_flow_error_set(error, ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Matching Flow exists.");
	else if (ret == -EXDEV)
		rte_flow_error_set(error, 0,
				   RTE_FLOW_ERROR_TYPE_NONE, NULL,
				   "Flow with pattern exists, updating destination queue");
	else
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");
	rte_free(flow);
	flow = NULL;
	bnxt_release_flow_lock(bp);
	return flow;
}

static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
					       struct bnxt_filter_info *filter,
					       struct rte_flow_error *error)
{
	uint16_t tun_dst_fid;
	uint32_t tun_type;
	int ret = 0;

	ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Unable to query tunnel to VF");
		return ret;
	}
	if (tun_type == (1U << filter->tunnel_type)) {
		ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
						     &tun_dst_fid);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   NULL,
					   "tunnel_redirect info cmd fail");
			return ret;
		}
		PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
			    tun_dst_fid + bp->first_vf_id, bp->fw_fid);

		/* Tunnel doesn't belong to this VF, so don't send HWRM
		 * cmd, just delete the flow from driver
		 */
		if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id))
			PMD_DRV_LOG(ERR,
				    "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
		else
			ret = bnxt_hwrm_tunnel_redirect_free(bp,
							filter->tunnel_type);
	}
	return ret;
}

static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_filter_info *filter;
	struct bnxt_vnic_info *vnic;
	int ret = 0;

	bnxt_acquire_flow_lock(bp);
	if (!flow) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid flow: failed to destroy flow.");
		bnxt_release_flow_lock(bp);
		return -EINVAL;
	}

	filter = flow->filter;
	vnic = flow->vnic;

	if (!filter) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid flow: failed to destroy flow.");
		bnxt_release_flow_lock(bp);
		return -EINVAL;
	}

	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_handle_tunnel_redirect_destroy(bp,
							  filter,
							  error);
		if (!ret) {
			goto done;
		} else {
			bnxt_release_flow_lock(bp);
			return ret;
		}
	}

	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		PMD_DRV_LOG(ERR, "Could not find matching flow\n");

	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
	ret = bnxt_hwrm_clear_l2_filter(bp, filter);

done:
	if (!ret) {
		/* If it is a L2 drop filter, when the filter is created,
		 * the FW updates the BC/MC records.
		 * Once this filter is removed, issue the set_rx_mask command
		 * to reset the BC/MC records in the HW to the settings
		 * before the drop counter is created.
		 */
		if (filter->valid_flags & BNXT_FLOW_L2_DROP_FLAG)
			bnxt_set_rx_mask_no_vlan(bp, &bp->vnic_info[0]);

		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
		bnxt_free_filter(bp, filter);
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);

		/* If this was the last flow associated with this vnic,
		 * switch the queue back to RSS pool.
		 */
		if (vnic && !vnic->func_default &&
		    STAILQ_EMPTY(&vnic->flow_list)) {
			rte_free(vnic->fw_grp_ids);
			if (vnic->rx_queue_cnt > 1)
				bnxt_hwrm_vnic_ctx_free(bp, vnic);

			bnxt_hwrm_vnic_free(bp, vnic);
			vnic->rx_queue_cnt = 0;
		}
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	bnxt_release_flow_lock(bp);
	return ret;
}

static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_filter_info *filter = NULL;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	bnxt_acquire_flow_lock(bp);
	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		while (!STAILQ_EMPTY(&vnic->flow_list)) {
			flow = STAILQ_FIRST(&vnic->flow_list);
			filter = flow->filter;

			if (filter->filter_type ==
			    HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
			    filter->enables == filter->tunnel_type) {
				ret =
				bnxt_handle_tunnel_redirect_destroy(bp,
								    filter,
								    error);
				if (!ret) {
					goto done;
				} else {
					bnxt_release_flow_lock(bp);
					return ret;
				}
			}

			if (filter->filter_type == HWRM_CFA_EM_FILTER)
				ret = bnxt_hwrm_clear_em_filter(bp, filter);
			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
				ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
			ret = bnxt_hwrm_clear_l2_filter(bp, filter);

done:
			if (ret) {
				rte_flow_error_set
					(error,
					 -ret,
					 RTE_FLOW_ERROR_TYPE_HANDLE,
					 NULL,
					 "Failed to flush flow in HW.");
				bnxt_release_flow_lock(bp);
				return -rte_errno;
			}

			STAILQ_REMOVE(&vnic->flow_list, flow,
				      rte_flow, next);

			STAILQ_REMOVE(&vnic->filter,
				      filter, bnxt_filter_info,
				      next);
			bnxt_free_filter(bp, filter);

			rte_free(flow);

			/* If this was the last flow associated with this vnic,
			 * switch the queue back to RSS pool.
			 */
			if (STAILQ_EMPTY(&vnic->flow_list)) {
				rte_free(vnic->fw_grp_ids);
				if (vnic->rx_queue_cnt > 1)
					bnxt_hwrm_vnic_ctx_free(bp, vnic);
				bnxt_hwrm_vnic_free(bp, vnic);
				vnic->rx_queue_cnt = 0;
			}
		}
	}

	bnxt_release_flow_lock(bp);
	return ret;
}

const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};
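
/*
 * Illustrative application-side usage of the ops registered above (a
 * minimal sketch, not compiled; port_id, the MAC address and the queue
 * index are hypothetical):
 */
#if 0
	struct rte_flow_error err;
	struct rte_flow *flow;
	const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item_eth eth_spec = {
		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	};
	static const struct rte_flow_item_eth eth_mask = {
		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue q = { .index = 1 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
#endif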