/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_vnic.h"
#include "bnxt_util.h"
#include "hsi_struct_def_dpdk.h"
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	if (!pattern) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL,
				   "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL,
				   "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL,
				   "NULL attribute.");
		return -rte_errno;
	}

	return 0;
}

static const struct rte_flow_item *
bnxt_flow_non_void_item(const struct rte_flow_item *cur)
{
	while (1) {
		if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
			return cur;
		cur++;
	}
}

static const struct rte_flow_action *
bnxt_flow_non_void_action(const struct rte_flow_action *cur)
{
	while (1) {
		if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
			return cur;
		cur++;
	}
}
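
/*
 * Scan the pattern to pick a filter type: ANY/ETH/VLAN items force an
 * exact-match (EM) filter, while L3/L4 items require an n-tuple filter.
 * Returns nonzero when an n-tuple filter is needed, zero for exact match,
 * or a negative errno for unsupported combinations (e.g. VLAN + n-tuple).
 */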
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
		       struct rte_flow_error *error)
{
	const struct rte_flow_item *item =
		bnxt_flow_non_void_item(pattern);
	int use_ntuple = 1;
	bool has_vlan = 0;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ANY:
		case RTE_FLOW_ITEM_TYPE_ETH:
			use_ntuple = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			use_ntuple = 0;
			has_vlan = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_TCP:
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* need ntuple match, reset exact match */
			use_ntuple |= 1;
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Unknown Flow type\n");
		}
		item++;
	}

	if (has_vlan && use_ntuple) {
		PMD_DRV_LOG(ERR,
			    "VLAN flow cannot use NTUPLE filter\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Cannot use VLAN with NTUPLE");
		return -rte_errno;
	}

	return use_ntuple;
}
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
				  const struct rte_flow_attr *attr,
				  const struct rte_flow_item pattern[],
				  struct rte_flow_error *error,
				  struct bnxt_filter_info *filter)
{
	const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_gre *gre_spec;
	const struct rte_flow_item_gre *gre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	const struct rte_flow_item_vf *vf_spec;
	uint32_t tenant_id_be = 0, valid_flags = 0;
	bool vni_masked = 0;
	bool tni_masked = 0;
	uint32_t en_ethertype;
	uint8_t inner = 0;
	uint32_t vf = 0;
	uint32_t en = 0;
	int use_ntuple;
	int dflt_vnic;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	if (use_ntuple < 0)
		return use_ntuple;

	PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
	en_ethertype = use_ntuple ?
		NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
		EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->last) {
			/* last or range is NOT supported as match criteria */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "No support for range");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ANY:
			inner =
			((const struct rte_flow_item_any *)item->spec)->num > 3;
			if (inner)
				PMD_DRV_LOG(DEBUG, "Parse inner header\n");
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			if (!item->spec || !item->mask)
				break;

			eth_spec = item->spec;
			eth_mask = item->mask;

			/* Source MAC address mask cannot be partially set.
			 * Should be All 0's or all 1's.
			 * Destination MAC address mask must not be partially
			 * set. Should be all 1's or all 0's.
			 */
			if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
			    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "MAC_addr mask not valid");
				return -rte_errno;
			}

			/* Mask is not allowed. Only exact matches are */
			if (eth_mask->type &&
			    eth_mask->type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "ethertype mask not valid");
				return -rte_errno;
			}

			if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
				rte_memcpy(filter->dst_macaddr,
					   &eth_spec->dst, RTE_ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
				valid_flags |= inner ?
					BNXT_FLOW_L2_INNER_DST_VALID_FLAG :
					BNXT_FLOW_L2_DST_VALID_FLAG;
			}

			if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
				rte_memcpy(filter->src_macaddr,
					   &eth_spec->src, RTE_ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
				valid_flags |= inner ?
					BNXT_FLOW_L2_INNER_SRC_VALID_FLAG :
					BNXT_FLOW_L2_SRC_VALID_FLAG;
			}

			if (eth_mask->type) {
				filter->ethertype =
					rte_be_to_cpu_16(eth_spec->type);
				en |= en_ethertype;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			if (en & en_ethertype) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN TPID matching is not"
						   " supported");
				return -rte_errno;
			}
			if (vlan_mask->tci &&
			    vlan_mask->tci == RTE_BE16(0x0fff)) {
				/* Only the VLAN ID can be matched. */
				filter->l2_ovlan =
					rte_be_to_cpu_16(vlan_spec->tci &
							 RTE_BE16(0x0fff));
				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
			} else {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN mask is invalid");
				return -rte_errno;
			}
			if (vlan_mask->inner_type &&
			    vlan_mask->inner_type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "inner ethertype mask not"
						   " valid");
				return -rte_errno;
			}
			if (vlan_mask->inner_type) {
				filter->ethertype =
					rte_be_to_cpu_16(vlan_spec->inner_type);
				en |= en_ethertype;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* If mask is not involved, we could use EM filters. */
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.type_of_service ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.time_to_live ||
			    ipv4_mask->hdr.next_proto_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return -rte_errno;
			}

			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

			if (ipv4_mask->hdr.src_addr) {
				filter->src_ipaddr_mask[0] =
					ipv4_mask->hdr.src_addr;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}

			if (ipv4_mask->hdr.dst_addr) {
				filter->dst_ipaddr_mask[0] =
					ipv4_mask->hdr.dst_addr;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}

			filter->ip_addr_type = use_ntuple ?
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;

			if (ipv4_spec->hdr.next_proto_id) {
				filter->ip_protocol =
					ipv4_spec->hdr.next_proto_id;
				if (use_ntuple)
					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
				else
					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv6_mask->hdr.vtc_flow ||
			    ipv6_mask->hdr.payload_len ||
			    ipv6_mask->hdr.proto ||
			    ipv6_mask->hdr.hop_limits) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask.");
				return -rte_errno;
			}

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

			rte_memcpy(filter->src_ipaddr,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->dst_ipaddr,
				   ipv6_spec->hdr.dst_addr, 16);

			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
						   16)) {
				rte_memcpy(filter->src_ipaddr_mask,
					   ipv6_mask->hdr.src_addr, 16);
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}

			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
						   16)) {
				rte_memcpy(filter->dst_ipaddr_mask,
					   ipv6_mask->hdr.dst_addr, 16);
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}

			filter->ip_addr_type = use_ntuple ?
				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Check TCP mask. Only DST & SRC ports are maskable */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return -rte_errno;
			}

			filter->src_port = tcp_spec->hdr.src_port;
			filter->dst_port = tcp_spec->hdr.dst_port;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (tcp_mask->hdr.dst_port) {
				filter->dst_port_mask = tcp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}

			if (tcp_mask->hdr.src_port) {
				filter->src_port_mask = tcp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return -rte_errno;
			}

			filter->src_port = udp_spec->hdr.src_port;
			filter->dst_port = udp_spec->hdr.dst_port;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (udp_mask->hdr.dst_port) {
				filter->dst_port_mask = udp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}

			if (udp_mask->hdr.src_port) {
				filter->src_port_mask = udp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			if (!vxlan_spec && !vxlan_mask) {
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
				break;
			}

			if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
			    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
			    vxlan_spec->flags != 0x8) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (vni_masked) {
					rte_flow_error_set
						(error,
						 EINVAL,
						 RTE_FLOW_ERROR_TYPE_ITEM,
						 item,
						 "Invalid VNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (!nvgre_spec && !nvgre_mask) {
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
				break;
			}

			if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
			    nvgre_spec->protocol != 0x6558) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec && nvgre_mask) {
				tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (tni_masked) {
					rte_flow_error_set
						(error,
						 EINVAL,
						 RTE_FLOW_ERROR_TYPE_ITEM,
						 item,
						 "Invalid TNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			gre_spec = (const struct rte_flow_item_gre *)item->spec;
			gre_mask = (const struct rte_flow_item_gre *)item->mask;

			/*
			 * Check if GRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if (!!gre_spec ^ !!gre_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GRE item");
				return -rte_errno;
			}

			if (!gre_spec && !gre_mask) {
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE;
				break;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = item->spec;
			vf = vf_spec->id;
			if (!BNXT_PF(bp)) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Configuring on a VF!");
				return -rte_errno;
			}

			if (vf >= bp->pdev->max_vfs) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Incorrect VF id!");
				return -rte_errno;
			}

			if (!attr->transfer) {
				rte_flow_error_set(error,
						   ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Matching VF traffic without"
						   " affecting it (transfer attribute)"
						   " is unsupported");
				return -rte_errno;
			}

			filter->mirror_vnic_id =
			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
			if (dflt_vnic < 0) {
				/* This simply indicates there's no driver
				 * loaded. This is not an error.
				 */
				rte_flow_error_set
					(error,
					 EINVAL,
					 RTE_FLOW_ERROR_TYPE_ITEM,
					 item,
					 "Unable to get default VNIC for VF");
				return -rte_errno;
			}

			filter->mirror_vnic_id = dflt_vnic;
			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
			break;
		default:
			break;
		}
		item = bnxt_flow_non_void_item(item + 1);
	}

	filter->enables = en;
	filter->valid_flags = valid_flags;

	return 0;
}

/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr,
				   "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr,
				   "No support for egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr,
				   "No support for priority.");
		return -rte_errno;
	}

	return 0;
}
static struct bnxt_filter_info *
bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf, *f0;
	struct bnxt_vnic_info *vnic0;
	struct rte_flow *flow;
	int i;

	vnic0 = &bp->vnic_info[0];
	f0 = STAILQ_FIRST(&vnic0->filter);

	/* This flow has same DST MAC as the port/l2 filter. */
	if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
		return f0;

	for (i = bp->max_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;

			if (mf->matching_l2_fltr_ptr)
				continue;

			if (mf->ethertype == nf->ethertype &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    RTE_ETHER_ADDR_LEN))
				return mf;
		}
	}

	return NULL;
}
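
/*
 * Allocate and program a new L2 filter in firmware for this flow. Flows
 * matching on source MAC get a source filter; everything else filters on
 * destination MAC. Destination-MAC flows are also given a priority hint
 * so firmware places them below existing filters (i.e. in the TCAM).
 */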
static struct bnxt_filter_info *
bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		      struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter1;
	int rc;

	/* Alloc new L2 filter.
	 * This flow needs MAC filter which does not match any existing
	 * MAC filters.
	 */
	filter1 = bnxt_get_unused_filter(bp);
	if (filter1 == NULL)
		return NULL;

	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE;
	filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
	    nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) {
		filter1->flags |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
		PMD_DRV_LOG(DEBUG, "Create Outer filter\n");
	}

	if (nf->filter_type == HWRM_CFA_L2_FILTER &&
	    (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
	     nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) {
		PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n");
		filter1->flags |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID;
		memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN);
	} else {
		PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
		memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
	}

	if (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG ||
	    nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG) {
		/* Tell the FW where to place the filter in the table. */
		filter1->pri_hint =
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER;
		/* This will place the filter in TCAM */
		filter1->l2_filter_id_hint = (uint64_t)-1;
	}

	filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			   L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
	memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
				     filter1);
	if (rc) {
		bnxt_free_filter(bp, filter1);
		return NULL;
	}

	filter1->l2_ref_cnt++;
	return filter1;
}
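
/*
 * Return the L2 filter to use for a flow: reuse a matching filter (bumping
 * its reference count) when one exists, otherwise create a fresh one.
 */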
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		   struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *l2_filter = NULL;

	l2_filter = bnxt_find_matching_l2_filter(bp, nf);
	if (l2_filter) {
		l2_filter->l2_ref_cnt++;
		nf->matching_l2_fltr_ptr = l2_filter;
	} else {
		l2_filter = bnxt_create_l2_filter(bp, nf, vnic);
		nf->matching_l2_fltr_ptr = NULL;
	}

	return l2_filter;
}
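
/*
 * Bring up a VNIC for a queue/RSS action: allocate the ring group and the
 * VNIC itself, add an RSS context when more than one ring is involved,
 * then apply VLAN-strip and placement-mode configuration.
 */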
static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	uint64_t rx_offloads = dev_conf->rxmode.offloads;
	int rc;

	rc = bnxt_vnic_grp_alloc(bp, vnic);
	if (rc)
		return rc;

	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM vnic alloc failure rc: %x\n", rc);
		return rc;
	}

	/* RSS context is required only when there is more than one RSS ring */
	if (vnic->rx_queue_cnt > 1) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0 /* ctx_idx 0 */);
		if (rc) {
			PMD_DRV_LOG(ERR,
				    "HWRM vnic ctx alloc failure: %x\n", rc);
			return rc;
		}
	} else {
		PMD_DRV_LOG(DEBUG, "No RSS context required\n");
	}

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vnic->vlan_strip = true;
	else
		vnic->vlan_strip = false;

	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
	if (rc)
		return rc;

	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

	return rc;
}
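
/*
 * Compare a requested RSS action against an already-configured VNIC:
 * queue count, queue state and ring-group membership must all line up,
 * otherwise the flow cannot share that VNIC.
 */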
static int match_vnic_rss_cfg(struct bnxt *bp,
			      struct bnxt_vnic_info *vnic,
			      const struct rte_flow_action_rss *rss)
{
	unsigned int match = 0, i;

	if (vnic->rx_queue_cnt != rss->queue_num)
		return -EINVAL;

	for (i = 0; i < rss->queue_num; i++) {
		if (!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt &&
		    !bp->rx_queues[rss->queue[i]]->rx_started)
			return -EINVAL;
	}

	for (i = 0; i < vnic->rx_queue_cnt; i++) {
		int j;

		for (j = 0; j < vnic->rx_queue_cnt; j++) {
			if (bp->grp_info[rss->queue[i]].fw_grp_id ==
			    vnic->fw_grp_ids[j]) {
				match++;
				break;
			}
		}
	}

	if (match != vnic->rx_queue_cnt) {
		PMD_DRV_LOG(ERR,
			    "VNIC queue count %d vs queues matched %d\n",
			    vnic->rx_queue_cnt, match);
		return -EINVAL;
	}

	return 0;
}
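
/*
 * Copy the relevant attributes of the backing L2 filter into the flow's
 * filter. When no n-tuple fields are requested and only L2 fields are
 * valid, the flow degenerates to a plain L2 filter and inherits filter1
 * wholesale; otherwise only the firmware L2 filter ID and reference count
 * are carried over.
 */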
static void
bnxt_update_filter_flags_en(struct bnxt_filter_info *filter,
			    struct bnxt_filter_info *filter1,
			    int use_ntuple)
{
	if (!use_ntuple &&
	    !(filter->valid_flags &
	      ~(BNXT_FLOW_L2_DST_VALID_FLAG |
		BNXT_FLOW_L2_SRC_VALID_FLAG |
		BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
		BNXT_FLOW_L2_INNER_DST_VALID_FLAG))) {
		filter->flags = filter1->flags;
		filter->enables = filter1->enables;
		filter->filter_type = HWRM_CFA_L2_FILTER;
		memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN);
		memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
		filter->pri_hint = filter1->pri_hint;
		filter->l2_filter_id_hint = filter1->l2_filter_id_hint;
	}
	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
	filter->l2_ref_cnt = filter1->l2_ref_cnt;
	PMD_DRV_LOG(DEBUG,
		"l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n",
		filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt);
}
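
/*
 * Parse the complete flow: validate the attributes, convert the pattern,
 * then apply the single QUEUE/DROP/COUNT/VF/RSS action, which includes
 * preparing the destination VNIC and obtaining the backing L2 filter.
 */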
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error,
			     struct bnxt_filter_info *filter)
{
	const struct rte_flow_action *act =
		bnxt_flow_non_void_action(actions);
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	struct bnxt_filter_info *filter1 = NULL;
	const struct rte_flow_action_rss *rss;
	struct bnxt_vnic_info *vnic, *vnic0;
	struct bnxt_rx_queue *rxq = NULL;
	int dflt_vnic, vnic_id;
	unsigned int rss_idx;
	uint32_t vf = 0, i;
	int rc, use_ntuple;

	rc =
	bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter);
	if (rc != 0)
		goto ret;

	rc = bnxt_flow_parse_attr(attr, error);
	if (rc != 0)
		goto ret;

	/* Since we support ingress attribute only - right now. */
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		/* Allow this flow. Redirect to a VNIC. */
		act_q = (const struct rte_flow_action_queue *)act->conf;
		if (!act_q->index || act_q->index >= bp->rx_nr_rings) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Invalid queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

		vnic_id = attr->group;
		if (!vnic_id) {
			PMD_DRV_LOG(DEBUG, "Group id is 0\n");
			vnic_id = act_q->index;
		}

		vnic = &bp->vnic_info[vnic_id];
		if (vnic == NULL) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "No matching VNIC found.");
			rc = -rte_errno;
			goto ret;
		}

		if (vnic->rx_queue_cnt) {
			if (vnic->start_grp_id != act_q->index) {
				PMD_DRV_LOG(ERR,
					    "VNIC already in use\n");
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "VNIC already in use");
				rc = -rte_errno;
				goto ret;
			}
			goto use_vnic;
		}

		rxq = bp->rx_queues[act_q->index];

		if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
		    vnic->fw_vnic_id != INVALID_HW_RING_ID)
			goto use_vnic;

		if (!rxq ||
		    bp->vnic_info[0].fw_grp_ids[act_q->index] !=
		    INVALID_HW_RING_ID) {
			PMD_DRV_LOG(ERR,
				    "Queue invalid or used with other VNIC\n");
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Queue invalid queue or in use");
			rc = -rte_errno;
			goto ret;
		}

		rxq->vnic = vnic;
		rxq->rx_started = 1;
		vnic->rx_queue_cnt++;
		vnic->start_grp_id = act_q->index;
		vnic->end_grp_id = act_q->index;
		vnic->func_default = 0;	/* This is not a default VNIC. */

		PMD_DRV_LOG(DEBUG, "VNIC found\n");

		rc = bnxt_vnic_prep(bp, vnic);
		if (rc)
			goto ret;

		PMD_DRV_LOG(DEBUG,
			    "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
			    act_q->index, vnic, vnic->fw_grp_ids);

use_vnic:
		vnic->ff_pool_idx = vnic_id;
		PMD_DRV_LOG(DEBUG,
			    "Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n",
			    filter, filter1, filter1->l2_ref_cnt);
		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -rte_errno;
			goto ret;
		}

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			filter->flags =
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
		else
			filter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -rte_errno;
			goto ret;
		}

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		vf = act_vf->id;

		if (filter->tunnel_type ==
		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
		    filter->tunnel_type ==
		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) {
			/* If issued on a VF, ensure id is 0 and is trusted */
			if (BNXT_VF(bp)) {
				if (!BNXT_VF_IS_TRUSTED(bp) || vf) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						act,
						"Incorrect VF id!");
					rc = -rte_errno;
					goto ret;
				}
			}

			filter->enables |= filter->tunnel_type;
			filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER;
			goto done;
		}

		if (vf >= bp->pdev->max_vfs) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Incorrect VF id!");
			rc = -rte_errno;
			goto ret;
		}

		filter->mirror_vnic_id =
		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
		if (dflt_vnic < 0) {
			/* This simply indicates there's no driver loaded.
			 * This is not an error.
			 */
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Unable to get default VNIC for VF");
			rc = -rte_errno;
			goto ret;
		}

		filter->mirror_vnic_id = dflt_vnic;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -rte_errno;
			goto ret;
		}

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		break;
	case RTE_FLOW_ACTION_TYPE_RSS:
		rss = (const struct rte_flow_action_rss *)act->conf;

		vnic_id = attr->group;
		if (!vnic_id) {
			PMD_DRV_LOG(ERR, "Group id cannot be 0\n");
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ATTR,
					   NULL,
					   "Group id cannot be 0");
			rc = -rte_errno;
			goto ret;
		}

		vnic = &bp->vnic_info[vnic_id];
		if (vnic == NULL) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "No matching VNIC for RSS group.");
			rc = -rte_errno;
			goto ret;
		}
		PMD_DRV_LOG(DEBUG, "VNIC found\n");

		/* Check if requested RSS config matches RSS config of VNIC
		 * only if it is not a fresh VNIC configuration.
		 * Otherwise the existing VNIC configuration can be used.
		 */
		if (vnic->rx_queue_cnt) {
			rc = match_vnic_rss_cfg(bp, vnic, rss);
			if (rc) {
				PMD_DRV_LOG(ERR,
					    "VNIC and RSS config mismatch\n");
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "VNIC and RSS cfg mismatch");
				rc = -rte_errno;
				goto ret;
			}
			goto vnic_found;
		}

		for (i = 0; i < rss->queue_num; i++) {
			PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n",
				    rss->queue[i]);

			if (!rss->queue[i] ||
			    rss->queue[i] >= bp->rx_nr_rings ||
			    !bp->rx_queues[rss->queue[i]]) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "Invalid queue ID for RSS");
				rc = -rte_errno;
				goto ret;
			}
			rxq = bp->rx_queues[rss->queue[i]];

			if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] !=
			    INVALID_HW_RING_ID) {
				PMD_DRV_LOG(ERR,
					    "queue active with other VNIC\n");
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "Invalid queue ID for RSS");
				rc = -rte_errno;
				goto ret;
			}

			rxq->vnic = vnic;
			rxq->rx_started = 1;
			vnic->rx_queue_cnt++;
		}

		vnic->start_grp_id = rss->queue[0];
		vnic->end_grp_id = rss->queue[rss->queue_num - 1];
		vnic->func_default = 0;	/* This is not a default VNIC. */

		rc = bnxt_vnic_prep(bp, vnic);
		if (rc)
			goto ret;

		PMD_DRV_LOG(DEBUG,
			    "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
			    vnic_id, vnic, vnic->fw_grp_ids);

		vnic->ff_pool_idx = vnic_id;
		PMD_DRV_LOG(DEBUG,
			    "Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx);

		/* This can be done only after vnic_grp_alloc is done. */
		for (i = 0; i < vnic->rx_queue_cnt; i++) {
			vnic->fw_grp_ids[i] =
				bp->grp_info[rss->queue[i]].fw_grp_id;
			/* Make sure vnic0 does not use these rings. */
			bp->vnic_info[0].fw_grp_ids[rss->queue[i]] =
				INVALID_HW_RING_ID;
		}

		for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ) {
			for (i = 0; i < vnic->rx_queue_cnt; i++)
				vnic->rss_table[rss_idx++] =
					vnic->fw_grp_ids[i];
		}

		/* Configure RSS only if the queue count is > 1 */
		if (vnic->rx_queue_cnt > 1) {
			vnic->hash_type =
				bnxt_rte_to_hwrm_hash_types(rss->types);

			if (!rss->key_len) {
				/* If hash key has not been specified,
				 * use random hash key.
				 */
				prandom_bytes(vnic->rss_hash_key,
					      HW_HASH_KEY_SIZE);
			} else {
				if (rss->key_len > HW_HASH_KEY_SIZE)
					memcpy(vnic->rss_hash_key,
					       rss->key,
					       HW_HASH_KEY_SIZE);
				else
					memcpy(vnic->rss_hash_key,
					       rss->key,
					       rss->key_len);
			}
			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		} else {
			PMD_DRV_LOG(DEBUG, "No RSS config required\n");
		}

vnic_found:
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG, "L2 filter created\n");
		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
		break;
	default:
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

	if (filter1 && !filter->matching_l2_fltr_ptr) {
		bnxt_free_filter(bp, filter1);
		filter1->fw_l2_filter_id = -1;
	}

done:
	act = bnxt_flow_non_void_action(++act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

ret:
	return rc;
}
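
/*
 * rte_flow .validate handler: run the full parse on a scratch filter and
 * throw the filter away, keeping only the verdict.
 */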
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_filter_info *filter;
	int ret = 0;

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0)
		return ret;

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		return -ENOMEM;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	/* No need to hold on to this filter if we are just validating flow */
	filter->fw_l2_filter_id = UINT64_MAX;
	bnxt_free_filter(bp, filter);

	return ret;
}

static void
bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter,
		   struct bnxt_filter_info *new_filter)
{
	/* Clear the new L2 filter that was created in the previous step in
	 * bnxt_validate_and_parse_flow. For L2 filters, we will use the new
	 * filter which points to the new destination queue and so we clear
	 * the previous L2 filter. For ntuple filters, we are going to reuse
	 * the old L2 filter and create new NTUPLE filter with this new
	 * destination queue subsequently during bnxt_flow_create.
	 */
	if (new_filter->filter_type == HWRM_CFA_L2_FILTER) {
		bnxt_hwrm_clear_l2_filter(bp, old_filter);
		bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter);
	} else {
		if (new_filter->filter_type == HWRM_CFA_EM_FILTER)
			bnxt_hwrm_clear_em_filter(bp, old_filter);
		if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			bnxt_hwrm_clear_ntuple_filter(bp, old_filter);
	}
}
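
/*
 * Look for an existing flow with an identical match key. Returns -EEXIST
 * when an identical flow (same destination) is already installed, -EXDEV
 * when only the destination differs (the old filter is then replaced by
 * the new one), and 0 when no matching flow exists.
 */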
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf;
	struct rte_flow *flow;
	int i;

	for (i = bp->max_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;

			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask))) {
				if (mf->dst_id == nf->dst_id)
					return -EEXIST;
				/* Free the old filter, update flow
				 * with new filter
				 */
				bnxt_update_filter(bp, mf, nf);
				STAILQ_REMOVE(&vnic->filter, mf,
					      bnxt_filter_info, next);
				STAILQ_INSERT_TAIL(&vnic->filter, nf, next);
				bnxt_free_filter(bp, mf);
				flow->filter = nf;
				return -EXDEV;
			}
		}
	}

	return 0;
}
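
/*
 * rte_flow .create handler: parse the flow, handle duplicate/update cases
 * reported by bnxt_match_filter(), program tunnel-redirect flows directly,
 * otherwise install the EM or n-tuple filter and link the flow to its VNIC.
 */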
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_filter_info *filter;
	bool update_flow = false;
	struct rte_flow *flow;
	uint32_t tun_type;
	unsigned int i;
	int ret = 0;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow, Not a Trusted VF!");
		return NULL;
	}

	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Not a valid flow.\n");
		goto free_flow;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		goto free_flow;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	if (ret != 0)
		goto free_filter;

	ret = bnxt_match_filter(bp, filter);
	if (ret == -EEXIST) {
		PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
		/* Clear the filter that was created as part of
		 * validate_and_parse_flow() above
		 */
		bnxt_hwrm_clear_l2_filter(bp, filter);
		goto free_filter;
	} else if (ret == -EXDEV) {
		PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
		PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
		update_flow = true;
	}

	/* If tunnel redirection to a VF/PF is specified then only tunnel_type
	 * is set and enable is set to the tunnel type. Issue hwrm cmd directly
	 * in such a case.
	 */
	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Unable to query tunnel to VF");
			goto free_filter;
		}

		if (tun_type == (1U << filter->tunnel_type)) {
			ret =
			bnxt_hwrm_tunnel_redirect_free(bp,
						       filter->tunnel_type);
			if (ret) {
				PMD_DRV_LOG(ERR,
					    "Unable to free existing tunnel\n");
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Unable to free preexisting "
						   "tunnel on VF");
				goto free_filter;
			}
		}

		ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Unable to redirect tunnel to VF");
			goto free_filter;
		}
		vnic = &bp->vnic_info[0];
		goto done;
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
		filter->enables |=
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
	}

	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
		filter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
	}

	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic->fw_vnic_id != INVALID_VNIC_ID &&
		    filter->dst_id == vnic->fw_vnic_id) {
			PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n",
				    vnic->ff_pool_idx);
			break;
		}
	}

done:
	if (!ret || update_flow) {
		flow->filter = filter;
		flow->vnic = vnic;

		/* VNIC is set only in case of queue or RSS action;
		 * RxQ0 is not used for flow filters.
		 */
		if (update_flow) {
			ret = -EXDEV;
			goto free_flow;
		}

		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
		return flow;
	}

free_filter:
	bnxt_free_filter(bp, filter);
free_flow:
	if (ret == -EEXIST)
		rte_flow_error_set(error, ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Matching Flow exists.");
	else if (ret == -EXDEV)
		rte_flow_error_set(error, 0,
				   RTE_FLOW_ERROR_TYPE_NONE, NULL,
				   "Flow with pattern exists, updating destination queue");
	else if (!rte_errno)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");
	rte_free(flow);
	return NULL;
}
static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
					       struct bnxt_filter_info *filter,
					       struct rte_flow_error *error)
{
	uint16_t tun_dst_fid;
	uint32_t tun_type;
	int ret = 0;

	ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Unable to query tunnel to VF");
		return ret;
	}

	if (tun_type == (1U << filter->tunnel_type)) {
		ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
						     &tun_dst_fid);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   NULL,
					   "tunnel_redirect info cmd fail");
			return ret;
		}
		PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
			    tun_dst_fid + bp->first_vf_id, bp->fw_fid);

		/* Tunnel doesn't belong to this VF, so don't send HWRM
		 * cmd, just delete the flow from driver
		 */
		if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id))
			PMD_DRV_LOG(ERR,
				    "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
		else
			ret = bnxt_hwrm_tunnel_redirect_free(bp,
							filter->tunnel_type);
	}

	return ret;
}
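
/*
 * rte_flow .destroy handler: clear the firmware filter that backs the flow
 * and unlink the flow from its VNIC.
 */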
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_filter_info *filter = flow->filter;
	struct bnxt_vnic_info *vnic = flow->vnic;
	int ret = 0;

	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_handle_tunnel_redirect_destroy(bp,
							  filter,
							  error);
		if (!ret)
			goto done;
		else
			return ret;
	}

	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		PMD_DRV_LOG(ERR, "Could not find matching flow\n");

	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
	ret = bnxt_hwrm_clear_l2_filter(bp, filter);

done:
	if (!ret) {
		bnxt_free_filter(bp, filter);
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			struct bnxt_filter_info *filter = flow->filter;

			if (filter->filter_type ==
			    HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
			    filter->enables == filter->tunnel_type) {
				ret =
				bnxt_handle_tunnel_redirect_destroy(bp,
								    filter,
								    error);
				if (!ret)
					goto done;
				else
					return ret;
			}

			if (filter->filter_type == HWRM_CFA_EM_FILTER)
				ret = bnxt_hwrm_clear_em_filter(bp, filter);
			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
				ret = bnxt_hwrm_clear_ntuple_filter(bp,
								    filter);
			if (ret) {
				rte_flow_error_set
					(error,
					 -ret,
					 RTE_FLOW_ERROR_TYPE_HANDLE,
					 NULL,
					 "Failed to flush flow in HW.");
				return -rte_errno;
			}
done:
			bnxt_free_filter(bp, filter);
			STAILQ_REMOVE(&vnic->flow_list, flow,
				      rte_flow, next);
			rte_free(flow);
		}
	}

	return ret;
}

const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};
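
/*
 * For reference, a minimal application-side sketch of exercising these
 * callbacks through the generic rte_flow API. The names "port_id",
 * "pattern" and "actions" below are illustrative, not part of this
 * driver; note the driver rejects queue index 0:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error error;
 *	struct rte_flow *flow;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &error) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions,
 *				       &error);
 */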