/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <rte_malloc.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_vnic.h"
#include "bnxt_util.h"
#include "hsi_struct_def_dpdk.h"
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	if (!pattern) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL,
				   "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL,
				   "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL,
				   "NULL attribute.");
		return -rte_errno;
	}

	return 0;
}
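/*
 * Usage sketch (illustrative only, not part of the driver): an application
 * reaches the validation above through the generic rte_flow API, e.g.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error error;
 *	struct rte_flow *flow;
 *
 *	flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
 *
 * A NULL attr, pattern, or actions array is rejected here before any
 * parsing takes place.
 */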
static const struct rte_flow_item *
bnxt_flow_non_void_item(const struct rte_flow_item *cur)
{
	while (true) {
		if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
			return cur;
		cur++;
	}
}

static const struct rte_flow_action *
bnxt_flow_non_void_action(const struct rte_flow_action *cur)
{
	while (true) {
		if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
			return cur;
		cur++;
	}
}
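/*
 * Per the rte_flow API contract, VOID items and actions are placeholders
 * that drivers must ignore; the two helpers above simply advance past
 * them so the parsers below only ever see meaningful entries.
 */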
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
		       struct rte_flow_error *error __rte_unused)
{
	const struct rte_flow_item *item =
		bnxt_flow_non_void_item(pattern);
	int use_ntuple = 1;
	bool has_vlan = 0;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ANY:
		case RTE_FLOW_ITEM_TYPE_ETH:
			use_ntuple = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			use_ntuple = 0;
			has_vlan = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_TCP:
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* FALLTHROUGH */
			/* need ntuple match, reset exact match */
			use_ntuple |= 1;
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Unknown Flow type\n");
			use_ntuple |= 0;
		}
		item++;
	}

	if (has_vlan && use_ntuple) {
		PMD_DRV_LOG(ERR,
			    "VLAN flow cannot use NTUPLE filter\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "Cannot use VLAN with NTUPLE");
		return -rte_errno;
	}

	return use_ntuple;
}
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
				  const struct rte_flow_attr *attr,
				  const struct rte_flow_item pattern[],
				  struct rte_flow_error *error,
				  struct bnxt_filter_info *filter)
{
	const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_gre *gre_spec;
	const struct rte_flow_item_gre *gre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	const struct rte_flow_item_vf *vf_spec;
	uint32_t tenant_id_be = 0, valid_flags = 0;
	bool vni_masked = 0;
	bool tni_masked = 0;
	uint32_t en_ethertype;
	uint8_t inner = 0;
	uint32_t vf = 0;
	uint32_t en = 0;
	int use_ntuple;
	int dflt_vnic;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	if (use_ntuple < 0)
		return use_ntuple;
	PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
	en_ethertype = use_ntuple ?
		NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
		EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->last) {
			/* last or range is NOT supported as match criteria */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "No support for range");
			return -rte_errno;
		}

		if (!item->spec || !item->mask) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "spec/mask is NULL");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ANY:
			inner =
			((const struct rte_flow_item_any *)item->spec)->num > 3;
			if (inner)
				PMD_DRV_LOG(DEBUG, "Parse inner header\n");
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			if (!item->spec || !item->mask)
				break;

			eth_spec = item->spec;
			eth_mask = item->mask;

			/* Source and destination MAC address masks must
			 * not be partially set: each must be either all
			 * 0's or all 1's.
			 */
			if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
			    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "MAC_addr mask not valid");
				return -rte_errno;
			}

			/* Mask is not allowed. Only exact matches are */
			if (eth_mask->type &&
			    eth_mask->type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "ethertype mask not valid");
				return -rte_errno;
			}

			if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
				if (!rte_is_unicast_ether_addr(&eth_spec->dst)) {
					rte_flow_error_set(error,
							   EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "DMAC is invalid");
					return -rte_errno;
				}
				rte_memcpy(filter->dst_macaddr,
					   &eth_spec->dst, RTE_ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
				valid_flags |= inner ?
					BNXT_FLOW_L2_INNER_DST_VALID_FLAG :
					BNXT_FLOW_L2_DST_VALID_FLAG;
				filter->priority = attr->priority;
				PMD_DRV_LOG(DEBUG,
					    "Creating a priority flow\n");
			}

			if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
				if (!rte_is_unicast_ether_addr(&eth_spec->src)) {
					rte_flow_error_set(error,
							   EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "SMAC is invalid");
					return -rte_errno;
				}
				rte_memcpy(filter->src_macaddr,
					   &eth_spec->src, RTE_ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
				valid_flags |= inner ?
					BNXT_FLOW_L2_INNER_SRC_VALID_FLAG :
					BNXT_FLOW_L2_SRC_VALID_FLAG;
			} /*
			   * else {
			   *  PMD_DRV_LOG(ERR, "Handle this condition\n");
			   * }
			   */
			if (eth_mask->type) {
				filter->ethertype =
					rte_be_to_cpu_16(eth_spec->type);
				en |= en_ethertype;
			}
			if (inner)
				valid_flags |= BNXT_FLOW_PARSE_INNER_FLAG;

			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			if (en & en_ethertype) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN TPID matching is not"
						   " supported");
				return -rte_errno;
			}
			if (vlan_mask->tci &&
			    vlan_mask->tci == RTE_BE16(0x0fff)) {
				/* Only the VLAN ID can be matched. */
				filter->l2_ovlan =
					rte_be_to_cpu_16(vlan_spec->tci &
							 RTE_BE16(0x0fff));
				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
			} else {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN mask is invalid");
				return -rte_errno;
			}
			if (vlan_mask->inner_type &&
			    vlan_mask->inner_type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "inner ethertype mask not"
						   " valid");
				return -rte_errno;
			}
			if (vlan_mask->inner_type) {
				filter->ethertype =
					rte_be_to_cpu_16(vlan_spec->inner_type);
				en |= en_ethertype;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* If mask is not involved, we could use EM filters. */
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.type_of_service ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.time_to_live ||
			    ipv4_mask->hdr.next_proto_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return -rte_errno;
			}

			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

			if (ipv4_mask->hdr.src_addr) {
				filter->src_ipaddr_mask[0] =
					ipv4_mask->hdr.src_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}

			if (ipv4_mask->hdr.dst_addr) {
				filter->dst_ipaddr_mask[0] =
					ipv4_mask->hdr.dst_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}

			filter->ip_addr_type = use_ntuple ?
			 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
			 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;

			if (ipv4_spec->hdr.next_proto_id) {
				filter->ip_protocol =
					ipv4_spec->hdr.next_proto_id;
				if (use_ntuple)
					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
				else
					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv6_mask->hdr.vtc_flow ||
			    ipv6_mask->hdr.payload_len ||
			    ipv6_mask->hdr.proto ||
			    ipv6_mask->hdr.hop_limits) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask.");
				return -rte_errno;
			}

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

			rte_memcpy(filter->src_ipaddr,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->dst_ipaddr,
				   ipv6_spec->hdr.dst_addr, 16);

			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
						   16)) {
				rte_memcpy(filter->src_ipaddr_mask,
					   ipv6_mask->hdr.src_addr, 16);
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}

			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
						   16)) {
				rte_memcpy(filter->dst_ipaddr_mask,
					   ipv6_mask->hdr.dst_addr, 16);
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}

			filter->ip_addr_type = use_ntuple ?
				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Check TCP mask. Only DST & SRC ports are maskable */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return -rte_errno;
			}

			filter->src_port = tcp_spec->hdr.src_port;
			filter->dst_port = tcp_spec->hdr.dst_port;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (tcp_mask->hdr.dst_port) {
				filter->dst_port_mask = tcp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}

			if (tcp_mask->hdr.src_port) {
				filter->src_port_mask = tcp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return -rte_errno;
			}

			filter->src_port = udp_spec->hdr.src_port;
			filter->dst_port = udp_spec->hdr.dst_port;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (udp_mask->hdr.dst_port) {
				filter->dst_port_mask = udp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}

			if (udp_mask->hdr.src_port) {
				filter->src_port_mask = udp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			if (!vxlan_spec && !vxlan_mask) {
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
				break;
			}

			if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
			    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
			    vxlan_spec->flags != 0x8) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (vni_masked) {
					rte_flow_error_set
						(error,
						 EINVAL,
						 RTE_FLOW_ERROR_TYPE_ITEM,
						 item,
						 "Invalid VNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (!nvgre_spec && !nvgre_mask) {
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
				break;
			}

			/* These header fields are big endian on the wire. */
			if (nvgre_spec->c_k_s_rsvd0_ver != RTE_BE16(0x2000) ||
			    nvgre_spec->protocol != RTE_BE16(0x6558)) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec && nvgre_mask) {
				tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (tni_masked) {
					rte_flow_error_set
						(error,
						 EINVAL,
						 RTE_FLOW_ERROR_TYPE_ITEM,
						 item,
						 "Invalid TNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			gre_spec = (const struct rte_flow_item_gre *)item->spec;
			gre_mask = (const struct rte_flow_item_gre *)item->mask;

			/*
			 * Check if GRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if (!!gre_spec ^ !!gre_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GRE item");
				return -rte_errno;
			}

			if (!gre_spec && !gre_mask) {
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE;
				break;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = item->spec;
			vf = vf_spec->id;
			if (!BNXT_PF(bp)) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Configuring on a VF!");
				return -rte_errno;
			}

			if (vf >= bp->pdev->max_vfs) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Incorrect VF id!");
				return -rte_errno;
			}

			if (!attr->transfer) {
				rte_flow_error_set(error,
						   ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Matching VF traffic without"
						   " affecting it (transfer attribute)"
						   " is unsupported");
				return -rte_errno;
			}

			filter->mirror_vnic_id =
			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
			if (dflt_vnic < 0) {
				/* This simply indicates there's no driver
				 * loaded. This is not an error.
				 */
				rte_flow_error_set
					(error,
					 EINVAL,
					 RTE_FLOW_ERROR_TYPE_ITEM,
					 item,
					 "Unable to get default VNIC for VF");
				return -rte_errno;
			}

			filter->mirror_vnic_id = dflt_vnic;
			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
			break;
		default:
			break;
		}
		item++;
	}
	filter->enables = en;
	filter->valid_flags = valid_flags;

	return 0;
}
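/*
 * Illustrative only: a tunnel pattern this parser accepts, e.g. for VXLAN
 * VNI matching, is "eth / ipv4 / udp / vxlan vni is N / end" with an
 * all-ones (0xffffff) VNI mask. The parsed result lands in filter->vni,
 * filter->tunnel_type, and the "en"/"valid_flags" bits consumed by the
 * HWRM filter-alloc requests.
 */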
/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr,
				   "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr,
				   "No support for egress.");
		return -rte_errno;
	}

	return 0;
}
static struct bnxt_filter_info *
bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf, *f0;
	struct bnxt_vnic_info *vnic0;
	struct rte_flow *flow;
	int i;

	vnic0 = &bp->vnic_info[0];
	f0 = STAILQ_FIRST(&vnic0->filter);

	/* This flow has same DST MAC as the port/l2 filter. */
	if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
		return f0;

	/* This flow needs a DST MAC that no existing filter carries. */
	for (i = bp->max_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;

			if (mf->matching_l2_fltr_ptr)
				continue;

			if (mf->ethertype == nf->ethertype &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    RTE_ETHER_ADDR_LEN))
				return mf;
		}
	}

	return NULL;
}
static struct bnxt_filter_info *
bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		      struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter1;
	int rc;

	/* Alloc new L2 filter.
	 * This flow needs MAC filter which does not match any existing
	 * L2 filters.
	 */
	filter1 = bnxt_get_unused_filter(bp);
	if (filter1 == NULL)
		return NULL;

	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE;
	filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
	    nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) {
		filter1->flags |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
		PMD_DRV_LOG(DEBUG, "Create Outer filter\n");
	}

	if (nf->filter_type == HWRM_CFA_L2_FILTER &&
	    (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
	     nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) {
		PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n");
		filter1->flags |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID;
		memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN);
	} else {
		PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
		memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
	}

	if (nf->priority &&
	    (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG ||
	     nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
		/* Tell the FW where to place the filter in the table. */
		if (nf->priority > 65535) {
			filter1->pri_hint =
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER;
			/* This will place the filter in TCAM */
			filter1->l2_filter_id_hint = (uint64_t)-1;
		}
	}

	if (nf->valid_flags & (BNXT_FLOW_L2_DST_VALID_FLAG |
			       BNXT_FLOW_L2_SRC_VALID_FLAG |
			       BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
			       BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
		filter1->enables =
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
		memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
	}

	if (nf->valid_flags & BNXT_FLOW_L2_DROP_FLAG) {
		filter1->flags |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP;
		if (nf->ethertype == RTE_ETHER_TYPE_IPV4) {
			/* Num VLANs for drop filter will/should be 0.
			 * If the req is memset to 0, then the count will
			 * be automatically set to 0.
			 */
			if (nf->valid_flags & BNXT_FLOW_PARSE_INNER_FLAG) {
				filter1->enables |=
					L2_FILTER_ALLOC_INPUT_EN_T_NUM_VLANS;
			} else {
				filter1->enables |=
					L2_FILTER_ALLOC_INPUT_EN_NUM_VLANS;
				filter1->flags |=
				HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
			}
		}
	}

	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
				     filter1);
	if (rc) {
		bnxt_free_filter(bp, filter1);
		return NULL;
	}
	filter1->l2_ref_cnt++;
	return filter1;
}
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		   struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *l2_filter = NULL;

	l2_filter = bnxt_find_matching_l2_filter(bp, nf);
	if (l2_filter) {
		l2_filter->l2_ref_cnt++;
		nf->matching_l2_fltr_ptr = l2_filter;
	} else {
		l2_filter = bnxt_create_l2_filter(bp, nf, vnic);
		nf->matching_l2_fltr_ptr = NULL;
	}

	return l2_filter;
}
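/*
 * L2 filters are shared and reference counted: when a new flow needs the
 * same L2 match as an existing one, the existing filter is reused and its
 * l2_ref_cnt bumped, with matching_l2_fltr_ptr recording the reuse so
 * teardown can tell shared filters from privately created ones.
 */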
static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	uint64_t rx_offloads = dev_conf->rxmode.offloads;
	int rc;

	rc = bnxt_vnic_grp_alloc(bp, vnic);
	if (rc)
		goto ret;

	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM vnic alloc failure rc: %x\n", rc);
		goto ret;
	}
	bp->nr_vnics++;

	/* RSS context is required only when there is more than one RSS ring */
	if (vnic->rx_queue_cnt > 1) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0 /* ctx_idx 0 */);
		if (rc) {
			PMD_DRV_LOG(ERR,
				    "HWRM vnic ctx alloc failure: %x\n", rc);
			goto ret;
		}
	} else {
		PMD_DRV_LOG(DEBUG, "No RSS context required\n");
	}

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vnic->vlan_strip = true;
	else
		vnic->vlan_strip = false;

	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
	if (rc)
		goto ret;

	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
ret:
	return rc;
}
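/*
 * bnxt_vnic_prep() is the bring-up half of redirecting a flow to a
 * dedicated VNIC: ring-group alloc -> HWRM vnic alloc -> optional RSS
 * context -> vnic cfg. All failures funnel through one exit label so the
 * callers below can translate rc into an rte_flow error.
 */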
static int match_vnic_rss_cfg(struct bnxt *bp,
			      struct bnxt_vnic_info *vnic,
			      const struct rte_flow_action_rss *rss)
{
	unsigned int match = 0, i;

	if (vnic->rx_queue_cnt != rss->queue_num)
		return -EINVAL;

	for (i = 0; i < rss->queue_num; i++) {
		if (!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt &&
		    !bp->rx_queues[rss->queue[i]]->rx_started)
			return -EINVAL;
	}

	for (i = 0; i < vnic->rx_queue_cnt; i++) {
		unsigned int j;

		for (j = 0; j < vnic->rx_queue_cnt; j++) {
			if (bp->grp_info[rss->queue[i]].fw_grp_id ==
			    vnic->fw_grp_ids[j]) {
				match++;
				break;
			}
		}
	}

	if (match != vnic->rx_queue_cnt) {
		PMD_DRV_LOG(ERR,
			    "VNIC queue count %d vs queues matched %d\n",
			    vnic->rx_queue_cnt, match);
		return -EINVAL;
	}

	return 0;
}
static void
bnxt_update_filter_flags_en(struct bnxt_filter_info *filter,
			    struct bnxt_filter_info *filter1,
			    int use_ntuple)
{
	if (!use_ntuple &&
	    !(filter->valid_flags &
	      ~(BNXT_FLOW_L2_DST_VALID_FLAG |
		BNXT_FLOW_L2_SRC_VALID_FLAG |
		BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
		BNXT_FLOW_L2_INNER_DST_VALID_FLAG |
		BNXT_FLOW_L2_DROP_FLAG |
		BNXT_FLOW_PARSE_INNER_FLAG))) {
		filter->flags = filter1->flags;
		filter->enables = filter1->enables;
		filter->filter_type = HWRM_CFA_L2_FILTER;
		memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN);
		memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
		filter->pri_hint = filter1->pri_hint;
		filter->l2_filter_id_hint = filter1->l2_filter_id_hint;
	}
	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
	filter->l2_ref_cnt = filter1->l2_ref_cnt;
	PMD_DRV_LOG(DEBUG,
		"l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n",
		filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt);
}
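/*
 * If the flow carries nothing beyond L2 match criteria (no ntuple fields),
 * it is demoted above to a plain HWRM_CFA_L2_FILTER and simply inherits
 * the already-programmed L2 filter's attributes; otherwise only the L2
 * filter id and refcount are linked into the new ntuple/EM filter.
 */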
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error,
			     struct bnxt_filter_info *filter)
{
	const struct rte_flow_action *act =
		bnxt_flow_non_void_action(actions);
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = NULL, *vnic0 = NULL;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	struct bnxt_filter_info *filter1 = NULL;
	const struct rte_flow_action_rss *rss;
	struct bnxt_rx_queue *rxq = NULL;
	int dflt_vnic, vnic_id;
	unsigned int rss_idx;
	uint32_t vf = 0, i;
	int rc, use_ntuple;

	rc =
	bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter);
	if (rc != 0)
		goto ret;

	rc = bnxt_flow_parse_attr(attr, error);
	if (rc != 0)
		goto ret;

	/* Since we support ingress attribute only - right now. */
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		/* Allow this flow. Redirect to a VNIC. */
		act_q = (const struct rte_flow_action_queue *)act->conf;
		if (!act_q->index || act_q->index >= bp->rx_nr_rings) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Invalid queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

		vnic_id = attr->group;
		if (!vnic_id) {
			PMD_DRV_LOG(DEBUG, "Group id is 0\n");
			vnic_id = act_q->index;
		}

		vnic = &bp->vnic_info[vnic_id];
		if (vnic == NULL) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "No matching VNIC found.");
			rc = -rte_errno;
			goto ret;
		}
		if (vnic->rx_queue_cnt) {
			if (vnic->start_grp_id != act_q->index) {
				PMD_DRV_LOG(ERR,
					    "VNIC already in use\n");
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "VNIC already in use");
				rc = -rte_errno;
				goto ret;
			}
			goto use_vnic;
		}

		rxq = bp->rx_queues[act_q->index];

		if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
		    vnic->fw_vnic_id != INVALID_HW_RING_ID)
			goto use_vnic;

		//if (!rxq ||
		//bp->vnic_info[0].fw_grp_ids[act_q->index] !=
		//INVALID_HW_RING_ID ||
		//!rxq->rx_deferred_start) {
		if (!rxq ||
		    bp->vnic_info[0].fw_grp_ids[act_q->index] !=
		    INVALID_HW_RING_ID) {
			PMD_DRV_LOG(ERR,
				    "Queue invalid or used with other VNIC\n");
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Queue invalid queue or in use");
			rc = -rte_errno;
			goto ret;
		}

		rxq->vnic = vnic;
		rxq->rx_started = 1;
		vnic->rx_queue_cnt++;
		vnic->start_grp_id = act_q->index;
		vnic->end_grp_id = act_q->index;
		vnic->func_default = 0;	//This is not a default VNIC.

		PMD_DRV_LOG(DEBUG, "VNIC found\n");

		rc = bnxt_vnic_prep(bp, vnic);
		if (rc) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "VNIC prep fail");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG,
			    "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
			    act_q->index, vnic, vnic->fw_grp_ids);

use_vnic:
		vnic->ff_pool_idx = vnic_id;
		PMD_DRV_LOG(DEBUG,
			    "Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   ENOSPC,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Filter not available");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n",
			    filter, filter1, filter1->l2_ref_cnt);
		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
		break;
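	/*
	 * Illustrative only: the QUEUE action handled above corresponds to
	 * an application request such as
	 *
	 *	struct rte_flow_action_queue queue = { .index = 2 };
	 *	struct rte_flow_action actions[] = {
	 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
	 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
	 *	};
	 *
	 * Queue 0 is rejected here, and any other queue must not already be
	 * owned by a different non-default VNIC.
	 */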
	case RTE_FLOW_ACTION_TYPE_DROP:
		vnic0 = &bp->vnic_info[0];
		filter->dst_id = vnic0->fw_vnic_id;
		filter->valid_flags |= BNXT_FLOW_L2_DROP_FLAG;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   ENOSPC,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Filter not available");
			rc = -rte_errno;
			goto ret;
		}

		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			filter->flags =
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			filter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;

		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   ENOSPC,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "New filter not available");
			rc = -rte_errno;
			goto ret;
		}

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		vf = act_vf->id;

		if (filter->tunnel_type ==
		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
		    filter->tunnel_type ==
		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) {
			/* If issued on a VF, ensure id is 0 and is trusted */
			if (BNXT_VF(bp)) {
				if (!BNXT_VF_IS_TRUSTED(bp) || vf) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						act,
						"Incorrect VF");
					rc = -rte_errno;
					goto ret;
				}
			}

			filter->enables |= filter->tunnel_type;
			filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER;
			goto done;
		}

		if (vf >= bp->pdev->max_vfs) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Incorrect VF id!");
			rc = -rte_errno;
			goto ret;
		}

		filter->mirror_vnic_id =
		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
		if (dflt_vnic < 0) {
			/* This simply indicates there's no driver loaded.
			 * This is not an error.
			 */
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Unable to get default VNIC for VF");
			rc = -rte_errno;
			goto ret;
		}

		filter->mirror_vnic_id = dflt_vnic;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   ENOSPC,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "New filter not available");
			rc = -rte_errno;
			goto ret;
		}

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		break;
	case RTE_FLOW_ACTION_TYPE_RSS:
		rss = (const struct rte_flow_action_rss *)act->conf;

		vnic_id = attr->group;
		if (!vnic_id) {
			PMD_DRV_LOG(ERR, "Group id cannot be 0\n");
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ATTR,
					   NULL,
					   "Group id cannot be 0");
			rc = -rte_errno;
			goto ret;
		}

		vnic = &bp->vnic_info[vnic_id];
		if (vnic == NULL) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "No matching VNIC for RSS group.");
			rc = -rte_errno;
			goto ret;
		}
		PMD_DRV_LOG(DEBUG, "VNIC found\n");

		/* Check if requested RSS config matches RSS config of VNIC
		 * only if it is not a fresh VNIC configuration.
		 * Otherwise the existing VNIC configuration can be used.
		 */
		if (vnic->rx_queue_cnt) {
			rc = match_vnic_rss_cfg(bp, vnic, rss);
			if (rc) {
				PMD_DRV_LOG(ERR,
					    "VNIC and RSS config mismatch\n");
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "VNIC and RSS cfg mismatch");
				rc = -rte_errno;
				goto ret;
			}
			goto vnic_found;
		}

		for (i = 0; i < rss->queue_num; i++) {
			PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n",
				    rss->queue[i]);

			if (!rss->queue[i] ||
			    rss->queue[i] >= bp->rx_nr_rings ||
			    !bp->rx_queues[rss->queue[i]]) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "Invalid queue ID for RSS");
				rc = -rte_errno;
				goto ret;
			}
			rxq = bp->rx_queues[rss->queue[i]];

			//if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] !=
			//INVALID_HW_RING_ID ||
			//!rxq->rx_deferred_start) {
			if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] !=
			    INVALID_HW_RING_ID) {
				PMD_DRV_LOG(ERR,
					    "queue active with other VNIC\n");
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "Invalid queue ID for RSS");
				rc = -rte_errno;
				goto ret;
			}

			rxq->vnic = vnic;
			rxq->rx_started = 1;
			vnic->rx_queue_cnt++;
		}

		vnic->start_grp_id = rss->queue[0];
		vnic->end_grp_id = rss->queue[rss->queue_num - 1];
		vnic->func_default = 0;	//This is not a default VNIC.

		rc = bnxt_vnic_prep(bp, vnic);
		if (rc) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "VNIC prep fail");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG,
			    "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
			    vnic_id, vnic, vnic->fw_grp_ids);

		vnic->ff_pool_idx = vnic_id;
		PMD_DRV_LOG(DEBUG,
			    "Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx);

		/* This can be done only after vnic_grp_alloc is done. */
		for (i = 0; i < vnic->rx_queue_cnt; i++) {
			vnic->fw_grp_ids[i] =
				bp->grp_info[rss->queue[i]].fw_grp_id;
			/* Make sure vnic0 does not use these rings. */
			bp->vnic_info[0].fw_grp_ids[rss->queue[i]] =
				INVALID_HW_RING_ID;
		}

		for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ) {
			for (i = 0; i < vnic->rx_queue_cnt; i++)
				vnic->rss_table[rss_idx++] =
					vnic->fw_grp_ids[i];
		}

		/* Configure RSS only if the queue count is > 1 */
		if (vnic->rx_queue_cnt > 1) {
			vnic->hash_type =
				bnxt_rte_to_hwrm_hash_types(rss->types);

			if (!rss->key_len) {
				/* If hash key has not been specified,
				 * use random hash key.
				 */
				prandom_bytes(vnic->rss_hash_key,
					      HW_HASH_KEY_SIZE);
			} else {
				if (rss->key_len > HW_HASH_KEY_SIZE)
					memcpy(vnic->rss_hash_key,
					       rss->key,
					       HW_HASH_KEY_SIZE);
				else
					memcpy(vnic->rss_hash_key,
					       rss->key,
					       rss->key_len);
			}
			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		} else {
			PMD_DRV_LOG(DEBUG, "No RSS config required\n");
		}

vnic_found:
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   ENOSPC,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "New filter not available");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG, "L2 filter created\n");
		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
		break;
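	/*
	 * Illustrative only: an RSS action that spreads a matched flow over
	 * queues 1-2 with IP hashing (attr.group selects the VNIC):
	 *
	 *	uint16_t queues[] = { 1, 2 };
	 *	struct rte_flow_action_rss rss = {
	 *		.types = ETH_RSS_IP,
	 *		.queue_num = 2,
	 *		.queue = queues,
	 *	};
	 *	struct rte_flow_action actions[] = {
	 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
	 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
	 *	};
	 *
	 * An absent hash key falls back to a random one; queue 0 and queues
	 * already owned by another VNIC are rejected above.
	 */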
	default:
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

	if (filter1 && !filter->matching_l2_fltr_ptr) {
		bnxt_free_filter(bp, filter1);
		filter1->fw_l2_filter_id = -1;
	}

done:
	act = bnxt_flow_non_void_action(++act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

	return rc;

ret:
	//TODO: Cleanup according to ACTION TYPE.
	if (vnic && STAILQ_EMPTY(&vnic->filter))
		vnic->rx_queue_cnt = 0;

	if (rxq && !vnic->rx_queue_cnt)
		rxq->vnic = &bp->vnic_info[0];

	return rc;
}
struct bnxt_vnic_info *find_matching_vnic(struct bnxt *bp,
					  struct bnxt_filter_info *filter)
{
	struct bnxt_vnic_info *vnic = NULL;
	unsigned int i;

	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic->fw_vnic_id != INVALID_VNIC_ID &&
		    filter->dst_id == vnic->fw_vnic_id) {
			PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n",
				    vnic->ff_pool_idx);
			return vnic;
		}
	}

	return NULL;
}
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_filter_info *filter;
	int ret = 0;

	bnxt_acquire_flow_lock(bp);
	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0) {
		bnxt_release_flow_lock(bp);
		return ret;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		bnxt_release_flow_lock(bp);
		return -ENOMEM;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	if (ret)
		goto exit;

	vnic = find_matching_vnic(bp, filter);
	if (vnic) {
		if (STAILQ_EMPTY(&vnic->filter)) {
			rte_free(vnic->fw_grp_ids);
			bnxt_hwrm_vnic_ctx_free(bp, vnic);
			bnxt_hwrm_vnic_free(bp, vnic);
			vnic->rx_queue_cnt = 0;
			PMD_DRV_LOG(DEBUG, "Free VNIC\n");
		}
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		bnxt_hwrm_clear_em_filter(bp, filter);
	else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		bnxt_hwrm_clear_ntuple_filter(bp, filter);
	else
		bnxt_hwrm_clear_l2_filter(bp, filter);

exit:
	/* No need to hold on to this filter if we are just validating flow */
	filter->fw_l2_filter_id = UINT64_MAX;
	bnxt_free_filter(bp, filter);
	bnxt_release_flow_lock(bp);

	return ret;
}
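/*
 * Illustrative only: rte_flow_validate() exercises the full parse path
 * above (including transient HW filter/VNIC setup) and then rolls it all
 * back, so a successful return means an identical create should also
 * succeed, resources permitting:
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &error) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions,
 *				       &error);
 */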
static void
bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter,
		   struct bnxt_filter_info *new_filter)
{
	/* Clear the new L2 filter that was created in the previous step in
	 * bnxt_validate_and_parse_flow. For L2 filters, we will use the new
	 * filter which points to the new destination queue and so we clear
	 * the previous L2 filter. For ntuple filters, we are going to reuse
	 * the old L2 filter and create new NTUPLE filter with this new
	 * destination queue subsequently during bnxt_flow_create.
	 */
	if (new_filter->filter_type == HWRM_CFA_L2_FILTER) {
		bnxt_hwrm_clear_l2_filter(bp, old_filter);
		bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter);
	} else {
		if (new_filter->filter_type == HWRM_CFA_EM_FILTER)
			bnxt_hwrm_clear_em_filter(bp, old_filter);
		if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			bnxt_hwrm_clear_ntuple_filter(bp, old_filter);
	}
}
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf;
	struct rte_flow *flow;
	int i;

	for (i = bp->max_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;

			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask))) {
				if (mf->dst_id == nf->dst_id)
					return -EEXIST;
				/* Free the old filter, update flow
				 * with new filter
				 */
				bnxt_update_filter(bp, mf, nf);
				STAILQ_REMOVE(&vnic->filter, mf,
					      bnxt_filter_info, next);
				STAILQ_INSERT_TAIL(&vnic->filter, nf, next);
				bnxt_free_filter(bp, mf);
				flow->filter = nf;
				return -EXDEV;
			}
		}
	}

	return 0;
}
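/*
 * Return convention used by the create path below: -EEXIST means an
 * identical flow (same match, same destination) is already programmed;
 * -EXDEV means the same match exists with a different destination, in
 * which case the old filter has already been swapped for the new one
 * here.
 */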
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_filter_info *filter;
	bool update_flow = false;
	struct rte_flow *flow;
	int ret = 0;
	uint32_t tun_type;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow, Not a Trusted VF!");
		return NULL;
	}

	if (!dev->data->dev_started) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Device must be started");
		return NULL;
	}

	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	bnxt_acquire_flow_lock(bp);
	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Not a valid flow.\n");
		goto free_flow;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		goto free_flow;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	if (ret != 0)
		goto free_filter;

	ret = bnxt_match_filter(bp, filter);
	if (ret == -EEXIST) {
		PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
		/* Clear the filter that was created as part of
		 * validate_and_parse_flow() above
		 */
		bnxt_hwrm_clear_l2_filter(bp, filter);
		goto free_filter;
	} else if (ret == -EXDEV) {
		PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
		PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
		update_flow = true;
	}

	/* If tunnel redirection to a VF/PF is specified then only tunnel_type
	 * is set and enable is set to the tunnel type. Issue hwrm cmd directly
	 * in such a case.
	 */
	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Unable to query tunnel to VF");
			goto free_filter;
		}
		if (tun_type == (1U << filter->tunnel_type)) {
			ret =
			bnxt_hwrm_tunnel_redirect_free(bp,
						       filter->tunnel_type);
			if (ret) {
				PMD_DRV_LOG(ERR,
					    "Unable to free existing tunnel\n");
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Unable to free preexisting "
						   "tunnel on VF");
				goto free_filter;
			}
		}
		ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Unable to redirect tunnel to VF");
			goto free_filter;
		}
		vnic = &bp->vnic_info[0];
		goto done;
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
		filter->enables |=
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
	}

	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
		filter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
	}

	vnic = find_matching_vnic(bp, filter);
done:
	if (!ret || update_flow) {
		flow->filter = filter;
		flow->vnic = vnic;
		if (update_flow) {
			ret = -EXDEV;
			goto free_flow;
		}

		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
		bnxt_release_flow_lock(bp);
		return flow;
	}

free_filter:
	bnxt_free_filter(bp, filter);
free_flow:
	if (ret == -EEXIST)
		rte_flow_error_set(error, ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Matching Flow exists.");
	else if (ret == -EXDEV)
		rte_flow_error_set(error, 0,
				   RTE_FLOW_ERROR_TYPE_NONE, NULL,
				   "Flow with pattern exists, updating destination queue");
	else
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");
	rte_free(flow);
	flow = NULL;
	bnxt_release_flow_lock(bp);
	return flow;
}
static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
					       struct bnxt_filter_info *filter,
					       struct rte_flow_error *error)
{
	uint16_t tun_dst_fid;
	uint32_t tun_type;
	int ret = 0;

	ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Unable to query tunnel to VF");
		return ret;
	}
	if (tun_type == (1U << filter->tunnel_type)) {
		ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
						     &tun_dst_fid);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   NULL,
					   "tunnel_redirect info cmd fail");
			return ret;
		}
		PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
			    tun_dst_fid + bp->first_vf_id, bp->fw_fid);

		/* Tunnel doesn't belong to this VF, so don't send HWRM
		 * cmd, just delete the flow from driver
		 */
		if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id))
			PMD_DRV_LOG(ERR,
				    "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
		else
			ret = bnxt_hwrm_tunnel_redirect_free(bp,
							filter->tunnel_type);
	}

	return ret;
}
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_filter_info *filter;
	struct bnxt_vnic_info *vnic;
	int ret = 0;

	bnxt_acquire_flow_lock(bp);
	if (!flow) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid flow: failed to destroy flow.");
		bnxt_release_flow_lock(bp);
		return -EINVAL;
	}

	filter = flow->filter;
	vnic = flow->vnic;

	if (!filter) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid flow: failed to destroy flow.");
		bnxt_release_flow_lock(bp);
		return -EINVAL;
	}

	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_handle_tunnel_redirect_destroy(bp,
							  filter,
							  error);
		if (!ret) {
			goto done;
		} else {
			bnxt_release_flow_lock(bp);
			return ret;
		}
	}

	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		PMD_DRV_LOG(ERR, "Could not find matching flow\n");

	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
	ret = bnxt_hwrm_clear_l2_filter(bp, filter);

done:
	if (!ret) {
		/* If it is a L2 drop filter, when the filter is created,
		 * the FW updates the BC/MC records.
		 * Once this filter is removed, issue the set_rx_mask command
		 * to reset the BC/MC records in the HW to the settings
		 * before the drop counter is created.
		 */
		if (filter->valid_flags & BNXT_FLOW_L2_DROP_FLAG)
			bnxt_set_rx_mask_no_vlan(bp, &bp->vnic_info[0]);

		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
		bnxt_free_filter(bp, filter);
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);

		/* If this was the last flow associated with this vnic,
		 * switch the queue back to RSS pool.
		 */
		if (vnic && !vnic->func_default &&
		    STAILQ_EMPTY(&vnic->flow_list)) {
			rte_free(vnic->fw_grp_ids);
			if (vnic->rx_queue_cnt > 1)
				bnxt_hwrm_vnic_ctx_free(bp, vnic);

			bnxt_hwrm_vnic_free(bp, vnic);
			vnic->rx_queue_cnt = 0;
		}
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	bnxt_release_flow_lock(bp);
	return ret;
}
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_filter_info *filter = NULL;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	bnxt_acquire_flow_lock(bp);
	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		while (!STAILQ_EMPTY(&vnic->flow_list)) {
			flow = STAILQ_FIRST(&vnic->flow_list);
			filter = flow->filter;

			if (filter->filter_type ==
			    HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
			    filter->enables == filter->tunnel_type) {
				ret =
				bnxt_handle_tunnel_redirect_destroy(bp,
								    filter,
								    error);
				if (!ret) {
					goto done;
				} else {
					bnxt_release_flow_lock(bp);
					return ret;
				}
			}

			if (filter->filter_type == HWRM_CFA_EM_FILTER)
				ret = bnxt_hwrm_clear_em_filter(bp, filter);
			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
				ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
			else
				ret = bnxt_hwrm_clear_l2_filter(bp, filter);

done:
			if (ret) {
				rte_flow_error_set
					(error,
					 -ret,
					 RTE_FLOW_ERROR_TYPE_HANDLE,
					 NULL,
					 "Failed to flush flow in HW.");
				bnxt_release_flow_lock(bp);
				return -rte_errno;
			}

			STAILQ_REMOVE(&vnic->flow_list, flow,
				      rte_flow, next);

			STAILQ_REMOVE(&vnic->filter,
				      filter, bnxt_filter_info,
				      next);
			bnxt_free_filter(bp, filter);
			rte_free(flow);

			/* If this was the last flow associated with this vnic,
			 * switch the queue back to RSS pool.
			 */
			if (STAILQ_EMPTY(&vnic->flow_list)) {
				rte_free(vnic->fw_grp_ids);
				if (vnic->rx_queue_cnt > 1)
					bnxt_hwrm_vnic_ctx_free(bp, vnic);
				bnxt_hwrm_vnic_free(bp, vnic);
				vnic->rx_queue_cnt = 0;
			}
		}
	}

	bnxt_release_flow_lock(bp);
	return ret;
}
const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};
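/*
 * Applications never call these handlers directly: the generic rte_flow
 * layer dispatches rte_flow_validate()/create()/destroy()/flush() on a
 * bnxt port to this table, with bnxt_acquire_flow_lock() and
 * bnxt_release_flow_lock() serializing all four entry points.
 */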