/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

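/* Validate that the attribute, pattern, and action pointers are non-NULL
 * before any parsing is attempted.
 */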
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	if (!pattern) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL,
				   "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL,
				   "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL,
				   "NULL attribute.");
		return -rte_errno;
	}

	return 0;
}

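/* Skip VOID entries and return the first meaningful item/action.
 * Pattern and action arrays are END-terminated, so these loops always
 * terminate.
 */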
static const struct rte_flow_item *
bnxt_flow_non_void_item(const struct rte_flow_item *cur)
{
	while (1) {
		if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
			return cur;
		cur++;
	}
}

static const struct rte_flow_action *
bnxt_flow_non_void_action(const struct rte_flow_action *cur)
{
	while (1) {
		if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
			return cur;
		cur++;
	}
}

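/* Scan the pattern once to choose the HW filter scheme: L2/exact-match
 * for ETH/VLAN-only patterns, ntuple once L3/L4 items are present.
 * Returns 1 (ntuple), 0 (L2/EM), or -rte_errno on error.
 */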
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
		       struct rte_flow_error *error)
{
	const struct rte_flow_item *item =
		bnxt_flow_non_void_item(pattern);
	int use_ntuple = 1;
	bool has_vlan = 0;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ANY:
		case RTE_FLOW_ITEM_TYPE_ETH:
			use_ntuple = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			use_ntuple = 0;
			has_vlan = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_TCP:
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* need ntuple match, reset exact match */
			use_ntuple |= 1;
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Unknown Flow type\n");
			use_ntuple |= 0;
		}
		item++;
	}

	if (has_vlan && use_ntuple) {
		PMD_DRV_LOG(ERR,
			    "VLAN flow cannot use NTUPLE filter\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "Cannot use VLAN with NTUPLE");
		return -rte_errno;
	}

	return use_ntuple;
}

static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
				  const struct rte_flow_attr *attr,
				  const struct rte_flow_item pattern[],
				  struct rte_flow_error *error,
				  struct bnxt_filter_info *filter)
{
	const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_ether_addr *dst, *src;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_gre *gre_spec;
	const struct rte_flow_item_gre *gre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	const struct rte_flow_item_vf *vf_spec;
	uint32_t tenant_id_be = 0, valid_flags = 0;
	bool vni_masked = 0;
	bool tni_masked = 0;
	uint32_t en_ethertype;
	uint8_t inner = 0;
	uint32_t vf = 0;
	uint32_t en = 0;
	int use_ntuple;
	int dflt_vnic;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	if (use_ntuple < 0)
		return use_ntuple;

	PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_L2_FILTER;
	en_ethertype = use_ntuple ?
		NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
		EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->last) {
			/* last or range is NOT supported as match criteria */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "No support for range");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ANY:
			inner =
			((const struct rte_flow_item_any *)item->spec)->num > 3;
			if (inner)
				PMD_DRV_LOG(DEBUG, "Parse inner header\n");
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			if (!item->spec || !item->mask)
				break;

			eth_spec = item->spec;
			eth_mask = item->mask;

			/* Source MAC address mask cannot be partially set.
			 * Should be all 0's or all 1's.
			 * Destination MAC address mask must not be partially
			 * set. Should be all 1's or all 0's.
			 */
			if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
			    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "MAC_addr mask not valid");
				return -rte_errno;
			}

			/* Mask is not allowed. Only exact matches are */
			if (eth_mask->type &&
			    eth_mask->type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "ethertype mask not valid");
				return -rte_errno;
			}

			if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
				dst = &eth_spec->dst;
				if (!rte_is_valid_assigned_ether_addr(dst)) {
					rte_flow_error_set(error,
							   EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "DMAC is invalid");
					PMD_DRV_LOG(ERR,
						    "DMAC is invalid!\n");
					return -rte_errno;
				}
				rte_memcpy(filter->dst_macaddr,
					   &eth_spec->dst, RTE_ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
				valid_flags |= inner ?
					BNXT_FLOW_L2_INNER_DST_VALID_FLAG :
					BNXT_FLOW_L2_DST_VALID_FLAG;
				filter->priority = attr->priority;
				PMD_DRV_LOG(DEBUG,
					    "Creating a priority flow\n");
			}

			if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
				src = &eth_spec->src;
				if (!rte_is_valid_assigned_ether_addr(src)) {
					rte_flow_error_set(error,
							   EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "SMAC is invalid");
					PMD_DRV_LOG(ERR,
						    "SMAC is invalid!\n");
					return -rte_errno;
				}
				rte_memcpy(filter->src_macaddr,
					   &eth_spec->src, RTE_ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
				valid_flags |= inner ?
					BNXT_FLOW_L2_INNER_SRC_VALID_FLAG :
					BNXT_FLOW_L2_SRC_VALID_FLAG;
			} /*
			   * else {
			   *	PMD_DRV_LOG(ERR, "Handle this condition\n");
			   * }
			   */
			if (eth_mask->type) {
				filter->ethertype =
					rte_be_to_cpu_16(eth_spec->type);
				en |= en_ethertype;
			}
			if (inner)
				valid_flags |= BNXT_FLOW_PARSE_INNER_FLAG;

			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			if (en & en_ethertype) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN TPID matching is not"
						   " supported");
				return -rte_errno;
			}
			if (vlan_mask->tci &&
			    vlan_mask->tci == RTE_BE16(0x0fff)) {
				/* Only the VLAN ID can be matched. */
				filter->l2_ovlan =
					rte_be_to_cpu_16(vlan_spec->tci &
							 RTE_BE16(0x0fff));
				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
			} else {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN mask is invalid");
				return -rte_errno;
			}
			if (vlan_mask->inner_type &&
			    vlan_mask->inner_type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "inner ethertype mask not"
						   " valid");
				return -rte_errno;
			}
			if (vlan_mask->inner_type) {
				filter->ethertype =
					rte_be_to_cpu_16(vlan_spec->inner_type);
				en |= en_ethertype;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* If mask is not involved, we could use EM filters. */
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.type_of_service ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.time_to_live ||
			    ipv4_mask->hdr.next_proto_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return -rte_errno;
			}

			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

			if (ipv4_mask->hdr.src_addr) {
				filter->src_ipaddr_mask[0] =
					ipv4_mask->hdr.src_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}

			if (ipv4_mask->hdr.dst_addr) {
				filter->dst_ipaddr_mask[0] =
					ipv4_mask->hdr.dst_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}

			filter->ip_addr_type = use_ntuple ?
			 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
			 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;

			if (ipv4_spec->hdr.next_proto_id) {
				filter->ip_protocol =
					ipv4_spec->hdr.next_proto_id;
				if (use_ntuple)
					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
				else
					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv6_mask->hdr.vtc_flow ||
			    ipv6_mask->hdr.payload_len ||
			    ipv6_mask->hdr.proto ||
			    ipv6_mask->hdr.hop_limits) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask.");
				return -rte_errno;
			}

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

			rte_memcpy(filter->src_ipaddr,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->dst_ipaddr,
				   ipv6_spec->hdr.dst_addr, 16);

			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
						   16)) {
				rte_memcpy(filter->src_ipaddr_mask,
					   ipv6_mask->hdr.src_addr, 16);
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}

			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
						   16)) {
				rte_memcpy(filter->dst_ipaddr_mask,
					   ipv6_mask->hdr.dst_addr, 16);
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}

			filter->ip_addr_type = use_ntuple ?
				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Check TCP mask. Only DST & SRC ports are maskable */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return -rte_errno;
			}

			filter->src_port = tcp_spec->hdr.src_port;
			filter->dst_port = tcp_spec->hdr.dst_port;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (tcp_mask->hdr.dst_port) {
				filter->dst_port_mask = tcp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}

			if (tcp_mask->hdr.src_port) {
				filter->src_port_mask = tcp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return -rte_errno;
			}

			filter->src_port = udp_spec->hdr.src_port;
			filter->dst_port = udp_spec->hdr.dst_port;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (udp_mask->hdr.dst_port) {
				filter->dst_port_mask = udp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}

			if (udp_mask->hdr.src_port) {
				filter->src_port_mask = udp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			if (!vxlan_spec && !vxlan_mask) {
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
				break;
			}

			if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
			    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
			    vxlan_spec->flags != 0x8) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (vni_masked) {
					rte_flow_error_set
						(error,
						 EINVAL,
						 RTE_FLOW_ERROR_TYPE_ITEM,
						 item,
						 "Invalid VNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (!nvgre_spec && !nvgre_mask) {
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
				break;
			}

			if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
			    nvgre_spec->protocol != 0x6558) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec && nvgre_mask) {
				tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (tni_masked) {
					rte_flow_error_set
						(error,
						 EINVAL,
						 RTE_FLOW_ERROR_TYPE_ITEM,
						 item,
						 "Invalid TNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			gre_spec = (const struct rte_flow_item_gre *)item->spec;
			gre_mask = (const struct rte_flow_item_gre *)item->mask;

			/* Check if GRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if (!!gre_spec ^ !!gre_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GRE item");
				return -rte_errno;
			}

			if (!gre_spec && !gre_mask) {
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE;
				break;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = item->spec;
			vf = vf_spec->id;
			if (!BNXT_PF(bp)) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Configuring on a VF!");
				return -rte_errno;
			}

			if (vf >= bp->pdev->max_vfs) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Incorrect VF id!");
				return -rte_errno;
			}

			if (!attr->transfer) {
				rte_flow_error_set(error,
						   ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Matching VF traffic without"
						   " affecting it (transfer attribute)"
						   " is unsupported");
				return -rte_errno;
			}

			filter->mirror_vnic_id =
			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
			if (dflt_vnic < 0) {
				/* This simply indicates there's no driver
				 * loaded. This is not an error.
				 */
				rte_flow_error_set
					(error,
					 EINVAL,
					 RTE_FLOW_ERROR_TYPE_ITEM,
					 item,
					 "Unable to get default VNIC for VF");
				return -rte_errno;
			}

			filter->mirror_vnic_id = dflt_vnic;
			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
			break;
		default:
			break;
		}
		item++;
	}

	filter->enables = en;
	filter->valid_flags = valid_flags;

	return 0;
}

/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr,
				   "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr,
				   "No support for egress.");
		return -rte_errno;
	}

	return 0;
}

static struct bnxt_filter_info *
bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf, *f0;
	struct bnxt_vnic_info *vnic0;
	int i;

	vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
	f0 = STAILQ_FIRST(&vnic0->filter);

	/* This flow has same DST MAC as the port/l2 filter. */
	if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
		return f0;

	for (i = bp->max_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		STAILQ_FOREACH(mf, &vnic->filter, next) {
			if (mf->matching_l2_fltr_ptr)
				continue;

			if (mf->ethertype == nf->ethertype &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    RTE_ETHER_ADDR_LEN))
				return mf;
		}
	}
	return NULL;
}

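/* Allocate and program a new L2 filter in the firmware for flows whose
 * MAC criteria do not match any existing filter.
 */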
static struct bnxt_filter_info *
bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		      struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter1;
	int rc;

	/* Alloc new L2 filter.
	 * This flow needs MAC filter which does not match any existing
	 * L2 filters.
	 */
	filter1 = bnxt_get_unused_filter(bp);
	if (filter1 == NULL)
		return NULL;

	memcpy(filter1, nf, sizeof(*filter1));

	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE;
	filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
	    nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) {
		filter1->flags |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
		PMD_DRV_LOG(DEBUG, "Create Outer filter\n");
	}

	if (nf->filter_type == HWRM_CFA_L2_FILTER &&
	    (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
	     nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) {
		PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n");
		filter1->flags |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID;
		memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN);
	} else {
		PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
		memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
	}

	if (nf->priority &&
	    (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG ||
	     nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
		/* Tell the FW where to place the filter in the table. */
		if (nf->priority > 65535) {
			filter1->pri_hint =
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER;
			/* This will place the filter in TCAM */
			filter1->l2_filter_id_hint = (uint64_t)-1;
		}
	}

	if (nf->valid_flags & (BNXT_FLOW_L2_DST_VALID_FLAG |
			       BNXT_FLOW_L2_SRC_VALID_FLAG |
			       BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
			       BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
		filter1->enables =
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
		memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
	}

	if (nf->valid_flags & BNXT_FLOW_L2_DROP_FLAG) {
		filter1->flags |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP;
		if (nf->ethertype == RTE_ETHER_TYPE_IPV4) {
			/* Num VLANs for drop filter will/should be 0.
			 * If the req is memset to 0, then the count will
			 * be automatically set to 0.
			 */
			if (nf->valid_flags & BNXT_FLOW_PARSE_INNER_FLAG) {
				filter1->enables |=
					L2_FILTER_ALLOC_INPUT_EN_T_NUM_VLANS;
			} else {
				filter1->enables |=
					L2_FILTER_ALLOC_INPUT_EN_NUM_VLANS;
				filter1->flags |=
				HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
			}
		}
	}

	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
				     filter1);
	if (rc) {
		bnxt_free_filter(bp, filter1);
		return NULL;
	}
	return filter1;
}

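/* Return an L2 filter for this flow: reuse a matching one (bumping its
 * reference count) or create and enqueue a new one on the VNIC.
 */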
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		   struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *l2_filter = NULL;

	l2_filter = bnxt_find_matching_l2_filter(bp, nf);
	if (l2_filter) {
		l2_filter->l2_ref_cnt++;
	} else {
		l2_filter = bnxt_create_l2_filter(bp, nf, vnic);
		if (l2_filter) {
			STAILQ_INSERT_TAIL(&vnic->filter, l2_filter, next);
			l2_filter->vnic = vnic;
		}
	}
	nf->matching_l2_fltr_ptr = l2_filter;

	return l2_filter;
}

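/* Allocate and configure a VNIC in firmware for a flow's destination
 * queue(s), including the RSS context when multiple rings are used.
 */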
static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	uint64_t rx_offloads = dev_conf->rxmode.offloads;
	int rc;

	rc = bnxt_vnic_grp_alloc(bp, vnic);
	if (rc)
		goto done;

	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM vnic alloc failure rc: %x\n", rc);
		goto done;
	}
	bp->nr_vnics++;

	/* RSS context is required only when there is more than one RSS ring */
	if (vnic->rx_queue_cnt > 1) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0 /* ctx_idx 0 */);
		if (rc) {
			PMD_DRV_LOG(ERR,
				    "HWRM vnic ctx alloc failure: %x\n", rc);
			goto done;
		}
	} else {
		PMD_DRV_LOG(DEBUG, "No RSS context required\n");
	}

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vnic->vlan_strip = true;
	else
		vnic->vlan_strip = false;

	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
	if (rc)
		goto done;

	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
done:
	return rc;
}

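/* Verify that a requested RSS action matches the VNIC's existing queue
 * set; returns 0 on match, -EINVAL otherwise.
 */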
static int match_vnic_rss_cfg(struct bnxt *bp,
			      struct bnxt_vnic_info *vnic,
			      const struct rte_flow_action_rss *rss)
{
	unsigned int match = 0, i;

	if (vnic->rx_queue_cnt != rss->queue_num)
		return -EINVAL;

	for (i = 0; i < rss->queue_num; i++) {
		if (!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt &&
		    !bp->rx_queues[rss->queue[i]]->rx_started)
			return -EINVAL;
	}

	for (i = 0; i < vnic->rx_queue_cnt; i++) {
		unsigned int j;

		for (j = 0; j < vnic->rx_queue_cnt; j++) {
			if (bp->grp_info[rss->queue[i]].fw_grp_id ==
			    vnic->fw_grp_ids[j]) {
				match++;
				break;
			}
		}
	}

	if (match != vnic->rx_queue_cnt) {
		PMD_DRV_LOG(ERR,
			    "VNIC queue count %d vs queues matched %d\n",
			    match, vnic->rx_queue_cnt);
		return -EINVAL;
	}

	return 0;
}

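/* Propagate L2 filter attributes from filter1 to the flow's filter.
 * Pure L2 flows inherit the full L2 filter identity; others only take
 * the firmware L2 filter id, ref count, and flow id.
 */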
static void
bnxt_update_filter_flags_en(struct bnxt_filter_info *filter,
			    struct bnxt_filter_info *filter1,
			    int use_ntuple)
{
	if (!use_ntuple &&
	    !(filter->valid_flags &
	      ~(BNXT_FLOW_L2_DST_VALID_FLAG |
		BNXT_FLOW_L2_SRC_VALID_FLAG |
		BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
		BNXT_FLOW_L2_INNER_DST_VALID_FLAG |
		BNXT_FLOW_L2_DROP_FLAG |
		BNXT_FLOW_PARSE_INNER_FLAG))) {
		filter->flags = filter1->flags;
		filter->enables = filter1->enables;
		filter->filter_type = HWRM_CFA_L2_FILTER;
		memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN);
		memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
		filter->pri_hint = filter1->pri_hint;
		filter->l2_filter_id_hint = filter1->l2_filter_id_hint;
	}
	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
	filter->l2_ref_cnt = filter1->l2_ref_cnt;
	filter->flow_id = filter1->flow_id;
	PMD_DRV_LOG(DEBUG,
		"l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n",
		filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt);
}

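/* Parse and validate the complete flow spec: items, attributes, and
 * actions, resolving the destination VNIC and L2 filter for the flow.
 */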
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error,
			     struct bnxt_filter_info *filter)
{
	const struct rte_flow_action *act =
		bnxt_flow_non_void_action(actions);
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = NULL, *vnic0 = NULL;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	struct bnxt_filter_info *filter1 = NULL;
	const struct rte_flow_action_rss *rss;
	struct bnxt_rx_queue *rxq = NULL;
	int dflt_vnic, vnic_id;
	unsigned int rss_idx;
	uint32_t vf = 0, i;
	int rc, use_ntuple;

	rc =
	bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter);
	if (rc != 0)
		goto ret;

	rc = bnxt_flow_parse_attr(attr, error);
	if (rc != 0)
		goto ret;

	/* Since we support ingress attribute only - right now. */
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

	use_ntuple = bnxt_filter_type_check(pattern, error);

start:
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		/* Allow this flow. Redirect to a VNIC. */
		act_q = (const struct rte_flow_action_queue *)act->conf;
		if (!act_q->index || act_q->index >= bp->rx_nr_rings) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Invalid queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

		vnic_id = attr->group;
		if (!vnic_id) {
			PMD_DRV_LOG(DEBUG, "Group id is 0\n");
			vnic_id = act_q->index;
		}

		BNXT_VALID_VNIC_OR_RET(bp, vnic_id);

		vnic = &bp->vnic_info[vnic_id];
		if (vnic->rx_queue_cnt) {
			if (vnic->start_grp_id != act_q->index) {
				PMD_DRV_LOG(ERR,
					    "VNIC already in use\n");
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "VNIC already in use");
				rc = -rte_errno;
				goto ret;
			}
			goto use_vnic;
		}

		rxq = bp->rx_queues[act_q->index];

		if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
		    vnic->fw_vnic_id != INVALID_HW_RING_ID)
			goto use_vnic;

		if (!rxq) {
			PMD_DRV_LOG(ERR,
				    "Queue invalid or used with other VNIC\n");
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Queue invalid queue or in use");
			rc = -rte_errno;
			goto ret;
		}

		rxq->vnic = vnic;
		rxq->rx_started = 1;
		vnic->rx_queue_cnt++;
		vnic->start_grp_id = act_q->index;
		vnic->end_grp_id = act_q->index;
		vnic->func_default = 0;	//This is not a default VNIC.

		PMD_DRV_LOG(DEBUG, "VNIC found\n");

		rc = bnxt_vnic_prep(bp, vnic);
		if (rc) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "VNIC prep fail");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG,
			    "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
			    act_q->index, vnic, vnic->fw_grp_ids);

use_vnic:
		vnic->ff_pool_idx = vnic_id;
		PMD_DRV_LOG(DEBUG,
			    "Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
		filter->dst_id = vnic->fw_vnic_id;

		/* For ntuple filter, create the L2 filter with default VNIC.
		 * The user specified redirect queue will be set while creating
		 * the ntuple filter in hardware.
		 */
		vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
		if (use_ntuple)
			filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		else
			filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   ENOSPC,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Filter not available");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n",
			    filter, filter1, filter1->l2_ref_cnt);
		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		vnic0 = &bp->vnic_info[0];
		filter->dst_id = vnic0->fw_vnic_id;
		filter->valid_flags |= BNXT_FLOW_L2_DROP_FLAG;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   ENOSPC,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Filter not available");
			rc = -rte_errno;
			goto ret;
		}

		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			filter->flags =
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			filter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;

		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   ENOSPC,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "New filter not available");
			rc = -rte_errno;
			goto ret;
		}

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flow_id = filter1->flow_id;
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		vf = act_vf->id;

		if (filter->tunnel_type ==
		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
		    filter->tunnel_type ==
		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) {
			/* If issued on a VF, ensure id is 0 and is trusted */
			if (BNXT_VF(bp)) {
				if (!BNXT_VF_IS_TRUSTED(bp) || vf) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						act,
						"Incorrect VF");
					rc = -rte_errno;
					goto ret;
				}
			}

			filter->enables |= filter->tunnel_type;
			filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER;
			goto done;
		}

		if (vf >= bp->pdev->max_vfs) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Incorrect VF id!");
			rc = -rte_errno;
			goto ret;
		}

		filter->mirror_vnic_id =
		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
		if (dflt_vnic < 0) {
			/* This simply indicates there's no driver
			 * loaded. This is not an error.
			 */
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Unable to get default VNIC for VF");
			rc = -rte_errno;
			goto ret;
		}

		filter->mirror_vnic_id = dflt_vnic;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   ENOSPC,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "New filter not available");
			rc = -rte_errno;
			goto ret;
		}

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flow_id = filter1->flow_id;
		break;
	case RTE_FLOW_ACTION_TYPE_RSS:
		rss = (const struct rte_flow_action_rss *)act->conf;

		vnic_id = attr->group;

		BNXT_VALID_VNIC_OR_RET(bp, vnic_id);

		vnic = &bp->vnic_info[vnic_id];

		/* Check if requested RSS config matches RSS config of VNIC
		 * only if it is not a fresh VNIC configuration.
		 * Otherwise the existing VNIC configuration can be used.
		 */
		if (vnic->rx_queue_cnt) {
			rc = match_vnic_rss_cfg(bp, vnic, rss);
			if (rc) {
				PMD_DRV_LOG(ERR,
					    "VNIC and RSS config mismatch\n");
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "VNIC and RSS cfg mismatch");
				rc = -rte_errno;
				goto ret;
			}
			goto vnic_found;
		}

		for (i = 0; i < rss->queue_num; i++) {
			PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n",
				    rss->queue[i]);

			if (!rss->queue[i] ||
			    rss->queue[i] >= bp->rx_nr_rings ||
			    !bp->rx_queues[rss->queue[i]]) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "Invalid queue ID for RSS");
				rc = -rte_errno;
				goto ret;
			}
			rxq = bp->rx_queues[rss->queue[i]];

			if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] !=
			    INVALID_HW_RING_ID) {
				PMD_DRV_LOG(ERR,
					    "queue active with other VNIC\n");
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "Invalid queue ID for RSS");
				rc = -rte_errno;
				goto ret;
			}

			rxq->vnic = vnic;
			rxq->rx_started = 1;
			vnic->rx_queue_cnt++;
		}

		vnic->start_grp_id = rss->queue[0];
		vnic->end_grp_id = rss->queue[rss->queue_num - 1];
		vnic->func_default = 0;	//This is not a default VNIC.

		rc = bnxt_vnic_prep(bp, vnic);
		if (rc) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "VNIC prep fail");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG,
			    "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
			    vnic_id, vnic, vnic->fw_grp_ids);

		vnic->ff_pool_idx = vnic_id;
		PMD_DRV_LOG(DEBUG,
			    "Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx);

		/* This can be done only after vnic_grp_alloc is done. */
		for (i = 0; i < vnic->rx_queue_cnt; i++) {
			vnic->fw_grp_ids[i] =
				bp->grp_info[rss->queue[i]].fw_grp_id;
			/* Make sure vnic0 does not use these rings. */
			bp->vnic_info[0].fw_grp_ids[rss->queue[i]] =
				INVALID_HW_RING_ID;
		}

		for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ) {
			for (i = 0; i < vnic->rx_queue_cnt; i++)
				vnic->rss_table[rss_idx++] =
					vnic->fw_grp_ids[i];
		}

		/* Configure RSS only if the queue count is > 1 */
		if (vnic->rx_queue_cnt > 1) {
			vnic->hash_type =
				bnxt_rte_to_hwrm_hash_types(rss->types);

			if (!rss->key_len) {
				/* If hash key has not been specified,
				 * use random hash key.
				 */
				prandom_bytes(vnic->rss_hash_key,
					      HW_HASH_KEY_SIZE);
			} else {
				if (rss->key_len > HW_HASH_KEY_SIZE)
					memcpy(vnic->rss_hash_key,
					       rss->key,
					       HW_HASH_KEY_SIZE);
				else
					memcpy(vnic->rss_hash_key,
					       rss->key,
					       rss->key_len);
			}
			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		} else {
			PMD_DRV_LOG(DEBUG, "No RSS config required\n");
		}

vnic_found:
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   ENOSPC,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "New filter not available");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG, "L2 filter created\n");
		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
		break;
	case RTE_FLOW_ACTION_TYPE_MARK:
		if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) {
			PMD_DRV_LOG(DEBUG,
				    "Disable vector processing for mark\n");
			rte_flow_error_set(error,
					   ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Disable vector processing for mark");
			rc = -rte_errno;
			goto ret;
		}

		if (bp->mark_table == NULL) {
			rte_flow_error_set(error,
					   ENOMEM,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Mark table not allocated.");
			rc = -rte_errno;
			goto ret;
		}

		filter->valid_flags |= BNXT_FLOW_MARK_FLAG;
		filter->mark = ((const struct rte_flow_action_mark *)
				act->conf)->id;
		PMD_DRV_LOG(DEBUG, "Mark the flow %d\n", filter->mark);
		break;
	default:
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

done:
	act = bnxt_flow_non_void_action(++act);
	while (act->type != RTE_FLOW_ACTION_TYPE_END)
		goto start;

	return rc;

ret:
	if (filter1) {
		bnxt_hwrm_clear_l2_filter(bp, filter1);
		bnxt_free_filter(bp, filter1);
	}

	if (rte_errno) {
		if (vnic && STAILQ_EMPTY(&vnic->filter))
			vnic->rx_queue_cnt = 0;

		if (rxq && !vnic->rx_queue_cnt)
			rxq->vnic = &bp->vnic_info[0];
	}
	return -rte_errno;
}

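/* Map a filter's destination id back to the VNIC that owns it. */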
struct bnxt_vnic_info *
find_matching_vnic(struct bnxt *bp,
		   struct bnxt_filter_info *filter)
{
	struct bnxt_vnic_info *vnic = NULL;
	unsigned int i;

	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic->fw_vnic_id != INVALID_VNIC_ID &&
		    filter->dst_id == vnic->fw_vnic_id) {
			PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n",
				    vnic->ff_pool_idx);
			return vnic;
		}
	}
	return NULL;
}

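/* rte_flow validate op: parse the flow, then undo any VNIC/filter state
 * created during parsing, since validation must not leave residue.
 */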
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_filter_info *filter;
	int ret = 0;

	bnxt_acquire_flow_lock(bp);
	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0) {
		bnxt_release_flow_lock(bp);
		return ret;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		bnxt_release_flow_lock(bp);
		return -ENOMEM;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	if (ret)
		goto exit;

	vnic = find_matching_vnic(bp, filter);
	if (vnic) {
		if (STAILQ_EMPTY(&vnic->filter)) {
			rte_free(vnic->fw_grp_ids);
			bnxt_hwrm_vnic_ctx_free(bp, vnic);
			bnxt_hwrm_vnic_free(bp, vnic);
			vnic->rx_queue_cnt = 0;
			PMD_DRV_LOG(DEBUG, "Free VNIC\n");
		}
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		bnxt_hwrm_clear_em_filter(bp, filter);
	else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		bnxt_hwrm_clear_ntuple_filter(bp, filter);
	else
		bnxt_hwrm_clear_l2_filter(bp, filter);

exit:
	/* No need to hold on to this filter if we are just validating flow */
	bnxt_free_filter(bp, filter);
	bnxt_release_flow_lock(bp);

	return ret;
}

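/* Swap an old filter for a new one that differs only in destination. */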
static void
bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter,
		   struct bnxt_filter_info *new_filter)
{
	/* Clear the new L2 filter that was created in the previous step in
	 * bnxt_validate_and_parse_flow. For L2 filters, we will use the new
	 * filter which points to the new destination queue and so we clear
	 * the previous L2 filter. For ntuple filters, we are going to reuse
	 * the old L2 filter and create new NTUPLE filter with this new
	 * destination queue subsequently during bnxt_flow_create. So we
	 * decrement the ref cnt of the L2 filter that would've been bumped
	 * up previously in bnxt_validate_and_parse_flow as the old n-tuple
	 * filter that was referencing it will be deleted now.
	 */
	bnxt_hwrm_clear_l2_filter(bp, old_filter);
	if (new_filter->filter_type == HWRM_CFA_L2_FILTER) {
		bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter);
	} else {
		if (new_filter->filter_type == HWRM_CFA_EM_FILTER)
			bnxt_hwrm_clear_em_filter(bp, old_filter);
		if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			bnxt_hwrm_clear_ntuple_filter(bp, old_filter);
	}
}

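/* Check all flows for a duplicate of nf. Returns -EEXIST for an exact
 * duplicate, -EXDEV when only the destination differs (the old filter
 * is then replaced), or 0 when no match is found.
 */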
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf;
	struct rte_flow *flow;
	int i;

	for (i = bp->max_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;

			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask))) {
				if (mf->dst_id == nf->dst_id)
					return -EEXIST;
				/* Free the old filter, update flow
				 * with new filter
				 */
				bnxt_update_filter(bp, mf, nf);
				STAILQ_REMOVE(&vnic->filter, mf,
					      bnxt_filter_info, next);
				STAILQ_INSERT_TAIL(&vnic->filter, nf, next);
				bnxt_free_filter(bp, mf);
				flow->filter = nf;
				return -EXDEV;
			}
		}
	}
	return 0;
}

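/* rte_flow create op: validate and parse the flow, detect duplicates,
 * program the EM/ntuple/tunnel-redirect filter in firmware, and attach
 * the flow to its VNIC's flow list.
 */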
1631 static struct rte_flow *
1632 bnxt_flow_create(struct rte_eth_dev *dev,
1633 const struct rte_flow_attr *attr,
1634 const struct rte_flow_item pattern[],
1635 const struct rte_flow_action actions[],
1636 struct rte_flow_error *error)
1638 struct bnxt *bp = dev->data->dev_private;
1639 struct bnxt_vnic_info *vnic = NULL;
1640 struct bnxt_filter_info *filter;
1641 bool update_flow = false;
1642 struct rte_flow *flow;
1646 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
1647 rte_flow_error_set(error, EINVAL,
1648 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1649 "Failed to create flow, Not a Trusted VF!");
1653 if (!dev->data->dev_started) {
1654 rte_flow_error_set(error,
1656 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1658 "Device must be started");
1662 flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
1664 rte_flow_error_set(error, ENOMEM,
1665 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1666 "Failed to allocate memory");
1670 bnxt_acquire_flow_lock(bp);
1671 ret = bnxt_flow_args_validate(attr, pattern, actions, error);
1673 PMD_DRV_LOG(ERR, "Not a validate flow.\n");
1677 filter = bnxt_get_unused_filter(bp);
1678 if (filter == NULL) {
1679 rte_flow_error_set(error, ENOSPC,
1680 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1681 "Not enough resources for a new flow");
1685 ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
1690 ret = bnxt_match_filter(bp, filter);
1691 if (ret == -EEXIST) {
1692 PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
1693 /* Clear the filter that was created as part of
1694 * validate_and_parse_flow() above
1696 bnxt_hwrm_clear_l2_filter(bp, filter);
1698 } else if (ret == -EXDEV) {
1699 PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
1700 PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
1704 /* If tunnel redirection to a VF/PF is specified then only tunnel_type
1705 * is set and enable is set to the tunnel type. Issue hwrm cmd directly
1708 if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
1709 filter->enables == filter->tunnel_type) {
1710 ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
1712 rte_flow_error_set(error, -ret,
1713 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1714 "Unable to query tunnel to VF");
1717 if (tun_type == (1U << filter->tunnel_type)) {
1719 bnxt_hwrm_tunnel_redirect_free(bp,
1720 filter->tunnel_type);
1723 "Unable to free existing tunnel\n");
1724 rte_flow_error_set(error, -ret,
1725 RTE_FLOW_ERROR_TYPE_HANDLE,
1727 "Unable to free preexisting "
1732 ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type);
1734 rte_flow_error_set(error, -ret,
1735 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1736 "Unable to redirect tunnel to VF");
1739 vnic = &bp->vnic_info[0];
1743 if (filter->filter_type == HWRM_CFA_EM_FILTER) {
1745 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1746 ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
1749 if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
1751 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
1752 ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
1755 vnic = find_matching_vnic(bp, filter);
1757 if (!ret || update_flow) {
1758 flow->filter = filter;
1765 STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
1766 PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
1767 STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
1768 if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
1770 "Mark action: mark id 0x%x, flow id 0x%x\n",
1771 filter->mark, filter->flow_id);
1773 /* TCAM and EM should be 16-bit only.
1774 * Other modes not supported.
1776 bp->mark_table[filter->flow_id & BNXT_FLOW_ID_MASK] =
1779 bnxt_release_flow_lock(bp);
1784 bnxt_free_filter(bp, filter);
1787 rte_flow_error_set(error, ret,
1788 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1789 "Matching Flow exists.");
1790 else if (ret == -EXDEV)
1791 rte_flow_error_set(error, 0,
1792 RTE_FLOW_ERROR_TYPE_NONE, NULL,
1793 "Flow with pattern exists, updating destination queue");
1794 else if (!rte_errno)
1795 rte_flow_error_set(error, -ret,
1796 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1797 "Failed to create flow.");
1800 bnxt_release_flow_lock(bp);
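/* Tear down a tunnel-redirect filter, freeing the firmware redirect
 * entry only when the tunnel actually belongs to this function.
 */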
static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
					       struct bnxt_filter_info *filter,
					       struct rte_flow_error *error)
{
	uint16_t tun_dst_fid;
	uint32_t tun_type;
	int ret = 0;

	ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Unable to query tunnel to VF");
		return ret;
	}
	if (tun_type == (1U << filter->tunnel_type)) {
		ret =
		bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
					       &tun_dst_fid);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   NULL,
					   "tunnel_redirect info cmd fail");
			return ret;
		}
		PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
			    tun_dst_fid + bp->first_vf_id, bp->fw_fid);

		/* Tunnel doesn't belong to this VF, so don't send HWRM
		 * cmd, just delete the flow from driver
		 */
		if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id))
			PMD_DRV_LOG(ERR,
				    "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
		else
			ret = bnxt_hwrm_tunnel_redirect_free(bp,
							filter->tunnel_type);
	}
	return ret;
}

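/* Destroy one flow: clear the HW filters, unlink the flow, and release
 * the VNIC if this was its last flow. Caller holds the flow lock.
 */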
static int
_bnxt_flow_destroy(struct bnxt *bp,
		   struct rte_flow *flow,
		   struct rte_flow_error *error)
{
	struct bnxt_filter_info *filter;
	struct bnxt_vnic_info *vnic;
	int ret = 0;

	filter = flow->filter;
	vnic = flow->vnic;

	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_handle_tunnel_redirect_destroy(bp, filter, error);
		if (!ret)
			goto done;
		else
			return ret;
	}

	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		PMD_DRV_LOG(ERR, "Could not find matching flow\n");

	if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
		bp->mark_table[filter->flow_id & BNXT_FLOW_ID_MASK] = 0;
		filter->flow_id = 0;
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
	ret = bnxt_hwrm_clear_l2_filter(bp, filter);

done:
	if (!ret) {
		/* If it is a L2 drop filter, when the filter is created,
		 * the FW updates the BC/MC records.
		 * Once this filter is removed, issue the set_rx_mask command
		 * to reset the BC/MC records in the HW to the settings
		 * before the drop counter is created.
		 */
		if (filter->valid_flags & BNXT_FLOW_L2_DROP_FLAG)
			bnxt_set_rx_mask_no_vlan(bp, &bp->vnic_info[0]);

		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
		bnxt_free_filter(bp, filter);
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);

		/* If this was the last flow associated with this vnic,
		 * switch the queue back to RSS pool.
		 */
		if (vnic && !vnic->func_default &&
		    STAILQ_EMPTY(&vnic->flow_list)) {
			rte_free(vnic->fw_grp_ids);
			if (vnic->rx_queue_cnt > 1)
				bnxt_hwrm_vnic_ctx_free(bp, vnic);

			bnxt_hwrm_vnic_free(bp, vnic);
			vnic->rx_queue_cnt = 0;
		}
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}

static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	int ret = 0;

	bnxt_acquire_flow_lock(bp);
	if (!flow) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid flow: failed to destroy flow.");
		bnxt_release_flow_lock(bp);
		return -EINVAL;
	}

	if (!flow->filter) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid flow: failed to destroy flow.");
		bnxt_release_flow_lock(bp);
		return -EINVAL;
	}
	ret = _bnxt_flow_destroy(bp, flow, error);
	bnxt_release_flow_lock(bp);

	return ret;
}

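/* rte_flow flush op: destroy every flow on every active VNIC. */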
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	bnxt_acquire_flow_lock(bp);
	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		while (!STAILQ_EMPTY(&vnic->flow_list)) {
			flow = STAILQ_FIRST(&vnic->flow_list);

			if (!flow->filter)
				continue;

			ret = _bnxt_flow_destroy(bp, flow, error);
			if (ret)
				break;
		}
	}
	bnxt_release_flow_lock(bp);

	return ret;
}

const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};

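/* Illustrative sketch only, not part of the driver: a minimal example of
 * how an application reaches the ops table above through the generic
 * rte_flow API. rte_flow_validate()/rte_flow_create() dispatch to
 * bnxt_flow_validate()/bnxt_flow_create() for a bnxt port. The function
 * name, the port/queue ids, and the IPv4 address are made-up values for
 * the example; note that bnxt_validate_and_parse_flow() rejects queue
 * index 0. Guarded by a hypothetical macro so it is never compiled into
 * the PMD.
 */
#ifdef BNXT_FLOW_USAGE_EXAMPLE
static int
example_redirect_ipv4_to_queue(uint16_t port_id, uint16_t queue_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* Match IPv4 packets destined to 192.0.2.1 (exact-match mask). */
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 0, 2, 1)),
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* Redirect matching packets to the given Rx queue. */
	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow;

	if (rte_flow_validate(port_id, &attr, pattern, actions, &err))
		return -1;

	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
	return flow == NULL ? -1 : 0;
}
#endif /* BNXT_FLOW_USAGE_EXAMPLE */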