/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 */

#include <rte_malloc.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	if (!pattern) {
		rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL,
				"NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL,
				"NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR,
				NULL,
				"NULL attribute.");
		return -rte_errno;
	}

	return 0;
}
static const struct rte_flow_item *
bnxt_flow_non_void_item(const struct rte_flow_item *cur)
{
	while (true) {
		if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
			return cur;
		cur++;
	}
}

static const struct rte_flow_action *
bnxt_flow_non_void_action(const struct rte_flow_action *cur)
{
	while (true) {
		if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
			return cur;
		cur++;
	}
}
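/*
 * Illustrative sketch, not part of the upstream driver: because the helpers
 * above skip RTE_FLOW_ITEM_TYPE_VOID entries, a pattern walk built on them
 * only ever sees real match items. The function name below is hypothetical;
 * everything else is the standard rte_flow API.
 */
static inline int
bnxt_example_count_match_items(const struct rte_flow_item pattern[])
{
	const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
	int n = 0;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		n++;	/* VOID items were already skipped by the helper */
		item = bnxt_flow_non_void_item(item + 1);
	}

	return n;
}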
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
		struct rte_flow_error *error)
{
	const struct rte_flow_item *item =
		bnxt_flow_non_void_item(pattern);
	int use_ntuple = 1;
	bool has_vlan = false;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ANY:
		case RTE_FLOW_ITEM_TYPE_ETH:
			use_ntuple = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			use_ntuple = 0;
			has_vlan = true;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_TCP:
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* Need n-tuple match; reset exact match. */
			use_ntuple |= 1;
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Unknown flow type\n");
			use_ntuple |= 0;
		}
		item++;
	}

	if (has_vlan && use_ntuple) {
		PMD_DRV_LOG(ERR,
				"VLAN flow cannot use NTUPLE filter\n");
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"Cannot use VLAN with NTUPLE");
		return -rte_errno;
	}

	return use_ntuple;
}
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		struct rte_flow_error *error,
		struct bnxt_filter_info *filter)
{
	const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_ether_addr *dst, *src;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_gre *gre_spec;
	const struct rte_flow_item_gre *gre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	const struct rte_flow_item_vf *vf_spec;
	uint32_t tenant_id_be = 0, valid_flags = 0;
	bool inner = false;
	uint32_t en_ethertype;
	uint32_t vni_masked = 0, tni_masked = 0;
	uint32_t en = 0, vf = 0;
	int use_ntuple, dflt_vnic;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	if (use_ntuple < 0)
		return use_ntuple;

	PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_L2_FILTER;
	en_ethertype = use_ntuple ?
		NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
		EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->last) {
			/* last or range is NOT supported as match criteria */
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item,
					"No support for range");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ANY:
			inner =
			((const struct rte_flow_item_any *)item->spec)->num > 3;
			if (inner)
				PMD_DRV_LOG(DEBUG, "Parse inner header\n");
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			if (!item->spec || !item->mask)
				break;

			eth_spec = item->spec;
			eth_mask = item->mask;

			/* Source MAC address mask cannot be partially set.
			 * Should be all 0's or all 1's.
			 * Destination MAC address mask must not be partially
			 * set. Should be all 1's or all 0's.
			 */
			if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
			    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error,
						EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"MAC_addr mask not valid");
				return -rte_errno;
			}

			/* A partial mask is not allowed; only exact matches are. */
			if (eth_mask->type &&
			    eth_mask->type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"ethertype mask not valid");
				return -rte_errno;
			}

			if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
				dst = &eth_spec->dst;
				if (!rte_is_valid_assigned_ether_addr(dst)) {
					rte_flow_error_set(error,
							EINVAL,
							RTE_FLOW_ERROR_TYPE_ITEM,
							item,
							"DMAC is invalid!\n");
					return -rte_errno;
				}
				rte_memcpy(filter->dst_macaddr,
						&eth_spec->dst, RTE_ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
				valid_flags |= inner ?
					BNXT_FLOW_L2_INNER_DST_VALID_FLAG :
					BNXT_FLOW_L2_DST_VALID_FLAG;
				filter->priority = attr->priority;
				PMD_DRV_LOG(DEBUG,
						"Creating a priority flow\n");
			}

			if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
				src = &eth_spec->src;
				if (!rte_is_valid_assigned_ether_addr(src)) {
					rte_flow_error_set(error,
							EINVAL,
							RTE_FLOW_ERROR_TYPE_ITEM,
							item,
							"SMAC is invalid!\n");
					return -rte_errno;
				}
				rte_memcpy(filter->src_macaddr,
						&eth_spec->src, RTE_ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
				valid_flags |= inner ?
					BNXT_FLOW_L2_INNER_SRC_VALID_FLAG :
					BNXT_FLOW_L2_SRC_VALID_FLAG;
			} /*
			   * else {
			   *  PMD_DRV_LOG(ERR, "Handle this condition\n");
			   * }
			   */
			if (eth_mask->type) {
				filter->ethertype =
					rte_be_to_cpu_16(eth_spec->type);
				en |= en_ethertype;
			}
			if (inner)
				valid_flags |= BNXT_FLOW_PARSE_INNER_FLAG;

			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			if (en & en_ethertype) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"VLAN TPID matching is not"
						" supported");
				return -rte_errno;
			}
			if (vlan_mask->tci &&
			    vlan_mask->tci == RTE_BE16(0x0fff)) {
				/* Only the VLAN ID can be matched. */
				filter->l2_ovlan =
					rte_be_to_cpu_16(vlan_spec->tci &
							RTE_BE16(0x0fff));
				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
			} else {
				rte_flow_error_set(error,
						EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"VLAN mask is invalid");
				return -rte_errno;
			}
			if (vlan_mask->inner_type &&
			    vlan_mask->inner_type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"inner ethertype mask not"
						" valid");
				return -rte_errno;
			}
			if (vlan_mask->inner_type) {
				filter->ethertype =
					rte_be_to_cpu_16(vlan_spec->inner_type);
				en |= en_ethertype;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* If mask is not involved, we could use EM filters. */
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.type_of_service ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.time_to_live ||
			    ipv4_mask->hdr.next_proto_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error,
						EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid IPv4 mask.");
				return -rte_errno;
			}

			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

			if (ipv4_mask->hdr.src_addr) {
				filter->src_ipaddr_mask[0] =
					ipv4_mask->hdr.src_addr;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}

			if (ipv4_mask->hdr.dst_addr) {
				filter->dst_ipaddr_mask[0] =
					ipv4_mask->hdr.dst_addr;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}

			filter->ip_addr_type = use_ntuple ?
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;

			if (ipv4_spec->hdr.next_proto_id) {
				filter->ip_protocol =
					ipv4_spec->hdr.next_proto_id;
				if (use_ntuple)
					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
				else
					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv6_mask->hdr.vtc_flow ||
			    ipv6_mask->hdr.payload_len ||
			    ipv6_mask->hdr.proto ||
			    ipv6_mask->hdr.hop_limits) {
				rte_flow_error_set(error,
						EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid IPv6 mask.");
				return -rte_errno;
			}

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

			rte_memcpy(filter->src_ipaddr,
					ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->dst_ipaddr,
					ipv6_spec->hdr.dst_addr, 16);

			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
					16)) {
				rte_memcpy(filter->src_ipaddr_mask,
						ipv6_mask->hdr.src_addr, 16);
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}

			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
					16)) {
				rte_memcpy(filter->dst_ipaddr_mask,
						ipv6_mask->hdr.dst_addr, 16);
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}

			filter->ip_addr_type = use_ntuple ?
				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Check TCP mask. Only DST & SRC ports are maskable */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error,
						EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid TCP mask");
				return -rte_errno;
			}

			filter->src_port = tcp_spec->hdr.src_port;
			filter->dst_port = tcp_spec->hdr.dst_port;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (tcp_mask->hdr.dst_port) {
				filter->dst_port_mask = tcp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}

			if (tcp_mask->hdr.src_port) {
				filter->src_port_mask = tcp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error,
						EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid UDP mask");
				return -rte_errno;
			}

			filter->src_port = udp_spec->hdr.src_port;
			filter->dst_port = udp_spec->hdr.dst_port;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (udp_mask->hdr.dst_port) {
				filter->dst_port_mask = udp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}

			if (udp_mask->hdr.src_port) {
				filter->src_port_mask = udp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error,
						EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid VXLAN item");
				return -rte_errno;
			}

			if (!vxlan_spec && !vxlan_mask) {
				filter->tunnel_type =
					CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
				break;
			}

			if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
			    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
			    vxlan_spec->flags != 0x8) {
				rte_flow_error_set(error,
						EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						RTE_DIM(vni_mask));
				if (vni_masked) {
					rte_flow_error_set(error,
							EINVAL,
							RTE_FLOW_ERROR_TYPE_ITEM,
							item,
							"Invalid VNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
						vxlan_spec->vni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
					CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error,
						EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid NVGRE item");
				return -rte_errno;
			}

			if (!nvgre_spec && !nvgre_mask) {
				filter->tunnel_type =
					CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
				break;
			}

			if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
			    nvgre_spec->protocol != 0x6558) {
				rte_flow_error_set(error,
						EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec && nvgre_mask) {
				tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						RTE_DIM(tni_mask));
				if (tni_masked) {
					rte_flow_error_set(error,
							EINVAL,
							RTE_FLOW_ERROR_TYPE_ITEM,
							item,
							"Invalid TNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
						nvgre_spec->tni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
					CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			gre_spec = (const struct rte_flow_item_gre *)item->spec;
			gre_mask = (const struct rte_flow_item_gre *)item->mask;

			/* Check if GRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if (!!gre_spec ^ !!gre_mask) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid GRE item");
				return -rte_errno;
			}

			if (!gre_spec && !gre_mask) {
				filter->tunnel_type =
					CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE;
				break;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = item->spec;
			vf = vf_spec->id;
			if (!BNXT_PF(bp)) {
				rte_flow_error_set(error,
						EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Configuring on a VF!");
				return -rte_errno;
			}

			if (vf >= bp->pdev->max_vfs) {
				rte_flow_error_set(error,
						EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Incorrect VF id!");
				return -rte_errno;
			}

			if (!attr->transfer) {
				rte_flow_error_set(error,
						ENOTSUP,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Matching VF traffic without"
						" affecting it (transfer attribute)"
						" is unsupported");
				return -rte_errno;
			}

			filter->mirror_vnic_id =
			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
			if (dflt_vnic < 0) {
				/* This simply indicates there's no driver
				 * loaded. This is not an error.
				 */
				rte_flow_error_set(error,
						EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Unable to get default VNIC for VF");
				return -rte_errno;
			}

			filter->mirror_vnic_id = dflt_vnic;
			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
			break;
		default:
			break;
		}
		item = bnxt_flow_non_void_item(item + 1);
	}

	filter->enables = en;
	filter->valid_flags = valid_flags;

	return 0;
}
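/*
 * Illustrative sketch, not part of the upstream driver: a VXLAN spec/mask
 * pair shaped the way the parser above expects. The VNI must be fully
 * masked (all 0xFF, matching vni_mask[]) and flags must be 0x8, otherwise
 * the item is rejected with "Invalid VXLAN item"/"Invalid VNI mask".
 * Variable names are hypothetical.
 */
static const struct rte_flow_item_vxlan bnxt_example_vxlan_spec = {
	.flags = 0x8,
	.vni = { 0x00, 0x01, 0x23 },	/* VNI 0x123 in network byte order */
};

static const struct rte_flow_item_vxlan bnxt_example_vxlan_mask = {
	.vni = { 0xFF, 0xFF, 0xFF },	/* VNI must be fully specified */
};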
/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
		struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr,
				"Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				attr,
				"No support for egress.");
		return -rte_errno;
	}

	return 0;
}
static struct bnxt_filter_info *
bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf, *f0;
	struct bnxt_vnic_info *vnic0;
	int i;

	vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
	f0 = STAILQ_FIRST(&vnic0->filter);

	/* This flow has same DST MAC as the port/l2 filter. */
	if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
		return f0;

	for (i = bp->max_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		STAILQ_FOREACH(mf, &vnic->filter, next) {
			if (mf->matching_l2_fltr_ptr)
				continue;

			if (mf->ethertype == nf->ethertype &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    RTE_ETHER_ADDR_LEN))
				return mf;
		}
	}

	return NULL;
}
static struct bnxt_filter_info *
bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter1;
	int rc = 0;

	/* Alloc new L2 filter.
	 * This flow needs MAC filter which does not match any existing
	 * filter.
	 */
	filter1 = bnxt_get_unused_filter(bp);
	if (filter1 == NULL)
		return NULL;

	memcpy(filter1, nf, sizeof(*filter1));

	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE;
	filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
	    nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) {
		filter1->flags |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
		PMD_DRV_LOG(DEBUG, "Create Outer filter\n");
	}

	if (nf->filter_type == HWRM_CFA_L2_FILTER &&
	    (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
	     nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) {
		PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n");
		filter1->flags |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID;
		memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN);
	} else {
		PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
		memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
	}

	if (nf->priority &&
	    (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG ||
	     nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
		/* Tell the FW where to place the filter in the table. */
		if (nf->priority > 65535) {
			filter1->pri_hint =
				HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER;
			/* This will place the filter in TCAM */
			filter1->l2_filter_id_hint = (uint64_t)-1;
		}
	}

	if (nf->valid_flags & (BNXT_FLOW_L2_DST_VALID_FLAG |
			       BNXT_FLOW_L2_SRC_VALID_FLAG |
			       BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
			       BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
		filter1->enables =
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
		memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
	}

	if (nf->valid_flags & BNXT_FLOW_L2_DROP_FLAG) {
		filter1->flags |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP;
		if (nf->ethertype == RTE_ETHER_TYPE_IPV4) {
			/* Num VLANs for drop filter will/should be 0.
			 * If the req is memset to 0, then the count will
			 * be automatically set to 0.
			 */
			if (nf->valid_flags & BNXT_FLOW_PARSE_INNER_FLAG) {
				filter1->enables |=
					L2_FILTER_ALLOC_INPUT_EN_T_NUM_VLANS;
			} else {
				filter1->enables |=
					L2_FILTER_ALLOC_INPUT_EN_NUM_VLANS;
				filter1->flags |=
					HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
			}
		}
	}

	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
			filter1);
	if (rc < 0) {
		bnxt_free_filter(bp, filter1);
		return NULL;
	}

	return filter1;
}
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *l2_filter = NULL;

	l2_filter = bnxt_find_matching_l2_filter(bp, nf);
	if (l2_filter) {
		l2_filter->l2_ref_cnt++;
	} else {
		l2_filter = bnxt_create_l2_filter(bp, nf, vnic);
		if (l2_filter) {
			STAILQ_INSERT_TAIL(&vnic->filter, l2_filter, next);
			l2_filter->vnic = vnic;
		}
	}
	nf->matching_l2_fltr_ptr = l2_filter;

	return l2_filter;
}
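/*
 * Illustrative sketch, not part of the upstream driver: the create-or-reuse
 * contract of bnxt_get_l2_filter(). If two candidate filters carry the same
 * L2 key, the second call only bumps l2_ref_cnt instead of allocating a new
 * hardware filter. The function and parameter names are hypothetical.
 */
static void
bnxt_example_l2_filter_reuse(struct bnxt *bp, struct bnxt_vnic_info *vnic,
		struct bnxt_filter_info *f_a, struct bnxt_filter_info *f_b)
{
	struct bnxt_filter_info *l2_a, *l2_b;

	l2_a = bnxt_get_l2_filter(bp, f_a, vnic);	/* allocates in HW */
	l2_b = bnxt_get_l2_filter(bp, f_b, vnic);	/* same key: reused */

	if (l2_a != NULL && l2_a == l2_b)
		PMD_DRV_LOG(DEBUG, "L2 filter shared, ref cnt %u\n",
				l2_a->l2_ref_cnt);
}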
static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	uint64_t rx_offloads = dev_conf->rxmode.offloads;
	int rc;

	rc = bnxt_vnic_grp_alloc(bp, vnic);
	if (rc)
		goto ret;

	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM vnic alloc failure rc: %x\n", rc);
		goto ret;
	}

	/* RSS context is required only when there is more than one RSS ring */
	if (vnic->rx_queue_cnt > 1) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0 /* ctx_idx 0 */);
		if (rc) {
			PMD_DRV_LOG(ERR,
					"HWRM vnic ctx alloc failure: %x\n", rc);
			goto ret;
		}
	} else {
		PMD_DRV_LOG(DEBUG, "No RSS context required\n");
	}

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vnic->vlan_strip = true;
	else
		vnic->vlan_strip = false;

	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
	if (rc)
		goto ret;

	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);

ret:
	return rc;
}
static int match_vnic_rss_cfg(struct bnxt *bp,
		struct bnxt_vnic_info *vnic,
		const struct rte_flow_action_rss *rss)
{
	unsigned int match = 0, i;

	if (vnic->rx_queue_cnt != rss->queue_num)
		return -EINVAL;

	for (i = 0; i < rss->queue_num; i++) {
		if (!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt &&
		    !bp->rx_queues[rss->queue[i]]->rx_started)
			return -EINVAL;
	}

	for (i = 0; i < vnic->rx_queue_cnt; i++) {
		unsigned int j;

		for (j = 0; j < vnic->rx_queue_cnt; j++) {
			if (bp->grp_info[rss->queue[i]].fw_grp_id ==
			    vnic->fw_grp_ids[j]) {
				match++;
				break;
			}
		}
	}

	if (match != vnic->rx_queue_cnt) {
		PMD_DRV_LOG(ERR,
				"VNIC queue count %d vs queues matched %d\n",
				match, vnic->rx_queue_cnt);
		return -EINVAL;
	}

	return 0;
}
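/*
 * Illustrative sketch, not part of the upstream driver: an RSS action that
 * spreads one flow over queues 1-4, in the shape match_vnic_rss_cfg()
 * compares against an already-configured VNIC. Leaving .key NULL makes the
 * PMD generate a random hash key. The array/variable names are hypothetical.
 */
static const uint16_t bnxt_example_rss_queues[] = { 1, 2, 3, 4 };

static const struct rte_flow_action_rss bnxt_example_rss_conf = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
	.types = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
	.queue_num = RTE_DIM(bnxt_example_rss_queues),
	.queue = bnxt_example_rss_queues,
	.key = NULL,
	.key_len = 0,
};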
static void
bnxt_update_filter_flags_en(struct bnxt_filter_info *filter,
		struct bnxt_filter_info *filter1,
		int use_ntuple)
{
	if (!use_ntuple &&
	    !(filter->valid_flags &
	      ~(BNXT_FLOW_L2_DST_VALID_FLAG |
		BNXT_FLOW_L2_SRC_VALID_FLAG |
		BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
		BNXT_FLOW_L2_INNER_DST_VALID_FLAG |
		BNXT_FLOW_L2_DROP_FLAG |
		BNXT_FLOW_PARSE_INNER_FLAG))) {
		filter->flags = filter1->flags;
		filter->enables = filter1->enables;
		filter->filter_type = HWRM_CFA_L2_FILTER;
		memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN);
		memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
		filter->pri_hint = filter1->pri_hint;
		filter->l2_filter_id_hint = filter1->l2_filter_id_hint;
	}
	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
	filter->l2_ref_cnt = filter1->l2_ref_cnt;
	filter->flow_id = filter1->flow_id;
	PMD_DRV_LOG(DEBUG,
			"l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n",
			filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt);
}
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		const struct rte_flow_attr *attr,
		struct rte_flow_error *error,
		struct bnxt_filter_info *filter)
{
	const struct rte_flow_action *act =
		bnxt_flow_non_void_action(actions);
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = NULL, *vnic0 = NULL;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	struct bnxt_filter_info *filter1 = NULL;
	const struct rte_flow_action_rss *rss;
	struct bnxt_rx_queue *rxq = NULL;
	int dflt_vnic, vnic_id;
	unsigned int rss_idx;
	uint32_t vf = 0, i;
	int rc, use_ntuple;

	rc =
	bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter);
	if (rc != 0)
		goto ret;

	rc = bnxt_flow_parse_attr(attr, error);
	if (rc != 0)
		goto ret;

	/* Only the ingress attribute is supported right now. */
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		/* Allow this flow. Redirect to a VNIC. */
		act_q = (const struct rte_flow_action_queue *)act->conf;
		if (!act_q->index || act_q->index >= bp->rx_nr_rings) {
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					act,
					"Invalid queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

		vnic_id = attr->group;
		if (!vnic_id) {
			PMD_DRV_LOG(DEBUG, "Group id is 0\n");
			vnic_id = act_q->index;
		}

		BNXT_VALID_VNIC_OR_RET(bp, vnic_id);

		vnic = &bp->vnic_info[vnic_id];
		if (vnic->rx_queue_cnt) {
			if (vnic->start_grp_id != act_q->index) {
				PMD_DRV_LOG(ERR,
						"VNIC already in use\n");
				rte_flow_error_set(error,
						EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						act,
						"VNIC already in use");
				rc = -rte_errno;
				goto ret;
			}
			goto use_vnic;
		}

		rxq = bp->rx_queues[act_q->index];

		if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
		    vnic->fw_vnic_id != INVALID_HW_RING_ID)
			goto use_vnic;

		if (!rxq) {
			PMD_DRV_LOG(ERR,
					"Queue invalid or used with other VNIC\n");
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					act,
					"Queue invalid queue or in use");
			rc = -rte_errno;
			goto ret;
		}

		rxq->vnic = vnic;
		rxq->rx_started = 1;
		vnic->rx_queue_cnt++;
		vnic->start_grp_id = act_q->index;
		vnic->end_grp_id = act_q->index;
		vnic->func_default = 0;	/* This is not a default VNIC. */

		PMD_DRV_LOG(DEBUG, "VNIC found\n");

		rc = bnxt_vnic_prep(bp, vnic);
		if (rc) {
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					act,
					"VNIC prep fail");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG,
				"vnic[%d] = %p vnic->fw_grp_ids = %p\n",
				act_q->index, vnic, vnic->fw_grp_ids);

use_vnic:
		vnic->ff_pool_idx = vnic_id;
		PMD_DRV_LOG(DEBUG,
				"Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
		filter->dst_id = vnic->fw_vnic_id;

		/* For ntuple filter, create the L2 filter with default VNIC.
		 * The user specified redirect queue will be set while creating
		 * the ntuple filter in hardware.
		 */
		vnic0 = BNXT_GET_DEFAULT_VNIC(bp);
		if (use_ntuple)
			filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		else
			filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					ENOSPC,
					RTE_FLOW_ERROR_TYPE_ACTION,
					act,
					"Filter not available");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n",
				filter, filter1, filter1->l2_ref_cnt);
		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		vnic0 = &bp->vnic_info[0];
		filter->dst_id = vnic0->fw_vnic_id;
		filter->valid_flags |= BNXT_FLOW_L2_DROP_FLAG;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					ENOSPC,
					RTE_FLOW_ERROR_TYPE_ACTION,
					act,
					"Filter not available");
			rc = -rte_errno;
			goto ret;
		}

		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			filter->flags =
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			filter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;

		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					ENOSPC,
					RTE_FLOW_ERROR_TYPE_ACTION,
					act,
					"New filter not available");
			rc = -rte_errno;
			goto ret;
		}

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flow_id = filter1->flow_id;
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		vf = act_vf->id;

		if (filter->tunnel_type ==
		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
		    filter->tunnel_type ==
		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) {
			/* If issued on a VF, ensure id is 0 and is trusted */
			if (BNXT_VF(bp)) {
				if (!BNXT_VF_IS_TRUSTED(bp) || vf) {
					rte_flow_error_set(error, EINVAL,
							RTE_FLOW_ERROR_TYPE_ACTION,
							act,
							"Incorrect VF");
					rc = -rte_errno;
					goto ret;
				}
			}

			filter->enables |= filter->tunnel_type;
			filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER;
			break;
		}

		if (vf >= bp->pdev->max_vfs) {
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					act,
					"Incorrect VF id!");
			rc = -rte_errno;
			goto ret;
		}

		filter->mirror_vnic_id =
		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
		if (dflt_vnic < 0) {
			/* This simply indicates there's no driver
			 * loaded. This is not an error.
			 */
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					act,
					"Unable to get default VNIC for VF");
			rc = -rte_errno;
			goto ret;
		}

		filter->mirror_vnic_id = dflt_vnic;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					ENOSPC,
					RTE_FLOW_ERROR_TYPE_ACTION,
					act,
					"New filter not available");
			rc = -rte_errno;
			goto ret;
		}

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flow_id = filter1->flow_id;
		break;
	case RTE_FLOW_ACTION_TYPE_RSS:
		rss = (const struct rte_flow_action_rss *)act->conf;

		vnic_id = attr->group;

		BNXT_VALID_VNIC_OR_RET(bp, vnic_id);
		vnic = &bp->vnic_info[vnic_id];

		/* Check if requested RSS config matches RSS config of VNIC
		 * only if it is not a fresh VNIC configuration.
		 * Otherwise the existing VNIC configuration can be used.
		 */
		if (vnic->rx_queue_cnt) {
			rc = match_vnic_rss_cfg(bp, vnic, rss);
			if (rc) {
				PMD_DRV_LOG(ERR,
						"VNIC and RSS config mismatch\n");
				rte_flow_error_set(error,
						EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						act,
						"VNIC and RSS cfg mismatch");
				rc = -rte_errno;
				goto ret;
			}
			goto vnic_found;
		}

		for (i = 0; i < rss->queue_num; i++) {
			PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n",
					rss->queue[i]);

			if (!rss->queue[i] ||
			    rss->queue[i] >= bp->rx_nr_rings ||
			    !bp->rx_queues[rss->queue[i]]) {
				rte_flow_error_set(error,
						EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						act,
						"Invalid queue ID for RSS");
				rc = -rte_errno;
				goto ret;
			}
			rxq = bp->rx_queues[rss->queue[i]];

			if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] !=
			    INVALID_HW_RING_ID) {
				PMD_DRV_LOG(ERR,
						"queue active with other VNIC\n");
				rte_flow_error_set(error,
						EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						act,
						"Invalid queue ID for RSS");
				rc = -rte_errno;
				goto ret;
			}

			rxq->vnic = vnic;
			rxq->rx_started = 1;
			vnic->rx_queue_cnt++;
		}

		vnic->start_grp_id = rss->queue[0];
		vnic->end_grp_id = rss->queue[rss->queue_num - 1];
		vnic->func_default = 0;	/* This is not a default VNIC. */

		rc = bnxt_vnic_prep(bp, vnic);
		if (rc) {
			rte_flow_error_set(error,
					EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION,
					act,
					"VNIC prep fail");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG,
				"vnic[%d] = %p vnic->fw_grp_ids = %p\n",
				vnic_id, vnic, vnic->fw_grp_ids);

		vnic->ff_pool_idx = vnic_id;
		PMD_DRV_LOG(DEBUG,
				"Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx);

		/* This can be done only after vnic_grp_alloc is done. */
		for (i = 0; i < vnic->rx_queue_cnt; i++) {
			vnic->fw_grp_ids[i] =
				bp->grp_info[rss->queue[i]].fw_grp_id;
			/* Make sure vnic0 does not use these rings. */
			bp->vnic_info[0].fw_grp_ids[rss->queue[i]] =
				INVALID_HW_RING_ID;
		}

		for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ) {
			for (i = 0; i < vnic->rx_queue_cnt; i++)
				vnic->rss_table[rss_idx++] =
					vnic->fw_grp_ids[i];
		}

		/* Configure RSS only if the queue count is > 1 */
		if (vnic->rx_queue_cnt > 1) {
			vnic->hash_type =
				bnxt_rte_to_hwrm_hash_types(rss->types);

			if (!rss->key_len) {
				/* If hash key has not been specified,
				 * use random hash key.
				 */
				prandom_bytes(vnic->rss_hash_key,
						HW_HASH_KEY_SIZE);
			} else {
				if (rss->key_len > HW_HASH_KEY_SIZE)
					memcpy(vnic->rss_hash_key,
							rss->key,
							HW_HASH_KEY_SIZE);
				else
					memcpy(vnic->rss_hash_key,
							rss->key,
							rss->key_len);
			}
			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		} else {
			PMD_DRV_LOG(DEBUG, "No RSS config required\n");
		}

vnic_found:
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					ENOSPC,
					RTE_FLOW_ERROR_TYPE_ACTION,
					act,
					"New filter not available");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG, "L2 filter created\n");
		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
		break;
	case RTE_FLOW_ACTION_TYPE_MARK:
		if (bp->flags & BNXT_FLAG_RX_VECTOR_PKT_MODE) {
			PMD_DRV_LOG(DEBUG,
					"Disable vector processing for mark\n");
			rte_flow_error_set(error,
					ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION,
					act,
					"Disable vector processing for mark");
			rc = -rte_errno;
			goto ret;
		}

		if (bp->mark_table == NULL) {
			rte_flow_error_set(error,
					ENOMEM,
					RTE_FLOW_ERROR_TYPE_ACTION,
					act,
					"Mark table not allocated.");
			rc = -rte_errno;
			goto ret;
		}

		filter->valid_flags |= BNXT_FLOW_MARK_FLAG;
		filter->mark = ((const struct rte_flow_action_mark *)
				act->conf)->id;
		PMD_DRV_LOG(DEBUG, "Mark the flow %d\n", filter->mark);
		break;
	default:
		rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act,
				"Invalid action.");
		rc = -rte_errno;
		goto ret;
	}
	act = bnxt_flow_non_void_action(++act);
	while (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act,
				"Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

	return rc;

ret:
	if (filter1) {
		bnxt_hwrm_clear_l2_filter(bp, filter1);
		bnxt_free_filter(bp, filter1);
	}

	if (vnic && STAILQ_EMPTY(&vnic->filter))
		vnic->rx_queue_cnt = 0;

	if (rxq && !vnic->rx_queue_cnt)
		rxq->vnic = &bp->vnic_info[0];

	return rc;
}
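/*
 * Illustrative sketch, not part of the upstream driver: the action array an
 * application passes to reach the RTE_FLOW_ACTION_TYPE_QUEUE arm above.
 * Queue 0 is rejected by the parser, so queue 1 is the first usable target.
 * The names are hypothetical.
 */
static const struct rte_flow_action_queue bnxt_example_queue = {
	.index = 1,
};

static const struct rte_flow_action bnxt_example_queue_actions[] = {
	{
		.type = RTE_FLOW_ACTION_TYPE_QUEUE,
		.conf = &bnxt_example_queue,
	},
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};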
struct bnxt_vnic_info *find_matching_vnic(struct bnxt *bp,
		struct bnxt_filter_info *filter)
{
	struct bnxt_vnic_info *vnic = NULL;
	unsigned int i;

	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic->fw_vnic_id != INVALID_VNIC_ID &&
		    filter->dst_id == vnic->fw_vnic_id) {
			PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n",
					vnic->ff_pool_idx);
			return vnic;
		}
	}

	return NULL;
}
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_filter_info *filter;
	int ret = 0;

	bnxt_acquire_flow_lock(bp);
	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0) {
		bnxt_release_flow_lock(bp);
		return ret;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		bnxt_release_flow_lock(bp);
		return -ENOMEM;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
			error, filter);
	if (ret)
		goto exit;

	vnic = find_matching_vnic(bp, filter);
	if (vnic) {
		if (STAILQ_EMPTY(&vnic->filter)) {
			rte_free(vnic->fw_grp_ids);
			bnxt_hwrm_vnic_ctx_free(bp, vnic);
			bnxt_hwrm_vnic_free(bp, vnic);
			vnic->rx_queue_cnt = 0;
			PMD_DRV_LOG(DEBUG, "Free VNIC\n");
		}
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		bnxt_hwrm_clear_em_filter(bp, filter);
	else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		bnxt_hwrm_clear_ntuple_filter(bp, filter);
	else
		bnxt_hwrm_clear_l2_filter(bp, filter);

exit:
	/* No need to hold on to this filter if we are just validating flow */
	bnxt_free_filter(bp, filter);
	bnxt_release_flow_lock(bp);

	return ret;
}
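/*
 * Illustrative sketch, not part of the upstream driver: an application-side
 * call that lands in bnxt_flow_validate() above via the generic rte_flow
 * API. The attr/pattern/actions arguments are assumed to be built elsewhere
 * (e.g. the example arrays in this file); the function name is hypothetical
 * and PMD_DRV_LOG stands in for application logging.
 */
static int
bnxt_example_validate(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[])
{
	struct rte_flow_error error = { 0 };
	int ret;

	ret = rte_flow_validate(port_id, attr, pattern, actions, &error);
	if (ret)
		PMD_DRV_LOG(ERR, "flow not supported: %s\n",
				error.message ? error.message : "unknown");

	return ret;
}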
static void
bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter,
		struct bnxt_filter_info *new_filter)
{
	/* Clear the new L2 filter that was created in the previous step in
	 * bnxt_validate_and_parse_flow. For L2 filters, we will use the new
	 * filter which points to the new destination queue and so we clear
	 * the previous L2 filter. For ntuple filters, we are going to reuse
	 * the old L2 filter and create new NTUPLE filter with this new
	 * destination queue subsequently during bnxt_flow_create. So we
	 * decrement the ref cnt of the L2 filter that would've been bumped
	 * up previously in bnxt_validate_and_parse_flow as the old n-tuple
	 * filter that was referencing it will be deleted now.
	 */
	bnxt_hwrm_clear_l2_filter(bp, old_filter);
	if (new_filter->filter_type == HWRM_CFA_L2_FILTER) {
		bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter);
	} else {
		if (new_filter->filter_type == HWRM_CFA_EM_FILTER)
			bnxt_hwrm_clear_em_filter(bp, old_filter);
		if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			bnxt_hwrm_clear_ntuple_filter(bp, old_filter);
	}
}
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf;
	struct rte_flow *flow;
	int i;

	for (i = bp->max_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;

			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask))) {
				if (mf->dst_id == nf->dst_id)
					return -EEXIST;
				/* Free the old filter; update the flow
				 * with the new filter.
				 */
				bnxt_update_filter(bp, mf, nf);
				STAILQ_REMOVE(&vnic->filter, mf,
						bnxt_filter_info, next);
				STAILQ_INSERT_TAIL(&vnic->filter, nf, next);
				bnxt_free_filter(bp, mf);
				return -EXDEV;
			}
		}
	}

	return 0;
}
static void
bnxt_setup_flow_counter(struct bnxt *bp)
{
	if (bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_COUNTERS &&
	    !(bp->flags & BNXT_FLAG_FC_THREAD) && BNXT_FLOW_XSTATS_EN(bp)) {
		rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER,
				bnxt_flow_cnt_alarm_cb,
				(void *)bp);
		bp->flags |= BNXT_FLAG_FC_THREAD;
	}
}
void bnxt_flow_cnt_alarm_cb(void *arg)
{
	int rc = 0;
	struct bnxt *bp = arg;

	if (!bp->flow_stat->rx_fc_out_tbl.va) {
		PMD_DRV_LOG(ERR, "bp->flow_stat->rx_fc_out_tbl.va is NULL?\n");
		bnxt_cancel_fc_thread(bp);
		return;
	}

	if (!bp->flow_stat->flow_count) {
		bnxt_cancel_fc_thread(bp);
		return;
	}

	if (!bp->eth_dev->data->dev_started) {
		bnxt_cancel_fc_thread(bp);
		return;
	}

	rc = bnxt_flow_stats_req(bp);
	if (rc) {
		PMD_DRV_LOG(ERR, "Flow stat alarm not rescheduled.\n");
		return;
	}

	rte_eal_alarm_set(US_PER_S * BNXT_FC_TIMER,
			bnxt_flow_cnt_alarm_cb,
			(void *)bp);
}
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_filter_info *filter;
	bool update_flow = false;
	struct rte_flow *flow;
	int ret = 0;
	uint32_t tun_type, flow_id;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to create flow, not a trusted VF!");
		return NULL;
	}

	if (!dev->data->dev_started) {
		rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				NULL,
				"Device must be started");
		return NULL;
	}

	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to allocate memory");
		return flow;
	}

	bnxt_acquire_flow_lock(bp);
	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Not a valid flow.\n");
		goto free_flow;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		rte_flow_error_set(error, ENOSPC,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Not enough resources for a new flow");
		goto free_flow;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
			error, filter);
	if (ret != 0)
		goto free_filter;

	ret = bnxt_match_filter(bp, filter);
	if (ret == -EEXIST) {
		PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
		/* Clear the filter that was created as part of
		 * validate_and_parse_flow() above
		 */
		bnxt_hwrm_clear_l2_filter(bp, filter);
		goto free_filter;
	} else if (ret == -EXDEV) {
		PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
		PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
		update_flow = true;
	}

	/* If tunnel redirection to a VF/PF is specified then only tunnel_type
	 * is set and enable is set to the tunnel type. Issue hwrm cmd directly
	 * in such a case.
	 */
	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
		if (ret) {
			rte_flow_error_set(error, -ret,
					RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					"Unable to query tunnel to VF");
			goto free_filter;
		}
		if (tun_type == (1U << filter->tunnel_type)) {
			ret =
			bnxt_hwrm_tunnel_redirect_free(bp,
					filter->tunnel_type);
			if (ret) {
				PMD_DRV_LOG(ERR,
						"Unable to free existing tunnel\n");
				rte_flow_error_set(error, -ret,
						RTE_FLOW_ERROR_TYPE_HANDLE,
						NULL,
						"Unable to free preexisting "
						"tunnel on VF");
				goto free_filter;
			}
		}
		ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type);
		if (ret) {
			rte_flow_error_set(error, -ret,
					RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					"Unable to redirect tunnel to VF");
			goto free_filter;
		}
		vnic = &bp->vnic_info[0];
		goto done;
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
		filter->enables |=
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
	}

	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
		filter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
	}

	vnic = find_matching_vnic(bp, filter);
done:
	if (!ret || update_flow) {
		flow->filter = filter;
		flow->vnic = vnic;
		if (update_flow) {
			ret = -EXDEV;
			goto free_flow;
		}

		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
		if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
			PMD_DRV_LOG(DEBUG,
					"Mark action: mark id 0x%x, flow id 0x%x\n",
					filter->mark, filter->flow_id);

			/* TCAM and EM should be 16-bit only.
			 * Other modes not supported.
			 */
			flow_id = filter->flow_id & BNXT_FLOW_ID_MASK;
			if (bp->mark_table[flow_id].valid) {
				PMD_DRV_LOG(ERR,
						"Entry for Mark id 0x%x occupied"
						", flow id 0x%x\n",
						filter->mark, filter->flow_id);
			}
			bp->mark_table[flow_id].valid = true;
			bp->mark_table[flow_id].mark_id = filter->mark;
		}
		if (BNXT_FLOW_XSTATS_EN(bp))
			bp->flow_stat->flow_count++;
		bnxt_release_flow_lock(bp);
		bnxt_setup_flow_counter(bp);
		return flow;
	}

free_filter:
	bnxt_free_filter(bp, filter);
free_flow:
	if (ret == -EEXIST)
		rte_flow_error_set(error, ret,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Matching Flow exists.");
	else if (ret == -EXDEV)
		rte_flow_error_set(error, 0,
				RTE_FLOW_ERROR_TYPE_NONE, NULL,
				"Flow with pattern exists, updating destination queue");
	else if (!rte_errno)
		rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to create flow.");
	rte_free(flow);
	flow = NULL;
	bnxt_release_flow_lock(bp);
	return flow;
}
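/*
 * Illustrative sketch, not part of the upstream driver: a MARK action and
 * how the mark comes back on the Rx side. With this PMD the mark is
 * delivered in mbuf->hash.fdir.hi with PKT_RX_FDIR_ID set in ol_flags
 * (vector Rx must be disabled, as enforced in the parser). Assumes struct
 * rte_mbuf is visible here; the names are hypothetical.
 */
static const struct rte_flow_action_mark bnxt_example_mark = {
	.id = 0xbeef,
};

static const struct rte_flow_action bnxt_example_mark_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &bnxt_example_mark },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};

static inline uint32_t
bnxt_example_read_mark(const struct rte_mbuf *m)
{
	if (m->ol_flags & PKT_RX_FDIR_ID)
		return m->hash.fdir.hi;	/* mark id stored by the PMD */

	return UINT32_MAX;		/* no mark on this packet */
}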
static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
		struct bnxt_filter_info *filter,
		struct rte_flow_error *error)
{
	uint16_t tun_dst_fid;
	uint32_t tun_type;
	int ret = 0;

	ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
	if (ret) {
		rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Unable to query tunnel to VF");
		return ret;
	}
	if (tun_type == (1U << filter->tunnel_type)) {
		ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
				&tun_dst_fid);
		if (ret) {
			rte_flow_error_set(error, -ret,
					RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL,
					"tunnel_redirect info cmd fail");
			return ret;
		}
		PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
				tun_dst_fid + bp->first_vf_id, bp->fw_fid);

		/* Tunnel doesn't belong to this VF, so don't send HWRM
		 * cmd, just delete the flow from driver
		 */
		if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id))
			PMD_DRV_LOG(ERR,
					"Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
		else
			ret = bnxt_hwrm_tunnel_redirect_free(bp,
					filter->tunnel_type);
	}

	return ret;
}
static int
_bnxt_flow_destroy(struct bnxt *bp,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct bnxt_filter_info *filter;
	struct bnxt_vnic_info *vnic;
	int ret = 0;
	uint32_t flow_id;

	filter = flow->filter;
	vnic = flow->vnic;

	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_handle_tunnel_redirect_destroy(bp, filter, error);
		if (!ret)
			goto done;
		else
			return ret;
	}

	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		PMD_DRV_LOG(ERR, "Could not find matching flow\n");

	if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
		flow_id = filter->flow_id & BNXT_FLOW_ID_MASK;
		memset(&bp->mark_table[flow_id], 0,
				sizeof(bp->mark_table[flow_id]));
		filter->flow_id = 0;
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
	ret = bnxt_hwrm_clear_l2_filter(bp, filter);

done:
	if (!ret) {
		/* If it is a L2 drop filter, when the filter is created,
		 * the FW updates the BC/MC records.
		 * Once this filter is removed, issue the set_rx_mask command
		 * to reset the BC/MC records in the HW to the settings
		 * before the drop counter is created.
		 */
		if (filter->valid_flags & BNXT_FLOW_L2_DROP_FLAG)
			bnxt_set_rx_mask_no_vlan(bp, &bp->vnic_info[0]);

		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
		bnxt_free_filter(bp, filter);
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);

		if (BNXT_FLOW_XSTATS_EN(bp))
			bp->flow_stat->flow_count--;

		/* If this was the last flow associated with this vnic,
		 * switch the queue back to RSS pool.
		 */
		if (vnic && !vnic->func_default &&
		    STAILQ_EMPTY(&vnic->flow_list)) {
			rte_free(vnic->fw_grp_ids);
			if (vnic->rx_queue_cnt > 1)
				bnxt_hwrm_vnic_ctx_free(bp, vnic);

			bnxt_hwrm_vnic_free(bp, vnic);
			vnic->rx_queue_cnt = 0;
		}
	} else {
		rte_flow_error_set(error, -ret,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Failed to destroy flow.");
	}

	return ret;
}
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	int ret = 0;

	bnxt_acquire_flow_lock(bp);
	if (!flow) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Invalid flow: failed to destroy flow.");
		bnxt_release_flow_lock(bp);
		return -EINVAL;
	}

	if (!flow->filter) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				"Invalid flow: failed to destroy flow.");
		bnxt_release_flow_lock(bp);
		return -EINVAL;
	}
	ret = _bnxt_flow_destroy(bp, flow, error);
	bnxt_release_flow_lock(bp);

	return ret;
}
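/*
 * Illustrative sketch, not part of the upstream driver: tearing one flow
 * down from the application side. rte_flow_destroy() dispatches to
 * bnxt_flow_destroy() above, which takes the flow lock and validates the
 * handle before freeing it. The function name is hypothetical and
 * PMD_DRV_LOG stands in for application logging.
 */
static int
bnxt_example_destroy(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_error error = { 0 };
	int ret;

	ret = rte_flow_destroy(port_id, flow, &error);
	if (ret)
		PMD_DRV_LOG(ERR, "destroy failed: %s\n",
				error.message ? error.message : "unknown");

	return ret;
}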
void bnxt_cancel_fc_thread(struct bnxt *bp)
{
	bp->flags &= ~BNXT_FLAG_FC_THREAD;
	rte_eal_alarm_cancel(bnxt_flow_cnt_alarm_cb, (void *)bp);
}
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	bnxt_acquire_flow_lock(bp);
	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic && vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		while (!STAILQ_EMPTY(&vnic->flow_list)) {
			flow = STAILQ_FIRST(&vnic->flow_list);

			ret = _bnxt_flow_destroy(bp, flow, error);
			if (ret)
				break;
		}
	}

	bnxt_cancel_fc_thread(bp);
	bnxt_release_flow_lock(bp);

	return ret;
}
const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};
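/*
 * Illustrative sketch, not part of the upstream driver: the ops table above
 * is what the generic rte_flow API dispatches to on bnxt ports, so e.g.
 * rte_flow_flush() ends up in bnxt_flow_flush(). A teardown path might use
 * it as below; the function name is hypothetical and PMD_DRV_LOG stands in
 * for application logging.
 */
static int
bnxt_example_flush_all(uint16_t port_id)
{
	struct rte_flow_error error = { 0 };
	int ret;

	ret = rte_flow_flush(port_id, &error);
	if (ret)
		PMD_DRV_LOG(ERR, "flush failed: %s\n",
				error.message ? error.message : "unknown");

	return ret;
}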