/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_vnic.h"
#include "bnxt_util.h"
#include "hsi_struct_def_dpdk.h"
static int32_t
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	if (!pattern) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL,
				   "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL,
				   "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL,
				   "NULL attribute.");
		return -rte_errno;
	}

	return 0;
}
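/*
 * A minimal usage sketch (illustrative, not part of the driver): the
 * generic rte_flow entry points hand attr/pattern/actions straight to
 * the validator above, so a NULL table fails early with the matching
 * RTE_FLOW_ERROR_TYPE_* code. "port_id" and the empty spec below are
 * assumptions for the example only.
 *
 *	struct rte_flow_error error;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	rte_flow_validate(port_id, &attr, pattern, actions, &error);
 */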
static const struct rte_flow_item *
bnxt_flow_non_void_item(const struct rte_flow_item *cur)
{
	while (1) {
		if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
			return cur;
		cur++;
	}
}

static const struct rte_flow_action *
bnxt_flow_non_void_action(const struct rte_flow_action *cur)
{
	while (1) {
		if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
			return cur;
		cur++;
	}
}
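/*
 * Illustration (hypothetical patterns): VOID entries are transparent,
 * so the two tables below parse identically once the helpers above are
 * applied at each step.
 *
 *	struct rte_flow_item p1[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_item p2[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_VOID },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VOID },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */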
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
		       struct rte_flow_error *error __rte_unused)
{
	const struct rte_flow_item *item =
		bnxt_flow_non_void_item(pattern);
	int use_ntuple = 1;
	bool has_vlan = 0;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ANY:
		case RTE_FLOW_ITEM_TYPE_ETH:
			use_ntuple = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			use_ntuple = 0;
			has_vlan = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_TCP:
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* FALLTHROUGH */
			/* need ntuple match, reset exact match */
			use_ntuple |= 1;
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Unknown Flow type\n");
			use_ntuple |= 0;
		}
		item++;
	}

	if (has_vlan && use_ntuple) {
		PMD_DRV_LOG(ERR,
			    "VLAN flow cannot use NTUPLE filter\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Cannot use VLAN with NTUPLE");
		return -rte_errno;
	}

	return use_ntuple;
}
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
				  const struct rte_flow_attr *attr,
				  const struct rte_flow_item pattern[],
				  struct rte_flow_error *error,
				  struct bnxt_filter_info *filter)
{
	const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_gre *gre_spec;
	const struct rte_flow_item_gre *gre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	const struct rte_flow_item_vf *vf_spec;
	uint32_t tenant_id_be = 0, valid_flags = 0;
	bool vni_masked = 0;
	bool tni_masked = 0;
	uint32_t en_ethertype;
	uint8_t inner = 0;
	uint32_t vf = 0;
	uint32_t en = 0;
	int use_ntuple;
	int dflt_vnic;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	if (use_ntuple < 0)
		return use_ntuple;
	PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
	en_ethertype = use_ntuple ?
		NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
		EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->last) {
			/* last or range is NOT supported as match criteria */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "No support for range");
			return -rte_errno;
		}

		if (!item->spec || !item->mask) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "spec/mask is NULL");
			return -rte_errno;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ANY:
			inner =
			((const struct rte_flow_item_any *)item->spec)->num > 3;
			if (inner)
				PMD_DRV_LOG(DEBUG, "Parse inner header\n");
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			if (!item->spec || !item->mask)
				break;

			eth_spec = item->spec;
			eth_mask = item->mask;

			/* Source MAC address mask cannot be partially set.
			 * Should be All 0's or all 1's.
			 * Destination MAC address mask must not be partially
			 * set. Should be all 1's or all 0's.
			 */
			if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
			    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "MAC_addr mask not valid");
				return -rte_errno;
			}

			/* Mask is not allowed. Only exact matches are */
			if (eth_mask->type &&
			    eth_mask->type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "ethertype mask not valid");
				return -rte_errno;
			}

			if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
				if (!rte_is_unicast_ether_addr(&eth_spec->dst)) {
					rte_flow_error_set(error,
							   EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "DMAC is invalid");
					return -rte_errno;
				}
				rte_memcpy(filter->dst_macaddr,
					   &eth_spec->dst, RTE_ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
				valid_flags |= inner ?
					BNXT_FLOW_L2_INNER_DST_VALID_FLAG :
					BNXT_FLOW_L2_DST_VALID_FLAG;
				filter->priority = attr->priority;
				PMD_DRV_LOG(DEBUG,
					    "Creating a priority flow\n");
			}

			if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
				if (!rte_is_unicast_ether_addr(&eth_spec->src)) {
					rte_flow_error_set(error,
							   EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "SMAC is invalid");
					return -rte_errno;
				}
				rte_memcpy(filter->src_macaddr,
					   &eth_spec->src, RTE_ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
				valid_flags |= inner ?
					BNXT_FLOW_L2_INNER_SRC_VALID_FLAG :
					BNXT_FLOW_L2_SRC_VALID_FLAG;
			} /*
			   * else {
			   *	PMD_DRV_LOG(ERR, "Handle this condition\n");
			   * }
			   */
			if (eth_mask->type) {
				filter->ethertype =
					rte_be_to_cpu_16(eth_spec->type);
				en |= en_ethertype;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			if (en & en_ethertype) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN TPID matching is not"
						   " supported");
				return -rte_errno;
			}
			if (vlan_mask->tci &&
			    vlan_mask->tci == RTE_BE16(0x0fff)) {
				/* Only the VLAN ID can be matched. */
				filter->l2_ovlan =
					rte_be_to_cpu_16(vlan_spec->tci &
							 RTE_BE16(0x0fff));
				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
			} else {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN mask is invalid");
				return -rte_errno;
			}
			if (vlan_mask->inner_type &&
			    vlan_mask->inner_type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "inner ethertype mask not"
						   " valid");
				return -rte_errno;
			}
			if (vlan_mask->inner_type) {
				filter->ethertype =
					rte_be_to_cpu_16(vlan_spec->inner_type);
				en |= en_ethertype;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* If mask is not involved, we could use EM filters. */
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.type_of_service ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.time_to_live ||
			    ipv4_mask->hdr.next_proto_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return -rte_errno;
			}

			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

			if (ipv4_mask->hdr.src_addr) {
				filter->src_ipaddr_mask[0] =
					ipv4_mask->hdr.src_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}

			if (ipv4_mask->hdr.dst_addr) {
				filter->dst_ipaddr_mask[0] =
					ipv4_mask->hdr.dst_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}

			filter->ip_addr_type = use_ntuple ?
			 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
			 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;

			if (ipv4_spec->hdr.next_proto_id) {
				filter->ip_protocol =
					ipv4_spec->hdr.next_proto_id;
				if (use_ntuple)
					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
				else
					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv6_mask->hdr.vtc_flow ||
			    ipv6_mask->hdr.payload_len ||
			    ipv6_mask->hdr.proto ||
			    ipv6_mask->hdr.hop_limits) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask.");
				return -rte_errno;
			}

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

			rte_memcpy(filter->src_ipaddr,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->dst_ipaddr,
				   ipv6_spec->hdr.dst_addr, 16);

			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
						   16)) {
				rte_memcpy(filter->src_ipaddr_mask,
					   ipv6_mask->hdr.src_addr, 16);
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}

			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
						   16)) {
				rte_memcpy(filter->dst_ipaddr_mask,
					   ipv6_mask->hdr.dst_addr, 16);
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}

			filter->ip_addr_type = use_ntuple ?
				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Check TCP mask. Only DST & SRC ports are maskable */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return -rte_errno;
			}

			filter->src_port = tcp_spec->hdr.src_port;
			filter->dst_port = tcp_spec->hdr.dst_port;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (tcp_mask->hdr.dst_port) {
				filter->dst_port_mask = tcp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}

			if (tcp_mask->hdr.src_port) {
				filter->src_port_mask = tcp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return -rte_errno;
			}

			filter->src_port = udp_spec->hdr.src_port;
			filter->dst_port = udp_spec->hdr.dst_port;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (udp_mask->hdr.dst_port) {
				filter->dst_port_mask = udp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}

			if (udp_mask->hdr.src_port) {
				filter->src_port_mask = udp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			if (!vxlan_spec && !vxlan_mask) {
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
				break;
			}

			if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
			    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
			    vxlan_spec->flags != 0x8) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (vni_masked) {
					rte_flow_error_set
						(error,
						 EINVAL,
						 RTE_FLOW_ERROR_TYPE_ITEM,
						 item,
						 "Invalid VNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (!nvgre_spec && !nvgre_mask) {
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
				break;
			}

			/* Both header fields are big endian in the item. */
			if (nvgre_spec->c_k_s_rsvd0_ver != RTE_BE16(0x2000) ||
			    nvgre_spec->protocol != RTE_BE16(0x6558)) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec && nvgre_mask) {
				tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (tni_masked) {
					rte_flow_error_set
						(error,
						 EINVAL,
						 RTE_FLOW_ERROR_TYPE_ITEM,
						 item,
						 "Invalid TNI mask");
					return -rte_errno;
				}
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			gre_spec = (const struct rte_flow_item_gre *)item->spec;
			gre_mask = (const struct rte_flow_item_gre *)item->mask;

			/*
			 * Check if GRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if (!!gre_spec ^ !!gre_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GRE item");
				return -rte_errno;
			}

			if (!gre_spec && !gre_mask) {
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE;
				break;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = item->spec;
			vf = vf_spec->id;
			if (!BNXT_PF(bp)) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Configuring on a VF!");
				return -rte_errno;
			}

			if (vf >= bp->pdev->max_vfs) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Incorrect VF id!");
				return -rte_errno;
			}

			if (!attr->transfer) {
				rte_flow_error_set(error,
						   ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Matching VF traffic without"
						   " affecting it (transfer attribute)"
						   " is unsupported");
				return -rte_errno;
			}

			filter->mirror_vnic_id =
			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
			if (dflt_vnic < 0) {
				/* This simply indicates there's no driver
				 * loaded. This is not an error.
				 */
				rte_flow_error_set
					(error,
					 EINVAL,
					 RTE_FLOW_ERROR_TYPE_ITEM,
					 item,
					 "Unable to get default VNIC for VF");
				return -rte_errno;
			}

			filter->mirror_vnic_id = dflt_vnic;
			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
			break;
		default:
			break;
		}
		item++;
	}
	filter->enables = en;
	filter->valid_flags = valid_flags;

	return 0;
}
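/*
 * Example pattern this parser accepts (hypothetical addresses): TCP
 * over IPv4 with fully-masked destination fields. Every non-VOID item
 * must carry both spec and mask, and only the fields whitelisted above
 * may be non-zero in the mask.
 *
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 1)),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
 *	};
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr.dst_port = RTE_BE16(80),
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr.dst_port = RTE_BE16(UINT16_MAX),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */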
/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr,
				   "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr,
				   "No support for egress.");
		return -rte_errno;
	}

	return 0;
}
static struct bnxt_filter_info *
bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf, *f0;
	struct bnxt_vnic_info *vnic0;
	struct rte_flow *flow;
	int i;

	vnic0 = &bp->vnic_info[0];
	f0 = STAILQ_FIRST(&vnic0->filter);

	/* This flow has same DST MAC as the port/l2 filter. */
	if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
		return f0;

	for (i = bp->max_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;

			if (mf->matching_l2_fltr_ptr)
				continue;

			if (mf->ethertype == nf->ethertype &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    RTE_ETHER_ADDR_LEN))
				return mf;
		}
	}
	return NULL;
}
static struct bnxt_filter_info *
bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		      struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter1;
	int rc;

	/* Alloc new L2 filter.
	 * This flow needs MAC filter which does not match any existing
	 * MAC filters.
	 */
	filter1 = bnxt_get_unused_filter(bp);
	if (filter1 == NULL)
		return NULL;

	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE;
	filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
	    nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) {
		filter1->flags |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
		PMD_DRV_LOG(DEBUG, "Create Outer filter\n");
	}

	if (nf->filter_type == HWRM_CFA_L2_FILTER &&
	    (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
	     nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) {
		PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n");
		filter1->flags |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID;
		memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN);
	} else {
		PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
		memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
	}

	if (nf->priority &&
	    (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG ||
	     nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
		/* Tell the FW where to place the filter in the table. */
		if (nf->priority > 65535) {
			filter1->pri_hint =
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER;
			/* This will place the filter in TCAM */
			filter1->l2_filter_id_hint = (uint64_t)-1;
		}
	}

	filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
	memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
				     filter1);
	if (rc < 0) {
		bnxt_free_filter(bp, filter1);
		return NULL;
	}
	filter1->l2_ref_cnt++;
	return filter1;
}
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		   struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *l2_filter = NULL;

	l2_filter = bnxt_find_matching_l2_filter(bp, nf);
	if (l2_filter) {
		l2_filter->l2_ref_cnt++;
		nf->matching_l2_fltr_ptr = l2_filter;
	} else {
		l2_filter = bnxt_create_l2_filter(bp, nf, vnic);
		nf->matching_l2_fltr_ptr = NULL;
	}

	return l2_filter;
}
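/*
 * Reference-counting sketch for the helper above: a second flow with
 * the same L2 key reuses the programmed filter and only bumps
 * l2_ref_cnt, so teardown is expected to drop the reference rather
 * than clear the HW entry outright.
 *
 *	filter1 = bnxt_get_l2_filter(bp, nf, vnic);	(ref or fresh alloc)
 *	...
 *	bnxt_hwrm_clear_l2_filter(bp, filter1);		(drops the ref)
 */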
static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	uint64_t rx_offloads = dev_conf->rxmode.offloads;
	int rc;

	rc = bnxt_vnic_grp_alloc(bp, vnic);
	if (rc)
		goto ret;

	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
	if (rc) {
		PMD_DRV_LOG(ERR, "HWRM vnic alloc failure rc: %x\n", rc);
		goto ret;
	}

	/* RSS context is required only when there is more than one RSS ring */
	if (vnic->rx_queue_cnt > 1) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0 /* ctx_idx 0 */);
		if (rc) {
			PMD_DRV_LOG(ERR,
				    "HWRM vnic ctx alloc failure: %x\n", rc);
			goto ret;
		}
	} else {
		PMD_DRV_LOG(DEBUG, "No RSS context required\n");
	}

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vnic->vlan_strip = true;
	else
		vnic->vlan_strip = false;

	rc = bnxt_hwrm_vnic_cfg(bp, vnic);
	if (rc)
		goto ret;

	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
ret:
	return rc;
}
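/*
 * Provisioning order in the helper above matters: ring group alloc ->
 * VNIC alloc -> optional RSS context -> VNIC cfg -> placement mode.
 * A hypothetical two-queue VNIC takes the RSS-context branch:
 *
 *	vnic->rx_queue_cnt = 2;
 *	rc = bnxt_vnic_prep(bp, vnic);	(also allocates RSS ctx 0)
 */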
static int match_vnic_rss_cfg(struct bnxt *bp,
			      struct bnxt_vnic_info *vnic,
			      const struct rte_flow_action_rss *rss)
{
	unsigned int match = 0, i;

	if (vnic->rx_queue_cnt != rss->queue_num)
		return -EINVAL;

	for (i = 0; i < rss->queue_num; i++) {
		if (!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt &&
		    !bp->rx_queues[rss->queue[i]]->rx_started)
			return -EINVAL;
	}

	for (i = 0; i < vnic->rx_queue_cnt; i++) {
		unsigned int j;

		for (j = 0; j < vnic->rx_queue_cnt; j++) {
			if (bp->grp_info[rss->queue[i]].fw_grp_id ==
			    vnic->fw_grp_ids[j]) {
				match++;
				break;
			}
		}
	}

	if (match != vnic->rx_queue_cnt) {
		PMD_DRV_LOG(ERR,
			    "VNIC queue count %d vs queues matched %d\n",
			    vnic->rx_queue_cnt, match);
		return -EINVAL;
	}

	return 0;
}
static void
bnxt_update_filter_flags_en(struct bnxt_filter_info *filter,
			    struct bnxt_filter_info *filter1,
			    int use_ntuple)
{
	if (!use_ntuple &&
	    !(filter->valid_flags &
	      ~(BNXT_FLOW_L2_DST_VALID_FLAG |
		BNXT_FLOW_L2_SRC_VALID_FLAG |
		BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
		BNXT_FLOW_L2_INNER_DST_VALID_FLAG))) {
		filter->flags = filter1->flags;
		filter->enables = filter1->enables;
		filter->filter_type = HWRM_CFA_L2_FILTER;
		memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN);
		memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
		filter->pri_hint = filter1->pri_hint;
		filter->l2_filter_id_hint = filter1->l2_filter_id_hint;
	}
	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
	filter->l2_ref_cnt = filter1->l2_ref_cnt;
	PMD_DRV_LOG(DEBUG,
		"l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n",
		filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt);
}
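/*
 * Demotion sketch for the helper above: a flow that requests nothing
 * beyond L2 MAC matching collapses into the L2 filter itself.
 *
 *	use_ntuple = 0;
 *	filter->valid_flags = BNXT_FLOW_L2_DST_VALID_FLAG;
 *	bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
 *	(filter->filter_type is HWRM_CFA_L2_FILTER afterwards)
 */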
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error,
			     struct bnxt_filter_info *filter)
{
	const struct rte_flow_action *act =
		bnxt_flow_non_void_action(actions);
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_vnic_info *vnic = NULL, *vnic0 = NULL;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	struct bnxt_filter_info *filter1 = NULL;
	const struct rte_flow_action_rss *rss;
	struct bnxt_rx_queue *rxq = NULL;
	int dflt_vnic, vnic_id;
	unsigned int rss_idx;
	uint32_t vf = 0, i;
	int rc, use_ntuple;

	rc = bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter);
	if (rc != 0)
		goto ret;

	rc = bnxt_flow_parse_attr(attr, error);
	if (rc != 0)
		goto ret;

	/* Since we support ingress attribute only - right now. */
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		/* Allow this flow. Redirect to a VNIC. */
		act_q = (const struct rte_flow_action_queue *)act->conf;
		if (!act_q->index || act_q->index >= bp->rx_nr_rings) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Invalid queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

		vnic_id = attr->group;
		if (!vnic_id) {
			PMD_DRV_LOG(DEBUG, "Group id is 0\n");
			vnic_id = act_q->index;
		}

		vnic = &bp->vnic_info[vnic_id];
		if (vnic == NULL) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "No matching VNIC found.");
			rc = -rte_errno;
			goto ret;
		}
		if (vnic->rx_queue_cnt) {
			if (vnic->start_grp_id != act_q->index) {
				PMD_DRV_LOG(ERR,
					    "VNIC already in use\n");
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "VNIC already in use");
				rc = -rte_errno;
				goto ret;
			}
			goto use_vnic;
		}

		rxq = bp->rx_queues[act_q->index];

		if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
		    vnic->fw_vnic_id != INVALID_HW_RING_ID)
			goto use_vnic;

		if (!rxq ||
		    bp->vnic_info[0].fw_grp_ids[act_q->index] !=
		    INVALID_HW_RING_ID) {
			PMD_DRV_LOG(ERR,
				    "Queue invalid or used with other VNIC\n");
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Queue invalid or in use");
			rc = -rte_errno;
			goto ret;
		}

		rxq->vnic = vnic;
		rxq->rx_started = 1;
		vnic->rx_queue_cnt++;
		vnic->start_grp_id = act_q->index;
		vnic->end_grp_id = act_q->index;
		vnic->func_default = 0;	/* This is not a default VNIC. */

		PMD_DRV_LOG(DEBUG, "VNIC found\n");

		rc = bnxt_vnic_prep(bp, vnic);
		if (rc) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "VNIC prep fail");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG,
			    "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
			    act_q->index, vnic, vnic->fw_grp_ids);

use_vnic:
		vnic->ff_pool_idx = vnic_id;
		PMD_DRV_LOG(DEBUG,
			    "Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   ENOSPC,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Filter not available");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n",
			    filter, filter1, filter1->l2_ref_cnt);
		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			filter->flags =
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
		else
			filter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   ENOSPC,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "New filter not available");
			rc = -rte_errno;
			goto ret;
		}

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		vf = act_vf->id;

		if (filter->tunnel_type ==
		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
		    filter->tunnel_type ==
		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) {
			/* If issued on a VF, ensure id is 0 and is trusted */
			if (BNXT_VF(bp)) {
				if (!BNXT_VF_IS_TRUSTED(bp) || vf) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						act,
						"Incorrect VF");
					rc = -rte_errno;
					goto ret;
				}
			}

			filter->enables |= filter->tunnel_type;
			filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER;
			goto done;
		}

		if (vf >= bp->pdev->max_vfs) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Incorrect VF id!");
			rc = -rte_errno;
			goto ret;
		}

		filter->mirror_vnic_id =
		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
		if (dflt_vnic < 0) {
			/* This simply indicates there's no driver loaded.
			 * This is not an error.
			 */
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Unable to get default VNIC for VF");
			rc = -rte_errno;
			goto ret;
		}

		filter->mirror_vnic_id = dflt_vnic;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   ENOSPC,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "New filter not available");
			rc = -rte_errno;
			goto ret;
		}

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		break;
	case RTE_FLOW_ACTION_TYPE_RSS:
		rss = (const struct rte_flow_action_rss *)act->conf;

		vnic_id = attr->group;
		if (!vnic_id) {
			PMD_DRV_LOG(ERR, "Group id cannot be 0\n");
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ATTR,
					   NULL,
					   "Group id cannot be 0");
			rc = -rte_errno;
			goto ret;
		}

		vnic = &bp->vnic_info[vnic_id];
		if (vnic == NULL) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "No matching VNIC for RSS group.");
			rc = -rte_errno;
			goto ret;
		}
		PMD_DRV_LOG(DEBUG, "VNIC found\n");

		/* Check if requested RSS config matches RSS config of VNIC
		 * only if it is not a fresh VNIC configuration.
		 * Otherwise the existing VNIC configuration can be used.
		 */
		if (vnic->rx_queue_cnt) {
			rc = match_vnic_rss_cfg(bp, vnic, rss);
			if (rc) {
				PMD_DRV_LOG(ERR,
					    "VNIC and RSS config mismatch\n");
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "VNIC and RSS cfg mismatch");
				rc = -rte_errno;
				goto ret;
			}
			goto vnic_found;
		}

		for (i = 0; i < rss->queue_num; i++) {
			PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n",
				    rss->queue[i]);

			if (!rss->queue[i] ||
			    rss->queue[i] >= bp->rx_nr_rings ||
			    !bp->rx_queues[rss->queue[i]]) {
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "Invalid queue ID for RSS");
				rc = -rte_errno;
				goto ret;
			}
			rxq = bp->rx_queues[rss->queue[i]];

			if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] !=
			    INVALID_HW_RING_ID) {
				PMD_DRV_LOG(ERR,
					    "queue active with other VNIC\n");
				rte_flow_error_set(error,
						   EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act,
						   "Invalid queue ID for RSS");
				rc = -rte_errno;
				goto ret;
			}

			rxq->vnic = vnic;
			rxq->rx_started = 1;
			vnic->rx_queue_cnt++;
		}

		vnic->start_grp_id = rss->queue[0];
		vnic->end_grp_id = rss->queue[rss->queue_num - 1];
		vnic->func_default = 0;	/* This is not a default VNIC. */

		rc = bnxt_vnic_prep(bp, vnic);
		if (rc) {
			rte_flow_error_set(error,
					   EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "VNIC prep fail");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG,
			    "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
			    vnic_id, vnic, vnic->fw_grp_ids);

		vnic->ff_pool_idx = vnic_id;
		PMD_DRV_LOG(DEBUG,
			    "Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx);

		/* This can be done only after vnic_grp_alloc is done. */
		for (i = 0; i < vnic->rx_queue_cnt; i++) {
			vnic->fw_grp_ids[i] =
				bp->grp_info[rss->queue[i]].fw_grp_id;
			/* Make sure vnic0 does not use these rings. */
			bp->vnic_info[0].fw_grp_ids[rss->queue[i]] =
				INVALID_HW_RING_ID;
		}

		for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ) {
			for (i = 0; i < vnic->rx_queue_cnt; i++)
				vnic->rss_table[rss_idx++] =
					vnic->fw_grp_ids[i];
		}

		/* Configure RSS only if the queue count is > 1 */
		if (vnic->rx_queue_cnt > 1) {
			vnic->hash_type =
				bnxt_rte_to_hwrm_hash_types(rss->types);

			if (!rss->key_len) {
				/* If hash key has not been specified,
				 * use random hash key.
				 */
				prandom_bytes(vnic->rss_hash_key,
					      HW_HASH_KEY_SIZE);
			} else {
				if (rss->key_len > HW_HASH_KEY_SIZE)
					memcpy(vnic->rss_hash_key,
					       rss->key,
					       HW_HASH_KEY_SIZE);
				else
					memcpy(vnic->rss_hash_key,
					       rss->key,
					       rss->key_len);
			}
			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
		} else {
			PMD_DRV_LOG(DEBUG, "No RSS config required\n");
		}

vnic_found:
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rte_flow_error_set(error,
					   ENOSPC,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "New filter not available");
			rc = -rte_errno;
			goto ret;
		}

		PMD_DRV_LOG(DEBUG, "L2 filter created\n");
		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
		break;
	default:
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

	if (filter1 && !filter->matching_l2_fltr_ptr) {
		bnxt_free_filter(bp, filter1);
		filter1->fw_l2_filter_id = -1;
	}

done:
	act = bnxt_flow_non_void_action(++act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

	return rc;

ret:
	/* TODO: Cleanup according to ACTION TYPE. */
	if (vnic && STAILQ_EMPTY(&vnic->filter))
		vnic->rx_queue_cnt = 0;

	if (rxq && !vnic->rx_queue_cnt)
		rxq->vnic = &bp->vnic_info[0];

	return rc;
}
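/*
 * Action sketch accepted by the switch above (hypothetical queue):
 * exactly one non-VOID action, then END.
 *
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */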
static
struct bnxt_vnic_info *find_matching_vnic(struct bnxt *bp,
					  struct bnxt_filter_info *filter)
{
	struct bnxt_vnic_info *vnic = NULL;
	unsigned int i;

	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic->fw_vnic_id != INVALID_VNIC_ID &&
		    filter->dst_id == vnic->fw_vnic_id) {
			PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n",
				    vnic->ff_pool_idx);
			return vnic;
		}
	}
	return NULL;
}
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_filter_info *filter;
	int ret = 0;

	bnxt_acquire_flow_lock(bp);
	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0) {
		bnxt_release_flow_lock(bp);
		return ret;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		bnxt_release_flow_lock(bp);
		return -ENOMEM;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	if (ret)
		goto exit;

	vnic = find_matching_vnic(bp, filter);
	if (vnic) {
		if (STAILQ_EMPTY(&vnic->filter)) {
			rte_free(vnic->fw_grp_ids);
			bnxt_hwrm_vnic_ctx_free(bp, vnic);
			bnxt_hwrm_vnic_free(bp, vnic);
			vnic->rx_queue_cnt = 0;
			PMD_DRV_LOG(DEBUG, "Free VNIC\n");
		}
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		bnxt_hwrm_clear_em_filter(bp, filter);
	else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		bnxt_hwrm_clear_ntuple_filter(bp, filter);
	else
		bnxt_hwrm_clear_l2_filter(bp, filter);

exit:
	/* No need to hold on to this filter if we are just validating flow */
	filter->fw_l2_filter_id = UINT64_MAX;
	bnxt_free_filter(bp, filter);
	bnxt_release_flow_lock(bp);

	return ret;
}
static void
bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter,
		   struct bnxt_filter_info *new_filter)
{
	/* Clear the new L2 filter that was created in the previous step in
	 * bnxt_validate_and_parse_flow. For L2 filters, we will use the new
	 * filter which points to the new destination queue and so we clear
	 * the previous L2 filter. For ntuple filters, we are going to reuse
	 * the old L2 filter and create new NTUPLE filter with this new
	 * destination queue subsequently during bnxt_flow_create.
	 */
	if (new_filter->filter_type == HWRM_CFA_L2_FILTER) {
		bnxt_hwrm_clear_l2_filter(bp, old_filter);
		bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter);
	} else {
		if (new_filter->filter_type == HWRM_CFA_EM_FILTER)
			bnxt_hwrm_clear_em_filter(bp, old_filter);
		if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			bnxt_hwrm_clear_ntuple_filter(bp, old_filter);
	}
}
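/*
 * Destination-update sketch: when bnxt_match_filter() reports -EXDEV
 * (same pattern, new destination), the helper above tears down the HW
 * state tied to the old filter; the caller then programs the new
 * destination, e.g. for the ntuple case (names as used in this file):
 *
 *	bnxt_update_filter(bp, old_filter, new_filter);
 *	bnxt_hwrm_set_ntuple_filter(bp, new_filter->dst_id, new_filter);
 */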
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf;
	struct rte_flow *flow;
	int i;

	for (i = bp->max_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;

			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask))) {
				if (mf->dst_id == nf->dst_id)
					return -EEXIST;
				/* Free the old filter, update flow
				 * with new filter
				 */
				bnxt_update_filter(bp, mf, nf);
				STAILQ_REMOVE(&vnic->filter, mf,
					      bnxt_filter_info, next);
				STAILQ_INSERT_TAIL(&vnic->filter, nf, next);
				bnxt_free_filter(bp, mf);
				flow->filter = nf;
				return -EXDEV;
			}
		}
	}
	return 0;
}
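/*
 * Return-value contract relied on by the create path: 0 means no flow
 * with this pattern exists yet, -EEXIST means an identical flow (same
 * destination) is already installed, and -EXDEV means the pattern
 * matched but the destination differed; in the -EXDEV case the VNIC
 * filter list has already been rewired to the new filter above.
 */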
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_filter_info *filter;
	bool update_flow = false;
	struct rte_flow *flow;
	int ret = 0;
	uint32_t tun_type;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow, Not a Trusted VF!");
		return NULL;
	}

	if (!dev->data->dev_started) {
		rte_flow_error_set(error,
				   EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Device must be started");
		return NULL;
	}

	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	bnxt_acquire_flow_lock(bp);
	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Not a valid flow.\n");
		goto free_flow;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		ret = -ENOMEM;
		goto free_flow;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	if (ret != 0)
		goto free_filter;
	ret = bnxt_match_filter(bp, filter);
	if (ret == -EEXIST) {
		PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
		/* Clear the filter that was created as part of
		 * validate_and_parse_flow() above
		 */
		bnxt_hwrm_clear_l2_filter(bp, filter);
		goto free_filter;
	} else if (ret == -EXDEV) {
		PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
		PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
		update_flow = true;
	}

	/* If tunnel redirection to a VF/PF is specified then only tunnel_type
	 * is set and enable is set to the tunnel type. Issue hwrm cmd directly
	 * in such a case.
	 */
	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Unable to query tunnel to VF");
			goto free_filter;
		}
		if (tun_type == (1U << filter->tunnel_type)) {
			ret =
			bnxt_hwrm_tunnel_redirect_free(bp,
						       filter->tunnel_type);
			if (ret) {
				PMD_DRV_LOG(ERR,
					    "Unable to free existing tunnel\n");
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Unable to free preexisting "
						   "tunnel on VF");
				goto free_filter;
			}
		}
		ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Unable to redirect tunnel to VF");
			goto free_filter;
		}
		vnic = &bp->vnic_info[0];
		goto done;
	}
	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
		filter->enables |=
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
	}

	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
		filter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
	}

	vnic = find_matching_vnic(bp, filter);
done:
	if (!ret || update_flow) {
		flow->filter = filter;
		flow->vnic = vnic;
		/* VNIC is set only in case of queue or RSS action */
		if (vnic) {
			/*
			 * RxQ0 is not used for flow filters.
			 */
			if (update_flow) {
				ret = -EXDEV;
				goto free_flow;
			}
			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
			PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
			STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
			bnxt_release_flow_lock(bp);
			return flow;
		}

		/* No VNIC was matched (e.g. drop/count handled via the
		 * default VNIC); track the flow on VNIC 0 instead.
		 */
		vnic = &bp->vnic_info[0];
		flow->filter = filter;
		flow->vnic = vnic;
		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
		bnxt_release_flow_lock(bp);
		return flow;
	}
free_filter:
	bnxt_free_filter(bp, filter);
free_flow:
	if (ret == -EEXIST)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Matching Flow exists.");
	else if (ret == -EXDEV)
		rte_flow_error_set(error, 0,
				   RTE_FLOW_ERROR_TYPE_NONE, NULL,
				   "Flow with pattern exists, updating destination queue");
	else
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");
	rte_free(flow);
	flow = NULL;
	bnxt_release_flow_lock(bp);
	return flow;
}
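/*
 * End-to-end sketch (testpmd syntax, hypothetical addresses/queues)
 * that exercises this create path with an NTUPLE filter and a queue
 * destination in group 1:
 *
 *	flow create 0 ingress group 1
 *		pattern eth / ipv4 dst is 192.168.1.1 / tcp dst is 80 / end
 *		actions queue index 1 / end
 */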
static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
					       struct bnxt_filter_info *filter,
					       struct rte_flow_error *error)
{
	uint16_t tun_dst_fid;
	uint32_t tun_type;
	int ret = 0;

	ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Unable to query tunnel to VF");
		return ret;
	}
	if (tun_type == (1U << filter->tunnel_type)) {
		ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
						     &tun_dst_fid);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   NULL,
					   "tunnel_redirect info cmd fail");
			return ret;
		}
		PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
			    tun_dst_fid + bp->first_vf_id, bp->fw_fid);

		/* Tunnel doesn't belong to this VF, so don't send HWRM
		 * cmd, just delete the flow from driver
		 */
		if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id))
			PMD_DRV_LOG(ERR,
				    "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
		else
			ret = bnxt_hwrm_tunnel_redirect_free(bp,
							filter->tunnel_type);
	}
	return ret;
}
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_filter_info *filter;
	struct bnxt_vnic_info *vnic;
	int ret = 0;

	bnxt_acquire_flow_lock(bp);
	if (!flow) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid flow: failed to destroy flow.");
		bnxt_release_flow_lock(bp);
		return -EINVAL;
	}

	filter = flow->filter;
	vnic = flow->vnic;
	if (!filter) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Invalid flow: failed to destroy flow.");
		bnxt_release_flow_lock(bp);
		return -EINVAL;
	}

	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_handle_tunnel_redirect_destroy(bp, filter, error);
		if (!ret) {
			goto done;
		} else {
			bnxt_release_flow_lock(bp);
			return ret;
		}
	}

	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		PMD_DRV_LOG(ERR, "Could not find matching flow\n");

	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
	ret = bnxt_hwrm_clear_l2_filter(bp, filter);

done:
	if (!ret) {
		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
		bnxt_free_filter(bp, filter);
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);

		/* If this was the last flow associated with this vnic,
		 * switch the queue back to RSS pool.
		 */
		if (vnic && STAILQ_EMPTY(&vnic->flow_list)) {
			rte_free(vnic->fw_grp_ids);
			if (vnic->rx_queue_cnt > 1)
				bnxt_hwrm_vnic_ctx_free(bp, vnic);

			bnxt_hwrm_vnic_free(bp, vnic);
			vnic->rx_queue_cnt = 0;
		}
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	bnxt_release_flow_lock(bp);
	return ret;
}
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	bnxt_acquire_flow_lock(bp);
	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic->fw_vnic_id == INVALID_VNIC_ID)
			continue;

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			struct bnxt_filter_info *filter = flow->filter;

			if (filter->filter_type ==
			    HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
			    filter->enables == filter->tunnel_type) {
				ret =
				bnxt_handle_tunnel_redirect_destroy(bp,
								    filter,
								    error);
				if (!ret) {
					goto done;
				} else {
					bnxt_release_flow_lock(bp);
					return ret;
				}
			}

			if (filter->filter_type == HWRM_CFA_EM_FILTER)
				ret = bnxt_hwrm_clear_em_filter(bp, filter);
			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
				ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
			else
				ret = bnxt_hwrm_clear_l2_filter(bp, filter);
			if (ret) {
				rte_flow_error_set
					(error,
					 -ret,
					 RTE_FLOW_ERROR_TYPE_HANDLE,
					 NULL,
					 "Failed to flush flow in HW.");
				bnxt_release_flow_lock(bp);
				return -rte_errno;
			}
done:
			bnxt_free_filter(bp, filter);
			STAILQ_REMOVE(&vnic->flow_list, flow,
				      rte_flow, next);
			rte_free(flow);
		}
	}

	bnxt_release_flow_lock(bp);
	return ret;
}
const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};
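/*
 * These callbacks are handed out through the driver's generic filter
 * hook, so the standard entry points, e.g.
 *
 *	rte_flow_create(port_id, &attr, pattern, actions, &error);
 *
 * resolve to bnxt_flow_create() and friends on bnxt ports.
 */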