/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 */

#include <rte_malloc.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_vnic.h"
#include "bnxt_util.h"
#include "hsi_struct_def_dpdk.h"

static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
	rte_flow_error_set(error,
			   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
	rte_flow_error_set(error,
			   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
	rte_flow_error_set(error,
			   RTE_FLOW_ERROR_TYPE_ATTR,

static const struct rte_flow_item *
bnxt_flow_non_void_item(const struct rte_flow_item *cur)
	if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)

static const struct rte_flow_action *
bnxt_flow_non_void_action(const struct rte_flow_action *cur)
	if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
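
/* Illustrative sketch (not from the original source): the two helpers
 * above let the parsers below walk pattern/action arrays while skipping
 * VOID entries, along these lines:
 *
 *	const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
 *
 *	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
 *		...handle *item...
 *		item = bnxt_flow_non_void_item(item + 1);
 *	}
 */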

static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
		       struct rte_flow_error *error __rte_unused)
	const struct rte_flow_item *item =
		bnxt_flow_non_void_item(pattern);

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		case RTE_FLOW_ITEM_TYPE_ANY:
		case RTE_FLOW_ITEM_TYPE_ETH:
		case RTE_FLOW_ITEM_TYPE_VLAN:
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_TCP:
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* need ntuple match, reset exact match */
			PMD_DRV_LOG(DEBUG, "Unknown Flow type\n");

	if (has_vlan && use_ntuple) {
			    "VLAN flow cannot use NTUPLE filter\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   "Cannot use VLAN with NTUPLE");
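
/* Illustrative example of the classification above (hypothetical pattern,
 * not taken from this file): any L3/L4 item such as IPV4/IPV6/TCP/UDP
 * makes the flow an ntuple match, while a pure L2 pattern stays an
 * exact-match (EM) candidate:
 *
 *	struct rte_flow_item ntuple_pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */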

static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
				  const struct rte_flow_attr *attr,
				  const struct rte_flow_item pattern[],
				  struct rte_flow_error *error,
				  struct bnxt_filter_info *filter)
	const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_gre *gre_spec;
	const struct rte_flow_item_gre *gre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	const struct rte_flow_item_vf *vf_spec;
	uint32_t tenant_id_be = 0, valid_flags = 0;
	uint32_t en_ethertype;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
	en_ethertype = use_ntuple ?
		NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
		EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
			/* last or range is NOT supported as match criteria */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   "No support for range");

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ANY:
				((const struct rte_flow_item_any *)item->spec)->num > 3;
				PMD_DRV_LOG(DEBUG, "Parse inner header\n");
		case RTE_FLOW_ITEM_TYPE_ETH:
			if (!item->spec || !item->mask)

			eth_spec = item->spec;
			eth_mask = item->mask;

			/* Source and destination MAC address masks must not
			 * be partially set; each must be all 0's or all 1's.
			 */
			if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
			    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "MAC_addr mask not valid");

			/* A partial ethertype mask is not allowed;
			 * only exact matches are.
			 */
			if (eth_mask->type &&
			    eth_mask->type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "ethertype mask not valid");

			if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
				if (!rte_is_unicast_ether_addr(&eth_spec->dst)) {
					rte_flow_error_set(error,
							   RTE_FLOW_ERROR_TYPE_ITEM,
				rte_memcpy(filter->dst_macaddr,
					   &eth_spec->dst, RTE_ETHER_ADDR_LEN);
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
				valid_flags |= inner ?
					BNXT_FLOW_L2_INNER_DST_VALID_FLAG :
					BNXT_FLOW_L2_DST_VALID_FLAG;
				filter->priority = attr->priority;
					    "Creating a priority flow\n");

			if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
				if (!rte_is_unicast_ether_addr(&eth_spec->src)) {
					rte_flow_error_set(error,
							   RTE_FLOW_ERROR_TYPE_ITEM,
				rte_memcpy(filter->src_macaddr,
					   &eth_spec->src, RTE_ETHER_ADDR_LEN);
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
				valid_flags |= inner ?
					BNXT_FLOW_L2_INNER_SRC_VALID_FLAG :
					BNXT_FLOW_L2_SRC_VALID_FLAG;
			 * PMD_DRV_LOG(ERR, "Handle this condition\n");

			if (eth_mask->type) {
					rte_be_to_cpu_16(eth_spec->type);

		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			if (en & en_ethertype) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "VLAN TPID matching is not"

			if (vlan_mask->tci &&
			    vlan_mask->tci == RTE_BE16(0x0fff)) {
				/* Only the VLAN ID can be matched. */
					rte_be_to_cpu_16(vlan_spec->tci &
				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "VLAN mask is invalid");

			if (vlan_mask->inner_type &&
			    vlan_mask->inner_type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "inner ethertype mask not"

			if (vlan_mask->inner_type) {
					rte_be_to_cpu_16(vlan_spec->inner_type);

		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* If no mask is specified, we can use EM filters. */
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (!item->spec || !item->mask)

			/* Only IP DST and SRC fields are maskable. */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.type_of_service ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.time_to_live ||
			    ipv4_mask->hdr.next_proto_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid IPv4 mask.");

			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;

				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

			if (ipv4_mask->hdr.src_addr) {
				filter->src_ipaddr_mask[0] =
					ipv4_mask->hdr.src_addr;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;

			if (ipv4_mask->hdr.dst_addr) {
				filter->dst_ipaddr_mask[0] =
					ipv4_mask->hdr.dst_addr;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;

			filter->ip_addr_type = use_ntuple ?
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;

			if (ipv4_spec->hdr.next_proto_id) {
				filter->ip_protocol =
					ipv4_spec->hdr.next_proto_id;
					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;

		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (!item->spec || !item->mask)

			/* Only IP DST and SRC fields are maskable. */
			if (ipv6_mask->hdr.vtc_flow ||
			    ipv6_mask->hdr.payload_len ||
			    ipv6_mask->hdr.proto ||
			    ipv6_mask->hdr.hop_limits) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid IPv6 mask.");

				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

			rte_memcpy(filter->src_ipaddr,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->dst_ipaddr,
				   ipv6_spec->hdr.dst_addr, 16);

			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
				rte_memcpy(filter->src_ipaddr_mask,
					   ipv6_mask->hdr.src_addr, 16);
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;

			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
				rte_memcpy(filter->dst_ipaddr_mask,
					   ipv6_mask->hdr.dst_addr, 16);
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;

			filter->ip_addr_type = use_ntuple ?
				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;

		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (!item->spec || !item->mask)

			/* Check TCP mask. Only DST & SRC ports are maskable. */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,

			filter->src_port = tcp_spec->hdr.src_port;
			filter->dst_port = tcp_spec->hdr.dst_port;

				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (tcp_mask->hdr.dst_port) {
				filter->dst_port_mask = tcp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;

			if (tcp_mask->hdr.src_port) {
				filter->src_port_mask = tcp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;

		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (!item->spec || !item->mask)

			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,

			filter->src_port = udp_spec->hdr.src_port;
			filter->dst_port = udp_spec->hdr.dst_port;

				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (udp_mask->hdr.dst_port) {
				filter->dst_port_mask = udp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;

			if (udp_mask->hdr.src_port) {
				filter->src_port_mask = udp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;

		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid VXLAN item");

			if (!vxlan_spec && !vxlan_mask) {
				filter->tunnel_type =
					CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;

			if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
			    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
			    vxlan_spec->flags != 0x8) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid VXLAN item");

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
					!!memcmp(vxlan_mask->vni, vni_mask,
						   RTE_FLOW_ERROR_TYPE_ITEM,
					rte_memcpy(((uint8_t *)&tenant_id_be + 1),
						rte_be_to_cpu_32(tenant_id_be);
					filter->tunnel_type =
						CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;

		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid NVGRE item");

			if (!nvgre_spec && !nvgre_mask) {
				filter->tunnel_type =
					CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;

			if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
			    nvgre_spec->protocol != 0x6558) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid NVGRE item");

			if (nvgre_spec && nvgre_mask) {
					!!memcmp(nvgre_mask->tni, tni_mask,
						   RTE_FLOW_ERROR_TYPE_ITEM,
					rte_memcpy(((uint8_t *)&tenant_id_be + 1),
						rte_be_to_cpu_32(tenant_id_be);
					filter->tunnel_type =
						CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;

		case RTE_FLOW_ITEM_TYPE_GRE:
			gre_spec = (const struct rte_flow_item_gre *)item->spec;
			gre_mask = (const struct rte_flow_item_gre *)item->mask;

			/* Check if GRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if (!!gre_spec ^ !!gre_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,

			if (!gre_spec && !gre_mask) {
				filter->tunnel_type =
					CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE;

		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = item->spec;
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Configuring on a VF!");

			if (vf >= bp->pdev->max_vfs) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,

			if (!attr->transfer) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Matching VF traffic without"
						   " affecting it (transfer attribute)"

			filter->mirror_vnic_id =
			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
				/* This simply indicates there's no driver
				 * loaded. This is not an error.
				 */
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Unable to get default VNIC for VF");

			filter->mirror_vnic_id = dflt_vnic;
			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

	filter->enables = en;
	filter->valid_flags = valid_flags;
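
/* Illustrative spec/mask pair (hypothetical addresses, not from this file)
 * that the IPv4 branch above would accept: fully-masked source/destination
 * addresses land in filter->{src,dst}_ipaddr[0] with the matching enable
 * bits. Note the header fields are big-endian on the wire:
 *
 *	struct rte_flow_item_ipv4 ip4_spec = {
 *		.hdr = { .src_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
 *			 .dst_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 2)) },
 *	};
 *	struct rte_flow_item_ipv4 ip4_mask = {
 *		.hdr = { .src_addr = RTE_BE32(UINT32_MAX),
 *			 .dst_addr = RTE_BE32(UINT32_MAX) },
 *	};
 */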

/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
	/* Must be input (ingress) direction */
	if (!attr->ingress) {
		rte_flow_error_set(error,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   "Only support ingress.");
		rte_flow_error_set(error,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   "No support for egress.");
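
/* For reference, a minimal attribute block this function accepts
 * (illustrative): only ingress may be set; egress is rejected with
 * RTE_FLOW_ERROR_TYPE_ATTR_EGRESS.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 */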

static struct bnxt_filter_info *
bnxt_find_matching_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
	struct bnxt_filter_info *mf, *f0;
	struct bnxt_vnic_info *vnic0;
	struct rte_flow *flow;

	vnic0 = &bp->vnic_info[0];
	f0 = STAILQ_FIRST(&vnic0->filter);

	/* This flow has the same DST MAC as the port/L2 filter. */
	if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)

	for (i = bp->max_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		if (vnic->fw_vnic_id == INVALID_VNIC_ID)

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			if (mf->matching_l2_fltr_ptr)

			if (mf->ethertype == nf->ethertype &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,

static struct bnxt_filter_info *
bnxt_create_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		      struct bnxt_vnic_info *vnic)
	struct bnxt_filter_info *filter1;

	/* Alloc a new L2 filter.
	 * This flow needs a MAC filter that does not match any existing
	 * filter rule.
	 */
	filter1 = bnxt_get_unused_filter(bp);

	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE;
	filter1->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	if (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
	    nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG) {
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST;
		PMD_DRV_LOG(DEBUG, "Create Outer filter\n");

	if (nf->filter_type == HWRM_CFA_L2_FILTER &&
	    (nf->valid_flags & BNXT_FLOW_L2_SRC_VALID_FLAG ||
	     nf->valid_flags & BNXT_FLOW_L2_INNER_SRC_VALID_FLAG)) {
		PMD_DRV_LOG(DEBUG, "Create L2 filter for SRC MAC\n");
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_SOURCE_VALID;
		memcpy(filter1->l2_addr, nf->src_macaddr, RTE_ETHER_ADDR_LEN);
		PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
		memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);

	    (nf->valid_flags & BNXT_FLOW_L2_DST_VALID_FLAG ||
	     nf->valid_flags & BNXT_FLOW_L2_INNER_DST_VALID_FLAG)) {
		/* Tell the FW where to place the filter in the table. */
		if (nf->priority > 65535) {
				HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER;
			/* This will place the filter in TCAM */
			filter1->l2_filter_id_hint = (uint64_t)-1;

	filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
		L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
	memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
		bnxt_free_filter(bp, filter1);
	filter1->l2_ref_cnt++;

struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		   struct bnxt_vnic_info *vnic)
	struct bnxt_filter_info *l2_filter = NULL;

	l2_filter = bnxt_find_matching_l2_filter(bp, nf);
		l2_filter->l2_ref_cnt++;
		nf->matching_l2_fltr_ptr = l2_filter;
		l2_filter = bnxt_create_l2_filter(bp, nf, vnic);
		nf->matching_l2_fltr_ptr = NULL;
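
/* Summary sketch (drawn from the two helpers above, illustrative only):
 * a reused filter has its l2_ref_cnt bumped and is remembered in
 * nf->matching_l2_fltr_ptr, while a freshly created one starts with its
 * own reference count:
 *
 *	filter1 = bnxt_get_l2_filter(bp, nf, vnic);
 *	if (filter1 == NULL)
 *		...fail, no HW L2 filter available...
 */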

static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	uint64_t rx_offloads = dev_conf->rxmode.offloads;

	rc = bnxt_vnic_grp_alloc(bp, vnic);

	rc = bnxt_hwrm_vnic_alloc(bp, vnic);
		PMD_DRV_LOG(ERR, "HWRM vnic alloc failure rc: %x\n", rc);

	/* An RSS context is required only when there is more than one RSS ring */
	if (vnic->rx_queue_cnt > 1) {
		rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0 /* ctx_idx 0 */);
			    "HWRM vnic ctx alloc failure: %x\n", rc);
		PMD_DRV_LOG(DEBUG, "No RSS context required\n");

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
		vnic->vlan_strip = true;
		vnic->vlan_strip = false;

	rc = bnxt_hwrm_vnic_cfg(bp, vnic);

	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
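
/* Ordering sketch of what bnxt_vnic_prep() does (condensed from the calls
 * above): ring-group alloc, then VNIC alloc, an RSS context only for
 * multi-queue VNICs, then VNIC and placement-mode configuration:
 *
 *	bnxt_vnic_grp_alloc(bp, vnic);
 *	bnxt_hwrm_vnic_alloc(bp, vnic);
 *	if (vnic->rx_queue_cnt > 1)
 *		bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
 *	bnxt_hwrm_vnic_cfg(bp, vnic);
 *	bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
 */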

static int match_vnic_rss_cfg(struct bnxt *bp,
			      struct bnxt_vnic_info *vnic,
			      const struct rte_flow_action_rss *rss)
	unsigned int match = 0, i;

	if (vnic->rx_queue_cnt != rss->queue_num)

	for (i = 0; i < rss->queue_num; i++) {
		if (!bp->rx_queues[rss->queue[i]]->vnic->rx_queue_cnt &&
		    !bp->rx_queues[rss->queue[i]]->rx_started)

	for (i = 0; i < vnic->rx_queue_cnt; i++) {
		for (j = 0; j < vnic->rx_queue_cnt; j++) {
			if (bp->grp_info[rss->queue[i]].fw_grp_id ==

	if (match != vnic->rx_queue_cnt) {
			    "VNIC queue count %d vs queues matched %d\n",
			    vnic->rx_queue_cnt, match);
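
/* Illustrative RSS action (hypothetical queue ids) that would satisfy
 * match_vnic_rss_cfg() for a VNIC already spanning the same queues:
 *
 *	uint16_t queues[] = { 1, 2, 3, 4 };
 *	struct rte_flow_action_rss rss = {
 *		.queue_num = RTE_DIM(queues),
 *		.queue = queues,
 *	};
 */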

static void
bnxt_update_filter_flags_en(struct bnxt_filter_info *filter,
			    struct bnxt_filter_info *filter1,
	    !(filter->valid_flags &
	      ~(BNXT_FLOW_L2_DST_VALID_FLAG |
		BNXT_FLOW_L2_SRC_VALID_FLAG |
		BNXT_FLOW_L2_INNER_SRC_VALID_FLAG |
		BNXT_FLOW_L2_INNER_DST_VALID_FLAG))) {
		filter->flags = filter1->flags;
		filter->enables = filter1->enables;
		filter->filter_type = HWRM_CFA_L2_FILTER;
		memcpy(filter->l2_addr, filter1->l2_addr, RTE_ETHER_ADDR_LEN);
		memset(filter->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
		filter->pri_hint = filter1->pri_hint;
		filter->l2_filter_id_hint = filter1->l2_filter_id_hint;

	filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
	filter->l2_ref_cnt = filter1->l2_ref_cnt;
		    "l2_filter: %p fw_l2_filter_id %" PRIx64 " l2_ref_cnt %u\n",
		    filter1, filter->fw_l2_filter_id, filter->l2_ref_cnt);

static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error,
			     struct bnxt_filter_info *filter)
	const struct rte_flow_action *act =
		bnxt_flow_non_void_action(actions);
	struct bnxt *bp = dev->data->dev_private;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	struct bnxt_filter_info *filter1 = NULL;
	const struct rte_flow_action_rss *rss;
	struct bnxt_vnic_info *vnic, *vnic0;
	struct bnxt_rx_queue *rxq = NULL;
	int dflt_vnic, vnic_id;
	unsigned int rss_idx;

	bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter);

	rc = bnxt_flow_parse_attr(attr, error);

	/* Only the ingress attribute is supported, for now. */
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		/* Allow this flow. Redirect to a VNIC. */
		act_q = (const struct rte_flow_action_queue *)act->conf;
		if (!act_q->index || act_q->index >= bp->rx_nr_rings) {
			rte_flow_error_set(error,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   "Invalid queue ID.");

		PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

		vnic_id = attr->group;
			PMD_DRV_LOG(DEBUG, "Group id is 0\n");
			vnic_id = act_q->index;

		vnic = &bp->vnic_info[vnic_id];
			rte_flow_error_set(error,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   "No matching VNIC found.");

		if (vnic->rx_queue_cnt) {
			if (vnic->start_grp_id != act_q->index) {
					    "VNIC already in use\n");
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   "VNIC already in use");

		rxq = bp->rx_queues[act_q->index];

		if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq &&
		    vnic->fw_vnic_id != INVALID_HW_RING_ID)
		    bp->vnic_info[0].fw_grp_ids[act_q->index] !=
		    INVALID_HW_RING_ID) {
			    "Queue invalid or used with other VNIC\n");
			rte_flow_error_set(error,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   "Invalid queue or queue in use");

		rxq->rx_started = 1;
		vnic->rx_queue_cnt++;
		vnic->start_grp_id = act_q->index;
		vnic->end_grp_id = act_q->index;
		vnic->func_default = 0;	/* This is not a default VNIC. */

		PMD_DRV_LOG(DEBUG, "VNIC found\n");

		rc = bnxt_vnic_prep(bp, vnic);

			    "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
			    act_q->index, vnic, vnic->fw_grp_ids);

		vnic->ff_pool_idx = vnic_id;
			    "Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {

		PMD_DRV_LOG(DEBUG, "new fltr: %p l2fltr: %p l2_ref_cnt: %d\n",
			    filter, filter1, filter1->l2_ref_cnt);
		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);

	case RTE_FLOW_ACTION_TYPE_DROP:
		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;

	case RTE_FLOW_ACTION_TYPE_COUNT:
		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;

	case RTE_FLOW_ACTION_TYPE_VF:
		act_vf = (const struct rte_flow_action_vf *)act->conf;

		if (filter->tunnel_type ==
		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
		    filter->tunnel_type ==
		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) {
			/* If issued on a VF, ensure the VF id is 0 and
			 * the VF is trusted.
			 */
			if (!BNXT_VF_IS_TRUSTED(bp) || vf) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,

			filter->enables |= filter->tunnel_type;
			filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER;

		if (vf >= bp->pdev->max_vfs) {
			rte_flow_error_set(error,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   "Incorrect VF id!");

		filter->mirror_vnic_id =
		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
		if (dflt_vnic < 0) {
			/* This simply indicates there's no driver loaded.
			 * This is not an error.
			 */
			rte_flow_error_set(error,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   "Unable to get default VNIC for VF");

		filter->mirror_vnic_id = dflt_vnic;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;

	case RTE_FLOW_ACTION_TYPE_RSS:
		rss = (const struct rte_flow_action_rss *)act->conf;

		vnic_id = attr->group;
			PMD_DRV_LOG(ERR, "Group id cannot be 0\n");
			rte_flow_error_set(error,
					   RTE_FLOW_ERROR_TYPE_ATTR,
					   "Group id cannot be 0");

		vnic = &bp->vnic_info[vnic_id];
			rte_flow_error_set(error,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   "No matching VNIC for RSS group.");

		PMD_DRV_LOG(DEBUG, "VNIC found\n");

		/* Check if the requested RSS config matches the RSS config
		 * of the VNIC only if it is not a fresh VNIC configuration.
		 * Otherwise the existing VNIC configuration can be used.
		 */
		if (vnic->rx_queue_cnt) {
			rc = match_vnic_rss_cfg(bp, vnic, rss);
				    "VNIC and RSS config mismatch\n");
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   "VNIC and RSS cfg mismatch");

		for (i = 0; i < rss->queue_num; i++) {
			PMD_DRV_LOG(DEBUG, "RSS action Queue %d\n",

			if (!rss->queue[i] ||
			    rss->queue[i] >= bp->rx_nr_rings ||
			    !bp->rx_queues[rss->queue[i]]) {
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   "Invalid queue ID for RSS");

			rxq = bp->rx_queues[rss->queue[i]];
			if (bp->vnic_info[0].fw_grp_ids[rss->queue[i]] !=
			    INVALID_HW_RING_ID) {
				    "queue active with other VNIC\n");
				rte_flow_error_set(error,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   "Invalid queue ID for RSS");

			rxq->rx_started = 1;
			vnic->rx_queue_cnt++;

		vnic->start_grp_id = rss->queue[0];
		vnic->end_grp_id = rss->queue[rss->queue_num - 1];
		vnic->func_default = 0;	/* This is not a default VNIC. */

		rc = bnxt_vnic_prep(bp, vnic);

			    "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
			    vnic_id, vnic, vnic->fw_grp_ids);

		vnic->ff_pool_idx = vnic_id;
			    "Setting vnic ff_pool_idx %d\n", vnic->ff_pool_idx);

		/* This can be done only after vnic_grp_alloc is done. */
		for (i = 0; i < vnic->rx_queue_cnt; i++) {
			vnic->fw_grp_ids[i] =
				bp->grp_info[rss->queue[i]].fw_grp_id;
			/* Make sure vnic0 does not use these rings. */
			bp->vnic_info[0].fw_grp_ids[rss->queue[i]] =

		for (rss_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; ) {
			for (i = 0; i < vnic->rx_queue_cnt; i++)
				vnic->rss_table[rss_idx++] =
					vnic->fw_grp_ids[i];

		/* Configure RSS only if the queue count is > 1 */
		if (vnic->rx_queue_cnt > 1) {
				bnxt_rte_to_hwrm_hash_types(rss->types);

			if (!rss->key_len) {
				/* If a hash key has not been specified,
				 * use a random hash key.
				 */
				prandom_bytes(vnic->rss_hash_key,
				if (rss->key_len > HW_HASH_KEY_SIZE)
					memcpy(vnic->rss_hash_key,
					memcpy(vnic->rss_hash_key,

			bnxt_hwrm_vnic_rss_cfg(bp, vnic);
			PMD_DRV_LOG(DEBUG, "No RSS config required\n");

		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {

		PMD_DRV_LOG(DEBUG, "L2 filter created\n");
		bnxt_update_filter_flags_en(filter, filter1, use_ntuple);
	default:
		rte_flow_error_set(error,
				   RTE_FLOW_ERROR_TYPE_ACTION,

	if (filter1 && !filter->matching_l2_fltr_ptr) {
		bnxt_free_filter(bp, filter1);
		filter1->fw_l2_filter_id = -1;

	act = bnxt_flow_non_void_action(++act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error,
				   RTE_FLOW_ERROR_TYPE_ACTION,

struct bnxt_vnic_info *find_matching_vnic(struct bnxt *bp,
					  struct bnxt_filter_info *filter)
	struct bnxt_vnic_info *vnic = NULL;

	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic->fw_vnic_id != INVALID_VNIC_ID &&
		    filter->dst_id == vnic->fw_vnic_id) {
			PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n",

static int
bnxt_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_filter_info *filter;

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,

	vnic = find_matching_vnic(bp, filter);
		if (STAILQ_EMPTY(&vnic->filter)) {
			rte_free(vnic->fw_grp_ids);
			bnxt_hwrm_vnic_ctx_free(bp, vnic);
			bnxt_hwrm_vnic_free(bp, vnic);
			vnic->rx_queue_cnt = 0;
			PMD_DRV_LOG(DEBUG, "Free VNIC\n");

	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		bnxt_hwrm_clear_em_filter(bp, filter);
	else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		bnxt_hwrm_clear_ntuple_filter(bp, filter);
		bnxt_hwrm_clear_l2_filter(bp, filter);

	/* No need to hold on to this filter if we are just validating flow */
	filter->fw_l2_filter_id = UINT64_MAX;
	bnxt_free_filter(bp, filter);

static void
bnxt_update_filter(struct bnxt *bp, struct bnxt_filter_info *old_filter,
		   struct bnxt_filter_info *new_filter)
	/* Clear the new L2 filter that was created in the previous step in
	 * bnxt_validate_and_parse_flow. For L2 filters, we will use the new
	 * filter which points to the new destination queue and so we clear
	 * the previous L2 filter. For ntuple filters, we are going to reuse
	 * the old L2 filter and create a new NTUPLE filter with this new
	 * destination queue subsequently during bnxt_flow_create.
	 */
	if (new_filter->filter_type == HWRM_CFA_L2_FILTER) {
		bnxt_hwrm_clear_l2_filter(bp, old_filter);
		bnxt_hwrm_set_l2_filter(bp, new_filter->dst_id, new_filter);
		if (new_filter->filter_type == HWRM_CFA_EM_FILTER)
			bnxt_hwrm_clear_em_filter(bp, old_filter);
		if (new_filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			bnxt_hwrm_clear_ntuple_filter(bp, old_filter);
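
/* Caller-side sketch (mirrors the handling in bnxt_flow_create() below):
 *
 *	ret = bnxt_match_filter(bp, filter);
 *	if (ret == -EEXIST)
 *		...identical flow already installed; drop the new filter...
 *	else if (ret == -EXDEV)
 *		...same pattern, new destination; bnxt_match_filter() has
 *		   already called bnxt_update_filter() to switch over...
 */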

static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
	struct bnxt_filter_info *mf;
	struct rte_flow *flow;

	for (i = bp->max_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		if (vnic->fw_vnic_id == INVALID_VNIC_ID)

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask))) {
				if (mf->dst_id == nf->dst_id)
				/* Free the old filter, update the flow
				 * list with the new filter.
				 */
				bnxt_update_filter(bp, mf, nf);
				STAILQ_REMOVE(&vnic->filter, mf,
					      bnxt_filter_info, next);
				STAILQ_INSERT_TAIL(&vnic->filter, nf, next);
				bnxt_free_filter(bp, mf);

static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic = NULL;
	struct bnxt_filter_info *filter;
	bool update_flow = false;
	struct rte_flow *flow;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow, not a trusted VF!");

	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
		PMD_DRV_LOG(ERR, "Flow validation failed.\n");

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,

	ret = bnxt_match_filter(bp, filter);
	if (ret == -EEXIST) {
		PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
		/* Clear the filter that was created as part of
		 * validate_and_parse_flow() above.
		 */
		bnxt_hwrm_clear_l2_filter(bp, filter);
	} else if (ret == -EXDEV) {
		PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
		PMD_DRV_LOG(DEBUG, "Updating with different destination\n");

	/* If tunnel redirection to a VF/PF is specified, then only
	 * tunnel_type is set and enables is set to the tunnel type.
	 * Issue the HWRM cmd directly
	 */
	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Unable to query tunnel to VF");

		if (tun_type == (1U << filter->tunnel_type)) {
				bnxt_hwrm_tunnel_redirect_free(bp,
							       filter->tunnel_type);
					    "Unable to free existing tunnel\n");
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   "Unable to free preexisting "

		ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type);
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Unable to redirect tunnel to VF");

		vnic = &bp->vnic_info[0];

	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);

	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);

	vnic = find_matching_vnic(bp, filter);

	if (!ret || update_flow) {
		flow->filter = filter;

		/* VNIC is set only in case of queue or RSS action */
		/* RxQ0 is not used for flow filters. */

		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);

	flow->filter = filter;

	PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
	STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);

	bnxt_free_filter(bp, filter);
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Matching Flow exists.");
	else if (ret == -EXDEV)
		rte_flow_error_set(error, 0,
				   RTE_FLOW_ERROR_TYPE_NONE, NULL,
				   "Flow with pattern exists, updating destination queue");
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");

static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
					       struct bnxt_filter_info *filter,
					       struct rte_flow_error *error)
	uint16_t tun_dst_fid;

	ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Unable to query tunnel to VF");

	if (tun_type == (1U << filter->tunnel_type)) {
		ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   "tunnel_redirect info cmd fail");

		PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
			    tun_dst_fid + bp->first_vf_id, bp->fw_fid);

		/* Tunnel doesn't belong to this VF, so don't send HWRM
		 * cmd, just delete the flow from driver
		 */
		if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id))
				    "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
			ret = bnxt_hwrm_tunnel_redirect_free(bp,
							     filter->tunnel_type);

static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_filter_info *filter = flow->filter;
	struct bnxt_vnic_info *vnic = flow->vnic;

	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_handle_tunnel_redirect_destroy(bp,

	ret = bnxt_match_filter(bp, filter);
		PMD_DRV_LOG(ERR, "Could not find matching flow\n");

	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
	ret = bnxt_hwrm_clear_l2_filter(bp, filter);

	STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
	bnxt_free_filter(bp, filter);
	STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);

	/* If this was the last flow associated with this VNIC,
	 * switch the queue back to the RSS pool.
	 */
	if (vnic && STAILQ_EMPTY(&vnic->flow_list)) {
		rte_free(vnic->fw_grp_ids);
		if (vnic->rx_queue_cnt > 1)
			bnxt_hwrm_vnic_ctx_free(bp, vnic);

		bnxt_hwrm_vnic_free(bp, vnic);
		vnic->rx_queue_cnt = 0;

		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");

static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;

	for (i = 0; i < bp->max_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (vnic->fw_vnic_id == INVALID_VNIC_ID)

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			struct bnxt_filter_info *filter = flow->filter;

			if (filter->filter_type ==
			    HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
			    filter->enables == filter->tunnel_type) {
					bnxt_handle_tunnel_redirect_destroy(bp,

			if (filter->filter_type == HWRM_CFA_EM_FILTER)
				ret = bnxt_hwrm_clear_em_filter(bp, filter);
			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
				ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
			ret = bnxt_hwrm_clear_l2_filter(bp, filter);

						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   "Failed to flush flow in HW.");

			bnxt_free_filter(bp, filter);
			STAILQ_REMOVE(&vnic->flow_list, flow,

const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};
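
/* Illustrative application-level usage of these ops through the generic
 * rte_flow API (hypothetical port and queue ids, error handling omitted):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions,
 *				       &err);
 */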