/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
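
/*
 * Filter Functions
 *
 * Filters are carved out of one pre-allocated array (see
 * bnxt_alloc_filter_mem()) and recycled through bp->free_filter_list.
 */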
struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;

        /* Find the 1st unused filter from the free_filter_list pool */
        filter = STAILQ_FIRST(&bp->free_filter_list);
        if (!filter) {
                PMD_DRV_LOG(ERR, "No more free filter resources\n");
                return NULL;
        }
        STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

        /* Default to L2 MAC Addr filter */
        filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
        memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
               ETHER_ADDR_LEN);
        memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
        return filter;
}
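
/* Allocate a filter for the given VF and link it into that VF's filter
 * list; programming it into firmware is left to the caller.
 */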
struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
{
        struct bnxt_filter_info *filter;

        filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
        if (!filter) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
                            vf);
                return NULL;
        }

        filter->fw_l2_filter_id = UINT64_MAX;
        STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
        return filter;
}
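
/* Mark every filter as holding no firmware handle and rebuild the
 * free_filter_list from the pre-allocated array.
 */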
void bnxt_init_filters(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;
        int i, max_filters;

        max_filters = bp->max_l2_ctx;
        STAILQ_INIT(&bp->free_filter_list);
        for (i = 0; i < max_filters; i++) {
                filter = &bp->filter_info[i];
                filter->fw_l2_filter_id = UINT64_MAX;
                filter->fw_em_filter_id = UINT64_MAX;
                filter->fw_ntuple_filter_id = UINT64_MAX;
                STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
        }
}
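
/* Detach every filter from its VNIC and return it to the free list;
 * VF filters additionally have their L2 entries cleared in firmware.
 */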
void bnxt_free_all_filters(struct bnxt *bp)
{
        struct bnxt_vnic_info *vnic;
        struct bnxt_filter_info *filter, *temp_filter;
        int i;

        for (i = 0; i < MAX_FF_POOLS; i++) {
                STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
                        filter = STAILQ_FIRST(&vnic->filter);
                        while (filter) {
                                /* Grab the successor first; the insert
                                 * below rewrites filter->next.
                                 */
                                temp_filter = STAILQ_NEXT(filter, next);
                                STAILQ_REMOVE(&vnic->filter, filter,
                                              bnxt_filter_info, next);
                                STAILQ_INSERT_TAIL(&bp->free_filter_list,
                                                   filter, next);
                                filter = temp_filter;
                        }
                        STAILQ_INIT(&vnic->filter);
                }
        }

        for (i = 0; i < bp->pf.max_vfs; i++) {
                STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
                        bnxt_hwrm_clear_l2_filter(bp, filter);
                }
        }
}
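
/* Release the filter array. Any filter still holding a firmware L2
 * handle is cleared in firmware first, since freeing the memory would
 * otherwise leak the hardware resource.
 */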
void bnxt_free_filter_mem(struct bnxt *bp)
{
        struct bnxt_filter_info *filter, *temp_filter;
        uint16_t max_filters, i;
        int rc = 0;

        if (bp->filter_info == NULL)
                return;

        /* Ensure that all filters are freed */
        max_filters = bp->max_l2_ctx;
        for (i = 0; i < max_filters; i++) {
                filter = &bp->filter_info[i];
                if (filter->fw_l2_filter_id != UINT64_MAX) {
                        PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n");
                        /* Call HWRM to try to free filter again */
                        rc = bnxt_hwrm_clear_l2_filter(bp, filter);
                        if (rc)
                                PMD_DRV_LOG(ERR,
                                            "HWRM filter cannot be freed rc = %d\n",
                                            rc);
                }
                filter->fw_l2_filter_id = UINT64_MAX;
        }
        STAILQ_INIT(&bp->free_filter_list);

        rte_free(bp->filter_info);
        bp->filter_info = NULL;

        for (i = 0; i < bp->pf.max_vfs; i++) {
                /* Walk with an explicit successor pointer; STAILQ_FOREACH
                 * would read freed memory after rte_free() below.
                 */
                filter = STAILQ_FIRST(&bp->pf.vf_info[i].filter);
                while (filter) {
                        temp_filter = STAILQ_NEXT(filter, next);
                        STAILQ_REMOVE(&bp->pf.vf_info[i].filter, filter,
                                      bnxt_filter_info, next);
                        rte_free(filter);
                        filter = temp_filter;
                }
        }
}
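
/* Allocate the filter array, sized for the device's L2 context limit. */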
int bnxt_alloc_filter_mem(struct bnxt *bp)
{
        struct bnxt_filter_info *filter_mem;
        uint16_t max_filters;

        max_filters = bp->max_l2_ctx;
        /* Allocate memory for the filter pool */
        filter_mem = rte_zmalloc("bnxt_filter_info",
                                 max_filters * sizeof(struct bnxt_filter_info),
                                 0);
        if (filter_mem == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters",
                            max_filters);
                return -ENOMEM;
        }
        bp->filter_info = filter_mem;
        return 0;
}
struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;

        /* Find the 1st unused filter from the free_filter_list pool */
        filter = STAILQ_FIRST(&bp->free_filter_list);
        if (!filter) {
                PMD_DRV_LOG(ERR, "No more free filter resources\n");
                return NULL;
        }
        STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

        return filter;
}
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
        STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}
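
/*
 * rte_flow support
 *
 * Each flow is backed by an exact-match (EM) or an n-tuple firmware
 * filter, plus an L2 filter for the destination MAC.
 */

/* Basic argument validation: the attr/pattern/action arrays must exist. */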
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}
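
/* Advance past VOID items; the pattern/action arrays are END-terminated. */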
static const struct rte_flow_item *
nxt_non_void_pattern(const struct rte_flow_item *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return cur;
                cur++;
        }
}

static const struct rte_flow_action *
nxt_non_void_action(const struct rte_flow_action *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return cur;
                cur++;
        }
}
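
/* Return 1 if the first len bytes are all zero, 0 otherwise. */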
int check_zero_bytes(const uint8_t *bytes, int len)
{
        int i;

        for (i = 0; i < len; i++)
                if (bytes[i] != 0x00)
                        return 0;
        return 1;
}
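
/* Decide between an n-tuple and an exact-match (EM) filter for this
 * pattern: L3/L4 items require n-tuple, a VLAN item forces EM, and the
 * two cannot be combined.
 */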
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
                       struct rte_flow_error *error)
{
        const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
        int use_ntuple = 1;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        use_ntuple = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        use_ntuple = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
                case RTE_FLOW_ITEM_TYPE_TCP:
                case RTE_FLOW_ITEM_TYPE_UDP:
                        /* need ntuple match, reset exact match */
                        if (!use_ntuple) {
                                PMD_DRV_LOG(ERR,
                                        "VLAN flow cannot use NTUPLE filter\n");
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Cannot use VLAN with NTUPLE");
                                return -rte_errno;
                        }
                        use_ntuple |= 1;
                        break;
                default:
                        PMD_DRV_LOG(ERR, "Unknown Flow type");
                        use_ntuple |= 1;
                }
                item++;
        }
        return use_ntuple;
}
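
/* Translate the rte_flow pattern into HWRM filter fields, accumulating
 * in "en" the enables bits that tell firmware which fields are valid.
 */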
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                                  const struct rte_flow_item pattern[],
                                  struct rte_flow_error *error,
                                  struct bnxt_filter_info *filter)
{
        const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;
        uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
        uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
        const struct rte_flow_item_vf *vf_spec;
        uint32_t tenant_id_be = 0;
        bool vni_masked = 0;
        bool tni_masked = 0;
        uint32_t en = 0;
        int use_ntuple;
        uint32_t vf = 0;
        int dflt_vnic;

        use_ntuple = bnxt_filter_type_check(pattern, error);
        PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
        if (use_ntuple < 0)
                return use_ntuple;

        filter->filter_type = use_ntuple ?
                HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (item->last) {
                        /* last or range is NOT supported as match criteria */
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "No support for range");
                        return -rte_errno;
                }
                if (!item->spec || !item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "spec/mask is NULL");
                        return -rte_errno;
                }
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        /* Source MAC address mask cannot be partially set.
                         * Should be all 0's or all 1's.
                         * Destination MAC address mask must not be partially
                         * set. Should be all 1's or all 0's.
                         */
                        if ((!is_zero_ether_addr(&eth_mask->src) &&
                             !is_broadcast_ether_addr(&eth_mask->src)) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "MAC_addr mask not valid");
                                return -rte_errno;
                        }

                        /* Mask is not allowed. Only exact matches are */
                        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "ethertype mask not valid");
                                return -rte_errno;
                        }

                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                rte_memcpy(filter->dst_macaddr,
                                           &eth_spec->dst, ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
                        }
                        if (is_broadcast_ether_addr(&eth_mask->src)) {
                                rte_memcpy(filter->src_macaddr,
                                           &eth_spec->src, ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
                        }
                        if (eth_spec->type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(eth_spec->type);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
                                        EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec = item->spec;
                        vlan_mask = item->mask;
                        if (vlan_mask->tci & 0xFFFF && !vlan_mask->tpid) {
                                /* Only the VLAN ID can be matched. */
                                filter->l2_ovlan =
                                        rte_be_to_cpu_16(vlan_spec->tci &
                                                         0xFFF);
                                en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
                        } else {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN mask is invalid");
                                return -rte_errno;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        /* If mask is not involved, we could use EM filters. */
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;
                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.type_of_service ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.time_to_live ||
                            ipv4_mask->hdr.next_proto_id ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }
                        filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
                        filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
                        if (ipv4_mask->hdr.src_addr) {
                                filter->src_ipaddr_mask[0] =
                                        ipv4_mask->hdr.src_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }
                        if (ipv4_mask->hdr.dst_addr) {
                                filter->dst_ipaddr_mask[0] =
                                        ipv4_mask->hdr.dst_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }
                        filter->ip_addr_type = use_ntuple ?
                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
                        if (ipv4_spec->hdr.next_proto_id) {
                                filter->ip_protocol =
                                        ipv4_spec->hdr.next_proto_id;
                                if (use_ntuple)
                                        en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
                                else
                                        en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv6_mask->hdr.vtc_flow ||
                            ipv6_mask->hdr.payload_len ||
                            ipv6_mask->hdr.proto ||
                            ipv6_mask->hdr.hop_limits) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
                        rte_memcpy(filter->src_ipaddr,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->dst_ipaddr,
                                   ipv6_spec->hdr.dst_addr, 16);
                        if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
                                rte_memcpy(filter->src_ipaddr_mask,
                                           ipv6_mask->hdr.src_addr, 16);
                                en |= !use_ntuple ? 0 :
                                    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }
                        if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
                                rte_memcpy(filter->dst_ipaddr_mask,
                                           ipv6_mask->hdr.dst_addr, 16);
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }
                        filter->ip_addr_type = use_ntuple ?
                                NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
                                EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        /* Check TCP mask. Only DST & SRC ports are maskable */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }
                        filter->src_port = tcp_spec->hdr.src_port;
                        filter->dst_port = tcp_spec->hdr.dst_port;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
                        if (tcp_mask->hdr.dst_port) {
                                filter->dst_port_mask = tcp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }
                        if (tcp_mask->hdr.src_port) {
                                filter->src_port_mask = tcp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        filter->src_port = udp_spec->hdr.src_port;
                        filter->dst_port = udp_spec->hdr.dst_port;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (udp_mask->hdr.dst_port) {
                                filter->dst_port_mask = udp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }
                        if (udp_mask->hdr.src_port) {
                                filter->src_port_mask = udp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
                            vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
                            vxlan_spec->flags != 0x8) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        /* Check if VNI is masked. */
                        if (vxlan_spec && vxlan_mask) {
                                vni_masked =
                                        !!memcmp(vxlan_mask->vni, vni_mask,
                                                 RTE_DIM(vni_mask));
                                if (vni_masked) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VNI mask");
                                        return -rte_errno;
                                }

                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           vxlan_spec->vni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
                            nvgre_spec->protocol != 0x6558) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (nvgre_spec && nvgre_mask) {
                                tni_masked =
                                        !!memcmp(nvgre_mask->tni, tni_mask,
                                                 RTE_DIM(tni_mask));
                                if (tni_masked) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TNI mask");
                                        return -rte_errno;
                                }
                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           nvgre_spec->tni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = item->spec;
                        vf = vf_spec->id;
                        if (!BNXT_PF(bp)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Configuring on a VF!");
                                return -rte_errno;
                        }

                        if (vf >= bp->pdev->max_vfs) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Incorrect VF id!");
                                return -rte_errno;
                        }

                        dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                        if (dflt_vnic < 0) {
                                /* This simply indicates there's no driver
                                 * loaded. This is not an error.
                                 */
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Unable to get default VNIC for VF");
                                return -rte_errno;
                        }
                        filter->mirror_vnic_id = dflt_vnic;
                        en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
                        break;
                default:
                        break;
                }
                item++;
        }
        filter->enables = en;

        return 0;
}
/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "No support for egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "No support for priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "No support for group.");
                return -rte_errno;
        }

        return 0;
}
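
/* Find or create the L2 filter matching the flow's destination MAC.
 * The port's default L2 filter is reused when the MACs are identical.
 */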
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
                   struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter1, *f0;
        struct bnxt_vnic_info *vnic0;
        int rc;

        vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
        f0 = STAILQ_FIRST(&vnic0->filter);

        /* This flow has the same DST MAC as the port/L2 filter. */
        if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
                return f0;

        /* This flow needs a DST MAC which differs from the port/L2 filter. */
        PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
        filter1 = bnxt_get_unused_filter(bp);
        if (filter1 == NULL)
                return NULL;
        filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
        memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
        memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
                                     filter1);
        if (rc) {
                bnxt_free_filter(bp, filter1);
                return NULL;
        }
        return filter1;
}
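
/* Parse pattern, attributes and the single supported action, and fill
 * in the filter's destination (VNIC, drop, counter or VF mirror).
 */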
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             const struct rte_flow_attr *attr,
                             struct rte_flow_error *error,
                             struct bnxt_filter_info *filter)
{
        const struct rte_flow_action *act = nxt_non_void_action(actions);
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_vf *act_vf;
        struct bnxt_vnic_info *vnic, *vnic0;
        struct bnxt_filter_info *filter1;
        uint32_t vf = 0;
        int dflt_vnic;
        int rc;

        if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
                PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Cannot create flow on RSS queues");
                rc = -rte_errno;
                goto ret;
        }

        rc = bnxt_validate_and_parse_flow_type(bp, pattern, error, filter);
        if (rc != 0)
                goto ret;

        rc = bnxt_flow_parse_attr(attr, error);
        if (rc != 0)
                goto ret;

        /* Since we support the ingress attribute only - right now. */
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                /* Allow this flow. Redirect to a VNIC. */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                if (act_q->index >= bp->rx_nr_rings) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Invalid queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
                if (vnic == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "No matching VNIC for queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                filter->dst_id = vnic->fw_vnic_id;
                filter1 = bnxt_get_l2_filter(bp, filter, vnic);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                PMD_DRV_LOG(DEBUG, "VNIC found\n");
                break;
        case RTE_FLOW_ACTION_TYPE_DROP:
                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        filter->flags =
                                HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
                else
                        filter->flags =
                                HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
                break;
        case RTE_FLOW_ACTION_TYPE_COUNT:
                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
                break;
        case RTE_FLOW_ACTION_TYPE_VF:
                act_vf = (const struct rte_flow_action_vf *)act->conf;
                vf = act_vf->id;
                if (!BNXT_PF(bp)) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Configuring on a VF!");
                        rc = -rte_errno;
                        goto ret;
                }

                if (vf >= bp->pdev->max_vfs) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Incorrect VF id!");
                        rc = -rte_errno;
                        goto ret;
                }

                dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                if (dflt_vnic < 0) {
                        /* This simply indicates there's no driver loaded.
                         * This is not an error.
                         */
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Unable to get default VNIC for VF");
                        rc = -rte_errno;
                        goto ret;
                }
                filter->mirror_vnic_id = dflt_vnic;
                filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                break;
        default:
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }

        act = nxt_non_void_action(++act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }
ret:
        return rc;
}
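
/* rte_flow validate op: parse the flow with a scratch filter, then
 * return the filter to the pool.
 */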
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter;
        int ret = 0;

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0)
                return ret;

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                return -ENOMEM;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        /* No need to hold on to this filter if we are just validating flow */
        filter->fw_l2_filter_id = UINT64_MAX;
        bnxt_free_filter(bp, filter);

        return ret;
}
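
/* Look for an existing flow with the same match fields. Returns -EEXIST
 * for an exact duplicate, -EXDEV when only the destination differs (the
 * old filter is cleared and the flow rebound to the new one), else 0.
 */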
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
        struct bnxt_filter_info *mf;
        struct rte_flow *flow;
        int i;

        for (i = bp->nr_vnics - 1; i >= 0; i--) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                STAILQ_FOREACH(flow, &vnic->flow_list, next) {
                        mf = flow->filter;

                        if (mf->filter_type == nf->filter_type &&
                            mf->flags == nf->flags &&
                            mf->src_port == nf->src_port &&
                            mf->src_port_mask == nf->src_port_mask &&
                            mf->dst_port == nf->dst_port &&
                            mf->dst_port_mask == nf->dst_port_mask &&
                            mf->ip_protocol == nf->ip_protocol &&
                            mf->ip_addr_type == nf->ip_addr_type &&
                            mf->ethertype == nf->ethertype &&
                            mf->vni == nf->vni &&
                            mf->tunnel_type == nf->tunnel_type &&
                            mf->l2_ovlan == nf->l2_ovlan &&
                            mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
                            mf->l2_ivlan == nf->l2_ivlan &&
                            mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
                            !memcmp(mf->l2_addr, nf->l2_addr,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_macaddr, nf->src_macaddr,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->dst_macaddr, nf->dst_macaddr,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_ipaddr, nf->src_ipaddr,
                                    sizeof(nf->src_ipaddr)) &&
                            !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
                                    sizeof(nf->src_ipaddr_mask)) &&
                            !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
                                    sizeof(nf->dst_ipaddr)) &&
                            !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
                                    sizeof(nf->dst_ipaddr_mask))) {
                                if (mf->dst_id == nf->dst_id)
                                        return -EEXIST;
                                /* Same Flow, Different queue
                                 * Clear the old ntuple filter
                                 */
                                if (nf->filter_type == HWRM_CFA_EM_FILTER)
                                        bnxt_hwrm_clear_em_filter(bp, mf);
                                if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                        bnxt_hwrm_clear_ntuple_filter(bp, mf);
                                /* Free the old filter, update flow
                                 * with new filter
                                 */
                                bnxt_free_filter(bp, mf);
                                flow->filter = nf;
                                return -EXDEV;
                        }
                }
        }
        return 0;
}
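
/* rte_flow create op: validate and parse the flow, program the EM or
 * n-tuple filter in firmware, and attach the flow to its VNIC's list.
 */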
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter;
        struct bnxt_vnic_info *vnic = NULL;
        bool update_flow = false;
        struct rte_flow *flow;
        unsigned int i;
        int ret = 0;

        flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return flow;
        }

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "Not a valid flow.\n");
                goto free_flow;
        }

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                goto free_flow;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        if (ret != 0)
                goto free_filter;

        ret = bnxt_match_filter(bp, filter);
        if (ret == -EEXIST) {
                PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
                /* Clear the filter that was created as part of
                 * validate_and_parse_flow() above
                 */
                bnxt_hwrm_clear_l2_filter(bp, filter);
                goto free_filter;
        } else if (ret == -EXDEV) {
                PMD_DRV_LOG(DEBUG, "Flow with same pattern exists");
                PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
                update_flow = true;
        }

        if (filter->filter_type == HWRM_CFA_EM_FILTER) {
                filter->enables |=
                        HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
        }
        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
                filter->enables |=
                        HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
        }

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];
                if (filter->dst_id == vnic->fw_vnic_id)
                        break;
        }

        if (!ret) {
                flow->filter = filter;
                flow->vnic = vnic;
                if (update_flow) {
                        ret = -EXDEV;
                        goto free_flow;
                }
                PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
                STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
                return flow;
        }
free_filter:
        bnxt_free_filter(bp, filter);
free_flow:
        if (ret == -EEXIST)
                rte_flow_error_set(error, ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Matching Flow exists.");
        else if (ret == -EXDEV)
                rte_flow_error_set(error, ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Flow with pattern exists, updating destination queue");
        else
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to create flow.");
        rte_free(flow);
        flow = NULL;
        return flow;
}
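
/* rte_flow destroy op: clear the filter in firmware and unlink the flow
 * from its VNIC.
 */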
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter = flow->filter;
        struct bnxt_vnic_info *vnic = flow->vnic;
        int ret = 0;

        ret = bnxt_match_filter(bp, filter);
        if (ret == 0)
                PMD_DRV_LOG(ERR, "Could not find matching flow\n");
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                ret = bnxt_hwrm_clear_em_filter(bp, filter);
        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
        else
                ret = bnxt_hwrm_clear_l2_filter(bp, filter);
        if (!ret) {
                STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
                rte_free(flow);
        } else {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to destroy flow.");
        }

        return ret;
}
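
/* rte_flow flush op: clear every flow on every VNIC in firmware and
 * free the flow objects.
 */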
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_vnic_info *vnic;
        struct rte_flow *flow;
        unsigned int i;
        int ret = 0;

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];
                /* Pop flows off the head; STAILQ_FOREACH would touch the
                 * flow after rte_free() below.
                 */
                while ((flow = STAILQ_FIRST(&vnic->flow_list)) != NULL) {
                        struct bnxt_filter_info *filter = flow->filter;

                        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                                ret = bnxt_hwrm_clear_em_filter(bp, filter);
                        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                ret = bnxt_hwrm_clear_ntuple_filter(bp,
                                                                    filter);

                        if (ret) {
                                rte_flow_error_set(error, -ret,
                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
                                                   NULL,
                                                   "Failed to flush flow in HW.");
                                return -rte_errno;
                        }

                        STAILQ_REMOVE_HEAD(&vnic->flow_list, next);
                        rte_free(flow);
                }
        }

        return ret;
}
const struct rte_flow_ops bnxt_flow_ops = {
        .validate = bnxt_flow_validate,
        .create = bnxt_flow_create,
        .destroy = bnxt_flow_destroy,
        .flush = bnxt_flow_flush,
};