/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_util.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * Filter Functions
 */

struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;

	/* Find the 1st unused filter from the free_filter_list pool */
	filter = STAILQ_FIRST(&bp->free_filter_list);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "No more free filter resources\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

	/* Default to L2 MAC Addr filter */
	filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
	memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
	       ETHER_ADDR_LEN);
	memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
	return filter;
}

struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
{
	struct bnxt_filter_info *filter;

	filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
			    vf);
		return NULL;
	}

	filter->fw_l2_filter_id = UINT64_MAX;
	STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
	return filter;
}

void bnxt_init_filters(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;
	int i, max_filters;

	max_filters = bp->max_l2_ctx;
	STAILQ_INIT(&bp->free_filter_list);
	for (i = 0; i < max_filters; i++) {
		filter = &bp->filter_info[i];
		filter->fw_l2_filter_id = UINT64_MAX;
		filter->fw_em_filter_id = UINT64_MAX;
		filter->fw_ntuple_filter_id = UINT64_MAX;
		STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
	}
}

void bnxt_free_all_filters(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	int i;

	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);
				STAILQ_REMOVE(&vnic->filter, filter,
					      bnxt_filter_info, next);
				STAILQ_INSERT_TAIL(&bp->free_filter_list,
						   filter, next);
				filter = temp_filter;
			}
			STAILQ_INIT(&vnic->filter);
		}
	}

	for (i = 0; i < bp->pf.max_vfs; i++) {
		STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
			bnxt_hwrm_clear_l2_filter(bp, filter);
		}
	}
}

void bnxt_free_filter_mem(struct bnxt *bp)
{
	struct bnxt_filter_info *filter, *temp_filter;
	uint16_t max_filters, i;
	int rc = 0;

	if (bp->filter_info == NULL)
		return;

	/* Ensure that all filters are freed */
	max_filters = bp->max_l2_ctx;
	for (i = 0; i < max_filters; i++) {
		filter = &bp->filter_info[i];
		if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
			PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n");
			/* Call HWRM to try to free filter again */
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
			if (rc)
				PMD_DRV_LOG(ERR,
					    "HWRM filter cannot be freed rc = %d\n",
					    rc);
		}
		filter->fw_l2_filter_id = UINT64_MAX;
	}
	STAILQ_INIT(&bp->free_filter_list);

	rte_free(bp->filter_info);
	bp->filter_info = NULL;

	for (i = 0; i < bp->pf.max_vfs; i++) {
		filter = STAILQ_FIRST(&bp->pf.vf_info[i].filter);
		while (filter) {
			/* Unlink each VF filter before freeing it */
			temp_filter = STAILQ_NEXT(filter, next);
			STAILQ_REMOVE(&bp->pf.vf_info[i].filter, filter,
				      bnxt_filter_info, next);
			rte_free(filter);
			filter = temp_filter;
		}
	}
}

int bnxt_alloc_filter_mem(struct bnxt *bp)
{
	struct bnxt_filter_info *filter_mem;
	uint16_t max_filters;

	max_filters = bp->max_l2_ctx;
	/* Allocate memory for the filter pool */
	filter_mem = rte_zmalloc("bnxt_filter_info",
				 max_filters * sizeof(struct bnxt_filter_info),
				 0);
	if (filter_mem == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters",
			    max_filters);
		return -ENOMEM;
	}
	bp->filter_info = filter_mem;
	return 0;
}

struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;

	/* Find the 1st unused filter from the free_filter_list pool */
	filter = STAILQ_FIRST(&bp->free_filter_list);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "No more free filter resources\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

	return filter;
}

void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
	STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}
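
/*
 * Typical pool lifecycle (illustrative sketch only; this sequence is not
 * driven from this file):
 *
 *	rc = bnxt_alloc_filter_mem(bp);     carve out max_l2_ctx entries
 *	bnxt_init_filters(bp);              seed free_filter_list
 *	filter = bnxt_alloc_filter(bp);     take one, preset as L2 MAC filter
 *	...
 *	bnxt_free_filter(bp, filter);       return it to the free list
 *	bnxt_free_filter_mem(bp);           teardown, releases the array
 */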

static int
bnxt_flow_agrs_validate(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	return 0;
}

static const struct rte_flow_item *
nxt_non_void_pattern(const struct rte_flow_item *cur)
{
	while (cur->type == RTE_FLOW_ITEM_TYPE_VOID)
		cur++;
	return cur;
}

static const struct rte_flow_action *
nxt_non_void_action(const struct rte_flow_action *cur)
{
	while (cur->type == RTE_FLOW_ACTION_TYPE_VOID)
		cur++;
	return cur;
}
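
/*
 * Example (illustrative only): for a pattern such as
 *	{ VOID, ETH, VOID, IPV4, END }
 * nxt_non_void_pattern(&pattern[0]) returns &pattern[1], the ETH item.
 * Both helpers stop at the first non-VOID entry; END itself is non-VOID,
 * so a well-formed, END-terminated array always terminates the scan.
 */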

static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
		       struct rte_flow_error *error __rte_unused)
{
	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
	int use_ntuple = 1;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			use_ntuple = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			use_ntuple = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_TCP:
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* FALLTHROUGH */
			/* need ntuple match, reset exact match */
			if (!use_ntuple) {
				PMD_DRV_LOG(ERR,
					    "VLAN flow cannot use NTUPLE filter\n");
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Cannot use VLAN with NTUPLE");
				return -rte_errno;
			}
			use_ntuple |= 1;
			break;
		default:
			PMD_DRV_LOG(ERR, "Unknown Flow type\n");
			use_ntuple |= 1;
		}
		item++;
	}
	return use_ntuple;
}
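
/*
 * Return semantics, as consumed by the caller below: 1 selects an ntuple
 * (HWRM_CFA_NTUPLE_FILTER) filter, 0 selects an exact-match
 * (HWRM_CFA_EM_FILTER) filter, and a negative value is an rte_errno-style
 * error. For instance, a { ETH, VLAN, END } pattern forces exact match,
 * while { ETH, IPV4, TCP, END } stays on the ntuple path.
 */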

static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
				  const struct rte_flow_attr *attr,
				  const struct rte_flow_item pattern[],
				  struct rte_flow_error *error,
				  struct bnxt_filter_info *filter)
{
	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	const struct rte_flow_item_vf *vf_spec;
	uint32_t tenant_id_be = 0;
	bool vni_masked = 0;
	bool tni_masked = 0;
	uint32_t vf = 0;
	int use_ntuple;
	uint32_t en = 0;
	uint32_t en_ethertype;
	int dflt_vnic;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
	if (use_ntuple < 0)
		return use_ntuple;

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
	en_ethertype = use_ntuple ?
		NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
		EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->last) {
			/* last or range is NOT supported as match criteria */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "No support for range");
			return -rte_errno;
		}
		if (!item->spec || !item->mask) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "spec/mask is NULL");
			return -rte_errno;
		}
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			/* Source MAC address mask cannot be partially set.
			 * Should be all 0's or all 1's.
			 * Destination MAC address mask must not be partially
			 * set. Should be all 1's or all 0's.
			 */
			if ((!is_zero_ether_addr(&eth_mask->src) &&
			     !is_broadcast_ether_addr(&eth_mask->src)) ||
			    (!is_zero_ether_addr(&eth_mask->dst) &&
			     !is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "MAC_addr mask not valid");
				return -rte_errno;
			}

			/* Mask is not allowed. Only exact matches are */
			if (eth_mask->type &&
			    eth_mask->type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "ethertype mask not valid");
				return -rte_errno;
			}

			if (is_broadcast_ether_addr(&eth_mask->dst)) {
				rte_memcpy(filter->dst_macaddr,
					   &eth_spec->dst, ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
			}
			if (is_broadcast_ether_addr(&eth_mask->src)) {
				rte_memcpy(filter->src_macaddr,
					   &eth_spec->src, ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
			} /*
			   * else {
			   *	RTE_LOG(ERR, PMD, "Handle this condition\n");
			   * }
			   */
			if (eth_mask->type) {
				filter->ethertype =
					rte_be_to_cpu_16(eth_spec->type);
				en |= en_ethertype;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			if (en & en_ethertype) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN TPID matching is not"
						   " supported");
				return -rte_errno;
			}
			if (vlan_mask->tci &&
			    vlan_mask->tci == RTE_BE16(0x0fff)) {
				/* Only the VLAN ID can be matched. */
				filter->l2_ovlan =
					rte_be_to_cpu_16(vlan_spec->tci &
							 RTE_BE16(0x0fff));
				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
			} else if (vlan_mask->tci) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN mask is invalid");
				return -rte_errno;
			}
			if (vlan_mask->inner_type &&
			    vlan_mask->inner_type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "inner ethertype mask not"
						   " valid");
				return -rte_errno;
			}
			if (vlan_mask->inner_type) {
				filter->ethertype =
					rte_be_to_cpu_16(vlan_spec->inner_type);
				en |= en_ethertype;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* If mask is not involved, we could use EM filters. */
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;
			/* Only IP DST and SRC fields are maskable. */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.type_of_service ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.time_to_live ||
			    ipv4_mask->hdr.next_proto_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return -rte_errno;
			}
			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			if (ipv4_mask->hdr.src_addr) {
				filter->src_ipaddr_mask[0] =
					ipv4_mask->hdr.src_addr;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}
			if (ipv4_mask->hdr.dst_addr) {
				filter->dst_ipaddr_mask[0] =
					ipv4_mask->hdr.dst_addr;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}
			filter->ip_addr_type = use_ntuple ?
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
			if (ipv4_spec->hdr.next_proto_id) {
				filter->ip_protocol =
					ipv4_spec->hdr.next_proto_id;
				if (use_ntuple)
					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
				else
					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv6_mask->hdr.vtc_flow ||
			    ipv6_mask->hdr.payload_len ||
			    ipv6_mask->hdr.proto ||
			    ipv6_mask->hdr.hop_limits) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask.");
				return -rte_errno;
			}

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			rte_memcpy(filter->src_ipaddr,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->dst_ipaddr,
				   ipv6_spec->hdr.dst_addr, 16);
			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
						   16)) {
				rte_memcpy(filter->src_ipaddr_mask,
					   ipv6_mask->hdr.src_addr, 16);
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}
			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
						   16)) {
				rte_memcpy(filter->dst_ipaddr_mask,
					   ipv6_mask->hdr.dst_addr, 16);
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}
			filter->ip_addr_type = use_ntuple ?
				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			/* Check TCP mask. Only DST & SRC ports are maskable */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return -rte_errno;
			}
			filter->src_port = tcp_spec->hdr.src_port;
			filter->dst_port = tcp_spec->hdr.dst_port;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
			if (tcp_mask->hdr.dst_port) {
				filter->dst_port_mask = tcp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}
			if (tcp_mask->hdr.src_port) {
				filter->src_port_mask = tcp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			/* Only DST & SRC ports are maskable */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return -rte_errno;
			}

			filter->src_port = udp_spec->hdr.src_port;
			filter->dst_port = udp_spec->hdr.dst_port;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (udp_mask->hdr.dst_port) {
				filter->dst_port_mask = udp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}
			if (udp_mask->hdr.src_port) {
				filter->src_port_mask = udp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
			    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
			    vxlan_spec->flags != 0x8) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (vni_masked) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Invalid VNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			/* Both fields are big endian on the wire */
			if (nvgre_spec->c_k_s_rsvd0_ver != RTE_BE16(0x2000) ||
			    nvgre_spec->protocol != RTE_BE16(0x6558)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			/* Check if TNI is masked. */
			if (nvgre_spec && nvgre_mask) {
				tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (tni_masked) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Invalid TNI mask");
					return -rte_errno;
				}
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = item->spec;
			vf = vf_spec->id;
			if (!BNXT_PF(bp)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Configuring on a VF!");
				return -rte_errno;
			}

			if (vf >= bp->pdev->max_vfs) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Incorrect VF id!");
				return -rte_errno;
			}

			if (!attr->transfer) {
				rte_flow_error_set(error, ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Matching VF traffic without"
						   " affecting it (transfer attribute)"
						   " is unsupported");
				return -rte_errno;
			}

			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
			if (dflt_vnic < 0) {
				/* This simply indicates there's no driver
				 * loaded. This is not an error.
				 */
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Unable to get default VNIC for VF");
				return -rte_errno;
			}
			filter->mirror_vnic_id = dflt_vnic;
			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
			break;
		default:
			break;
		}
		item++;
	}
	filter->enables = en;

	return 0;
}
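
/*
 * The loop above accumulates per-item "enables" bits in 'en' and commits
 * them once at the end. For example, an { ETH, IPV4, UDP, END } ntuple
 * pattern with fully-masked fields would typically end up with
 * DST_MACADDR, SRC/DST_IPADDR and SRC/DST_PORT style bits set; the HWRM
 * alloc request consumes this bitmap verbatim.
 */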

/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "No support for egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "No support for priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   attr, "No support for group.");
		return -rte_errno;
	}

	return 0;
}
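
/*
 * Illustrative attribute usage: the only accepted shape is plain ingress,
 * e.g. an application would pass
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *
 * Any non-zero .egress, .priority, or .group is rejected above.
 */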

struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		   struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter1, *f0;
	struct bnxt_vnic_info *vnic0;
	int rc;

	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	f0 = STAILQ_FIRST(&vnic0->filter);

	/* This flow has the same DST MAC as the port/L2 filter. */
	if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
		return f0;

	/* This flow needs a DST MAC that differs from the port/L2 filter. */
	PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
	filter1 = bnxt_get_unused_filter(bp);
	if (filter1 == NULL)
		return NULL;
	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
	memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
	memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
				     filter1);
	if (rc) {
		bnxt_free_filter(bp, filter1);
		return NULL;
	}
	return filter1;
}
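
/*
 * Every EM/ntuple flow must reference an L2 filter in firmware. The helper
 * above either reuses the port's default L2 filter (same DST MAC) or
 * allocates and programs a dedicated one for the flow's DST MAC; callers
 * then copy the returned fw_l2_filter_id into their own filter before the
 * HWRM set call.
 */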

static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error,
			     struct bnxt_filter_info *filter)
{
	const struct rte_flow_action *act = nxt_non_void_action(actions);
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	struct bnxt_vnic_info *vnic, *vnic0;
	struct bnxt_filter_info *filter1;
	uint32_t vf = 0;
	int dflt_vnic;
	int rc;

	if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Cannot create flow on RSS queues");
		rc = -rte_errno;
		goto ret;
	}

	rc = bnxt_validate_and_parse_flow_type(bp, attr, pattern, error,
					       filter);
	if (rc != 0)
		goto ret;

	rc = bnxt_flow_parse_attr(attr, error);
	if (rc != 0)
		goto ret;

	/* Since we support the ingress attribute only, right now. */
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		/* Allow this flow. Redirect to a VNIC. */
		act_q = (const struct rte_flow_action_queue *)act->conf;
		if (act_q->index >= bp->rx_nr_rings) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Invalid queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
		if (vnic == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "No matching VNIC for queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		PMD_DRV_LOG(DEBUG, "VNIC found\n");
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			filter->flags =
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
		else
			filter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		vf = act_vf->id;
		if (!BNXT_PF(bp)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Configuring on a VF!");
			rc = -rte_errno;
			goto ret;
		}

		if (vf >= bp->pdev->max_vfs) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Incorrect VF id!");
			rc = -rte_errno;
			goto ret;
		}

		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
		if (dflt_vnic < 0) {
			/* This simply indicates there's no driver loaded.
			 * This is not an error.
			 */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Unable to get default VNIC for VF");
			rc = -rte_errno;
			goto ret;
		}
		filter->mirror_vnic_id = dflt_vnic;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		break;
	default:
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

	act = nxt_non_void_action(++act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}
ret:
	return rc;
}

static int
bnxt_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter;
	int ret = 0;

	ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
	if (ret != 0)
		return ret;

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		return -ENOMEM;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	/* No need to hold on to this filter if we are just validating flow */
	filter->fw_l2_filter_id = UINT64_MAX;
	bnxt_free_filter(bp, filter);

	return ret;
}
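
/*
 * Hedged usage sketch: applications reach the callback above through the
 * generic rte_flow API, e.g.
 *
 *	struct rte_flow_error err;
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions,
 *				       &err);
 *
 * Note that the validate path deliberately releases the scratch filter:
 * only the create path leaves state behind.
 */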

static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf;
	struct rte_flow *flow;
	int i;

	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;

			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask))) {
				if (mf->dst_id == nf->dst_id)
					return -EEXIST;
				/* Same Flow, Different queue
				 * Clear the old ntuple filter
				 */
				if (nf->filter_type == HWRM_CFA_EM_FILTER)
					bnxt_hwrm_clear_em_filter(bp, mf);
				if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
					bnxt_hwrm_clear_ntuple_filter(bp, mf);
				/* Free the old filter, update flow
				 * with new filter
				 */
				bnxt_free_filter(bp, mf);
				flow->filter = nf;
				return -EXDEV;
			}
		}
	}
	return 0;
}
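
/*
 * Matching above is a full-tuple comparison in which only dst_id may
 * differ: a hit with an equal dst_id reports -EEXIST (duplicate flow),
 * while a hit with a different dst_id reports -EXDEV and re-points the
 * existing rte_flow at the new filter so that the create path below can
 * retarget the destination queue.
 */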

static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter;
	struct bnxt_vnic_info *vnic = NULL;
	bool update_flow = false;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return NULL;
	}

	ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Not a valid flow.\n");
		goto free_flow;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		goto free_flow;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	if (ret != 0)
		goto free_filter;

	ret = bnxt_match_filter(bp, filter);
	if (ret == -EEXIST) {
		PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
		/* Clear the filter that was created as part of
		 * validate_and_parse_flow() above
		 */
		bnxt_hwrm_clear_l2_filter(bp, filter);
		goto free_filter;
	} else if (ret == -EXDEV) {
		PMD_DRV_LOG(DEBUG, "Flow with same pattern exists");
		PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
		update_flow = true;
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
		filter->enables |=
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
	}
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
		filter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
	}

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (filter->dst_id == vnic->fw_vnic_id)
			break;
	}

	if (!ret) {
		flow->filter = filter;
		flow->vnic = vnic;
		if (update_flow) {
			ret = -EXDEV;
			goto free_flow;
		}
		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
		return flow;
	}
free_filter:
	bnxt_free_filter(bp, filter);
free_flow:
	if (ret == -EEXIST)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Matching Flow exists.");
	else if (ret == -EXDEV)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Flow with pattern exists, updating destination queue");
	else
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");
	rte_free(flow);
	return NULL;
}

static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter = flow->filter;
	struct bnxt_vnic_info *vnic = flow->vnic;
	int ret = 0;

	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		PMD_DRV_LOG(ERR, "Could not find matching flow\n");
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
	else
		ret = bnxt_hwrm_clear_l2_filter(bp, filter);
	if (!ret) {
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}

static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow, *temp_flow;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		flow = STAILQ_FIRST(&vnic->flow_list);
		while (flow) {
			struct bnxt_filter_info *filter = flow->filter;

			/* Save the link before the flow is freed below */
			temp_flow = STAILQ_NEXT(flow, next);

			if (filter->filter_type == HWRM_CFA_EM_FILTER)
				ret = bnxt_hwrm_clear_em_filter(bp, filter);
			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
				ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

			if (ret) {
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Failed to flush flow in HW.");
				return -rte_errno;
			}

			STAILQ_REMOVE(&vnic->flow_list, flow,
				      rte_flow, next);
			rte_free(flow);
			flow = temp_flow;
		}
	}

	return ret;
}

const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};
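
/*
 * This table is handed to the rte_flow layer through the driver's
 * filter_ctrl dev_op (the RTE_ETH_FILTER_GENERIC / RTE_ETH_FILTER_GET
 * path, handled in bnxt_ethdev.c), which is how rte_flow_validate(),
 * rte_flow_create(), rte_flow_destroy() and rte_flow_flush() on a bnxt
 * port dispatch to the functions above.
 */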