/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
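/*
 * Filter Functions
 */

/* Pull the first free filter from the pool and pre-program it as an RX
 * L2 MAC filter for the port's primary MAC address.
 */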
struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;

	/* Find the 1st unused filter from the free_filter_list pool */
	filter = STAILQ_FIRST(&bp->free_filter_list);
	if (!filter) {
		PMD_DRV_LOG(ERR, "No more free filter resources\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

	/* Default to L2 MAC Addr filter */
	filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
	memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
	       ETHER_ADDR_LEN);
	memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
	return filter;
}
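/* Allocate a filter outside the shared pool and attach it to the given
 * VF's filter list. Used by the PF to track per-VF L2 filters.
 */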
struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
{
	struct bnxt_filter_info *filter;

	filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
	if (!filter) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
			    vf);
		return NULL;
	}

	filter->fw_l2_filter_id = UINT64_MAX;
	STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
	return filter;
}
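/* Build the free-filter pool: mark every entry as holding no firmware
 * handle and chain it onto free_filter_list.
 */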
void bnxt_init_filters(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;
	int i, max_filters;

	max_filters = bp->max_l2_ctx;
	STAILQ_INIT(&bp->free_filter_list);
	for (i = 0; i < max_filters; i++) {
		filter = &bp->filter_info[i];
		filter->fw_l2_filter_id = UINT64_MAX;
		filter->fw_em_filter_id = UINT64_MAX;
		filter->fw_ntuple_filter_id = UINT64_MAX;
		STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
	}
}
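/* Detach all filters from their VNICs, returning them to the free pool,
 * and clear the HWRM L2 filters programmed for each VF.
 */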
void bnxt_free_all_filters(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	int i;

	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);
				STAILQ_REMOVE(&vnic->filter, filter,
					      bnxt_filter_info, next);
				STAILQ_INSERT_TAIL(&bp->free_filter_list,
						   filter, next);
				filter = temp_filter;
			}
			STAILQ_INIT(&vnic->filter);
		}
	}

	for (i = 0; i < bp->pf.max_vfs; i++) {
		STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next)
			bnxt_hwrm_clear_l2_filter(bp, filter);
	}
}
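/* Release the filter pool memory. Any filter that still holds a firmware
 * handle is cleared via HWRM first.
 */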
void bnxt_free_filter_mem(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;
	uint16_t max_filters, i;
	int rc = 0;

	if (bp->filter_info == NULL)
		return;

	/* Ensure that all filters are freed */
	max_filters = bp->max_l2_ctx;
	for (i = 0; i < max_filters; i++) {
		filter = &bp->filter_info[i];
		if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
			PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n");
			/* Call HWRM to try to free filter again */
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
			if (rc)
				PMD_DRV_LOG(ERR,
					    "HWRM filter cannot be freed rc = %d\n",
					    rc);
		}
		filter->fw_l2_filter_id = UINT64_MAX;
	}
	STAILQ_INIT(&bp->free_filter_list);

	rte_free(bp->filter_info);
	bp->filter_info = NULL;

	for (i = 0; i < bp->pf.max_vfs; i++) {
		/* Drain from the head so each filter is unlinked before
		 * it is freed.
		 */
		while ((filter = STAILQ_FIRST(&bp->pf.vf_info[i].filter))) {
			STAILQ_REMOVE_HEAD(&bp->pf.vf_info[i].filter, next);
			rte_free(filter);
		}
	}
}
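/* Allocate the filter pool, sized by the number of L2 contexts the
 * firmware granted this port (bp->max_l2_ctx).
 */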
int bnxt_alloc_filter_mem(struct bnxt *bp)
{
	struct bnxt_filter_info *filter_mem;
	uint16_t max_filters;

	max_filters = bp->max_l2_ctx;
	/* Allocate memory for the filter pool */
	filter_mem = rte_zmalloc("bnxt_filter_info",
				 max_filters * sizeof(struct bnxt_filter_info),
				 0);
	if (filter_mem == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters",
			    max_filters);
		return -ENOMEM;
	}
	bp->filter_info = filter_mem;
	return 0;
}
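/* Like bnxt_alloc_filter(), but hands back a pool entry without
 * pre-programming any L2 defaults; callers fill it in themselves.
 */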
struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;

	/* Find the 1st unused filter from the free_filter_list pool */
	filter = STAILQ_FIRST(&bp->free_filter_list);
	if (!filter) {
		PMD_DRV_LOG(ERR, "No more free filter resources\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

	return filter;
}
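/* Return a filter to the free pool. The caller is responsible for
 * clearing any firmware state beforehand.
 */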
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
	STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}
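/* Basic rte_flow argument sanity checks shared by the validate and
 * create entry points.
 */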
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	return 0;
}
static const struct rte_flow_item *
nxt_non_void_pattern(const struct rte_flow_item *cur)
{
	while (1) {
		if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
			return cur;
		cur++;
	}
}

static const struct rte_flow_action *
nxt_non_void_action(const struct rte_flow_action *cur)
{
	while (1) {
		if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
			return cur;
		cur++;
	}
}
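/* Return 1 if the buffer is all zeroes, 0 otherwise. Used to detect
 * empty IPv6 address masks.
 */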
int check_zero_bytes(const uint8_t *bytes, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (bytes[i] != 0x00)
			return 0;
	return 1;
}
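/* Decide which HWRM filter flavor a pattern needs: returns nonzero for
 * an ntuple filter, 0 for an exact-match (EM) filter (VLAN patterns),
 * or a negative errno on an invalid combination.
 */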
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
		       struct rte_flow_error *error)
{
	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
	int use_ntuple = 1;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			use_ntuple = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			use_ntuple = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_TCP:
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* need ntuple match, reset exact match */
			if (!use_ntuple) {
				PMD_DRV_LOG(ERR,
					    "VLAN flow cannot use NTUPLE filter\n");
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Cannot use VLAN with NTUPLE");
				return -rte_errno;
			}
			use_ntuple |= 1;
			break;
		default:
			PMD_DRV_LOG(ERR, "Unknown Flow type");
			use_ntuple |= 1;
		}
		item++;
	}
	return use_ntuple;
}
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
				  const struct rte_flow_item pattern[],
				  struct rte_flow_error *error,
				  struct bnxt_filter_info *filter)
{
	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	const struct rte_flow_item_vf *vf_spec;
	uint32_t tenant_id_be = 0;
	bool vni_masked = 0;
	bool tni_masked = 0;
	uint32_t vf = 0;
	int use_ntuple;
	uint32_t en = 0;
	uint32_t en_ethertype;
	int dflt_vnic;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
	if (use_ntuple < 0)
		return use_ntuple;

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
	en_ethertype = use_ntuple ?
		NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
		EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->last) {
			/* last or range is NOT supported as match criteria */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "No support for range");
			return -rte_errno;
		}
		if (!item->spec || !item->mask) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "spec/mask is NULL");
			return -rte_errno;
		}
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			/* Source MAC address mask cannot be partially set:
			 * it should be all 0's or all 1's.
			 * Destination MAC address mask must not be partially
			 * set either: all 1's or all 0's.
			 */
			if ((!is_zero_ether_addr(&eth_mask->src) &&
			     !is_broadcast_ether_addr(&eth_mask->src)) ||
			    (!is_zero_ether_addr(&eth_mask->dst) &&
			     !is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "MAC_addr mask not valid");
				return -rte_errno;
			}

			/* Mask is not allowed. Only exact matches are */
			if (eth_mask->type &&
			    eth_mask->type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "ethertype mask not valid");
				return -rte_errno;
			}

			if (is_broadcast_ether_addr(&eth_mask->dst)) {
				rte_memcpy(filter->dst_macaddr,
					   &eth_spec->dst, ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
			}
			if (is_broadcast_ether_addr(&eth_mask->src)) {
				rte_memcpy(filter->src_macaddr,
					   &eth_spec->src, ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
			}
			if (eth_mask->type) {
				filter->ethertype =
					rte_be_to_cpu_16(eth_spec->type);
				en |= en_ethertype;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			if (en & en_ethertype) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN TPID matching is not"
						   " supported");
				return -rte_errno;
			}
			if (vlan_mask->tci &&
			    vlan_mask->tci == RTE_BE16(0x0fff)) {
				/* Only the VLAN ID can be matched. */
				filter->l2_ovlan =
					rte_be_to_cpu_16(vlan_spec->tci &
							 RTE_BE16(0x0fff));
				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
			} else if (vlan_mask->tci) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN mask is invalid");
				return -rte_errno;
			}
			if (vlan_mask->inner_type &&
			    vlan_mask->inner_type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "inner ethertype mask not"
						   " valid");
				return -rte_errno;
			}
			if (vlan_mask->inner_type) {
				filter->ethertype =
					rte_be_to_cpu_16(vlan_spec->inner_type);
				en |= en_ethertype;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* If mask is not involved, we could use EM filters. */
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;
			/* Only IP DST and SRC fields are maskable. */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.type_of_service ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.time_to_live ||
			    ipv4_mask->hdr.next_proto_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return -rte_errno;
			}
			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			if (ipv4_mask->hdr.src_addr) {
				filter->src_ipaddr_mask[0] =
					ipv4_mask->hdr.src_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}
			if (ipv4_mask->hdr.dst_addr) {
				filter->dst_ipaddr_mask[0] =
					ipv4_mask->hdr.dst_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}
			filter->ip_addr_type = use_ntuple ?
			 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
			 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
			if (ipv4_spec->hdr.next_proto_id) {
				filter->ip_protocol =
					ipv4_spec->hdr.next_proto_id;
				if (use_ntuple)
					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
				else
					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv6_mask->hdr.vtc_flow ||
			    ipv6_mask->hdr.payload_len ||
			    ipv6_mask->hdr.proto ||
			    ipv6_mask->hdr.hop_limits) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask.");
				return -rte_errno;
			}

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			rte_memcpy(filter->src_ipaddr,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->dst_ipaddr,
				   ipv6_spec->hdr.dst_addr, 16);
			if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
				rte_memcpy(filter->src_ipaddr_mask,
					   ipv6_mask->hdr.src_addr, 16);
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}
			if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
				rte_memcpy(filter->dst_ipaddr_mask,
					   ipv6_mask->hdr.dst_addr, 16);
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}
			filter->ip_addr_type = use_ntuple ?
				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			/* Check TCP mask. Only DST & SRC ports are maskable */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return -rte_errno;
			}
			filter->src_port = tcp_spec->hdr.src_port;
			filter->dst_port = tcp_spec->hdr.dst_port;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
			if (tcp_mask->hdr.dst_port) {
				filter->dst_port_mask = tcp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				       NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}
			if (tcp_mask->hdr.src_port) {
				filter->src_port_mask = tcp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				       NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			/* Only DST & SRC ports are maskable */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return -rte_errno;
			}

			filter->src_port = udp_spec->hdr.src_port;
			filter->dst_port = udp_spec->hdr.dst_port;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (udp_mask->hdr.dst_port) {
				filter->dst_port_mask = udp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				       NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}
			if (udp_mask->hdr.src_port) {
				filter->src_port_mask = udp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				       NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
			    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
			    vxlan_spec->flags != 0x8) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (vni_masked) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid VNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
			    nvgre_spec->protocol != 0x6558) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			/* Check if TNI is masked. */
			if (nvgre_spec && nvgre_mask) {
				tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (tni_masked) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid TNI mask");
					return -rte_errno;
				}
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = item->spec;
			vf = vf_spec->id;
			if (!BNXT_PF(bp)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Configuring on a VF!");
				return -rte_errno;
			}

			if (vf >= bp->pdev->max_vfs) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Incorrect VF id!");
				return -rte_errno;
			}

			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
			if (dflt_vnic < 0) {
				/* This simply indicates there's no driver
				 * loaded. This is not an error.
				 */
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Unable to get default VNIC for VF");
				return -rte_errno;
			}
			filter->mirror_vnic_id = dflt_vnic;
			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
			break;
		default:
			break;
		}
		item++;
	}
	filter->enables = en;

	return 0;
}
/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "No support for egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "No support for priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   attr, "No support for group.");
		return -rte_errno;
	}

	return 0;
}
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		   struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter1, *f0;
	struct bnxt_vnic_info *vnic0;
	int rc;

	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	f0 = STAILQ_FIRST(&vnic0->filter);

	/* This flow has the same DST MAC as the port/L2 filter. */
	if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
		return f0;

	/* This flow needs a DST MAC that differs from the port/L2 filter. */
	PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
	filter1 = bnxt_get_unused_filter(bp);
	if (filter1 == NULL)
		return NULL;
	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
	memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
	memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
				     filter1);
	if (rc) {
		bnxt_free_filter(bp, filter1);
		return NULL;
	}
	return filter1;
}
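/* Parse pattern, attributes, and the single supported action into
 * "filter". QUEUE, DROP, COUNT, and VF actions are accepted; exactly
 * one action must precede the END action.
 */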
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error,
			     struct bnxt_filter_info *filter)
{
	const struct rte_flow_action *act = nxt_non_void_action(actions);
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	struct bnxt_vnic_info *vnic, *vnic0;
	struct bnxt_filter_info *filter1;
	uint32_t vf = 0;
	int dflt_vnic;
	int rc;

	if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Cannot create flow on RSS queues");
		rc = -rte_errno;
		goto ret;
	}

	rc = bnxt_validate_and_parse_flow_type(bp, pattern, error, filter);
	if (rc != 0)
		goto ret;

	rc = bnxt_flow_parse_attr(attr, error);
	if (rc != 0)
		goto ret;

	/* We support the ingress attribute only, for now. */
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		/* Allow this flow. Redirect to a VNIC. */
		act_q = (const struct rte_flow_action_queue *)act->conf;
		if (act_q->index >= bp->rx_nr_rings) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Invalid queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
		if (vnic == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "No matching VNIC for queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		PMD_DRV_LOG(DEBUG, "VNIC found\n");
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			filter->flags =
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
		else
			filter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		vf = act_vf->id;
		if (!BNXT_PF(bp)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Configuring on a VF!");
			rc = -rte_errno;
			goto ret;
		}

		if (vf >= bp->pdev->max_vfs) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Incorrect VF id!");
			rc = -rte_errno;
			goto ret;
		}

		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
		if (dflt_vnic < 0) {
			/* This simply indicates there's no driver loaded.
			 * This is not an error.
			 */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Unable to get default VNIC for VF");
			rc = -rte_errno;
			goto ret;
		}

		filter->mirror_vnic_id = dflt_vnic;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		break;
	default:
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

	act = nxt_non_void_action(++act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}
ret:
	return rc;
}
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter;
	int ret = 0;

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0)
		return ret;

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		return -ENOMEM;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	/* No need to hold on to this filter if we are just validating flow */
	filter->fw_l2_filter_id = UINT64_MAX;
	bnxt_free_filter(bp, filter);

	return ret;
}
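/* Scan existing flows for a filter matching "nf". Returns -EEXIST for
 * an identical flow with the same destination, or -EXDEV if only the
 * destination differs, in which case the old hardware filter is
 * cleared and the flow is updated to point at "nf".
 */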
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf;
	struct rte_flow *flow;
	int i;

	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;

			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask))) {
				if (mf->dst_id == nf->dst_id)
					return -EEXIST;
				/* Same flow, different queue:
				 * clear the old hardware filter.
				 */
				if (nf->filter_type == HWRM_CFA_EM_FILTER)
					bnxt_hwrm_clear_em_filter(bp, mf);
				if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
					bnxt_hwrm_clear_ntuple_filter(bp, mf);
				/* Free the old filter and point the flow
				 * at the new one.
				 */
				bnxt_free_filter(bp, mf);
				flow->filter = nf;
				return -EXDEV;
			}
		}
	}
	return 0;
}
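/* rte_flow create handler: validate and parse the flow, program the EM
 * or ntuple filter via HWRM, and attach the new flow to its VNIC's
 * flow list.
 */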
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter;
	struct bnxt_vnic_info *vnic = NULL;
	bool update_flow = false;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Not a valid flow.\n");
		goto free_flow;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		ret = -ENOSPC;
		goto free_flow;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	if (ret != 0)
		goto free_filter;

	ret = bnxt_match_filter(bp, filter);
	if (ret == -EEXIST) {
		PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
		/* Clear the filter that was created as part of
		 * validate_and_parse_flow() above.
		 */
		bnxt_hwrm_clear_l2_filter(bp, filter);
		goto free_filter;
	} else if (ret == -EXDEV) {
		PMD_DRV_LOG(DEBUG, "Flow with same pattern exists");
		PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
		update_flow = true;
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
		filter->enables |=
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
	}
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
		filter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
	}

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (filter->dst_id == vnic->fw_vnic_id)
			break;
	}

	if (!ret) {
		flow->filter = filter;
		flow->vnic = vnic;
		if (update_flow) {
			ret = -EXDEV;
			goto free_flow;
		}
		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
		return flow;
	}
free_filter:
	bnxt_free_filter(bp, filter);
free_flow:
	if (ret == -EEXIST)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Matching Flow exists.");
	else if (ret == -EXDEV)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Flow with pattern exists, updating destination queue");
	else
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");
	rte_free(flow);
	flow = NULL;
	return flow;
}
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter = flow->filter;
	struct bnxt_vnic_info *vnic = flow->vnic;
	int ret = 0;

	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		PMD_DRV_LOG(ERR, "Could not find matching flow\n");
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
	else
		ret = bnxt_hwrm_clear_l2_filter(bp, filter);
	if (!ret) {
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			struct bnxt_filter_info *filter = flow->filter;

			if (filter->filter_type == HWRM_CFA_EM_FILTER)
				ret = bnxt_hwrm_clear_em_filter(bp, filter);
			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
				ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

			if (ret) {
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Failed to flush flow in HW.");
				return -rte_errno;
			}

			STAILQ_REMOVE(&vnic->flow_list, flow,
				      rte_flow, next);
			rte_free(flow);
		}
	}

	return ret;
}
const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};