/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>

#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;

	/* Find the 1st unused filter from the free_filter_list pool */
	filter = STAILQ_FIRST(&bp->free_filter_list);
	if (filter == NULL) {
		RTE_LOG(ERR, PMD, "No more free filter resources\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

	/* Default to L2 MAC Addr filter */
	filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
	memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
	       ETHER_ADDR_LEN);
	memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
	return filter;
}
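
/*
 * Note: the default filter built above copies the port's own MAC
 * address and matches it exactly (all-ones mask), so it can be handed
 * unchanged to bnxt_hwrm_set_l2_filter() elsewhere in the driver.
 */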

struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
{
	struct bnxt_filter_info *filter;

	filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
	if (filter == NULL) {
		RTE_LOG(ERR, PMD, "Failed to alloc memory for VF %hu filters\n",
			vf);
		return NULL;
	}

	filter->fw_l2_filter_id = UINT64_MAX;
	STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
	return filter;
}

void bnxt_init_filters(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;
	uint16_t max_filters, i;

	max_filters = bp->max_l2_ctx;
	STAILQ_INIT(&bp->free_filter_list);
	for (i = 0; i < max_filters; i++) {
		filter = &bp->filter_info[i];
		filter->fw_l2_filter_id = -1;
		filter->fw_em_filter_id = -1;
		filter->fw_ntuple_filter_id = -1;
		STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
	}
}

void bnxt_free_all_filters(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	int i;

	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);
				STAILQ_REMOVE(&vnic->filter, filter,
					      bnxt_filter_info, next);
				STAILQ_INSERT_TAIL(&bp->free_filter_list,
						   filter, next);
				filter = temp_filter;
			}
			STAILQ_INIT(&vnic->filter);
		}
	}

	for (i = 0; i < bp->pf.max_vfs; i++) {
		STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
			bnxt_hwrm_clear_l2_filter(bp, filter);
		}
	}
}

void bnxt_free_filter_mem(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;
	uint16_t max_filters, i;
	int rc = 0;

	if (bp->filter_info == NULL)
		return;

	/* Ensure that all filters are freed */
	max_filters = bp->max_l2_ctx;
	for (i = 0; i < max_filters; i++) {
		filter = &bp->filter_info[i];
		if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
			RTE_LOG(ERR, PMD, "HWRM filter was not freed?\n");
			/* Call HWRM to try to free the filter again */
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
			if (rc)
				RTE_LOG(ERR, PMD,
					"HWRM filter cannot be freed rc = %d\n",
					rc);
		}
		filter->fw_l2_filter_id = UINT64_MAX;
	}
	STAILQ_INIT(&bp->free_filter_list);

	rte_free(bp->filter_info);
	bp->filter_info = NULL;
}

int bnxt_alloc_filter_mem(struct bnxt *bp)
{
	struct bnxt_filter_info *filter_mem;
	uint16_t max_filters;

	max_filters = bp->max_l2_ctx;
	/* Allocate memory for the filter pool */
	filter_mem = rte_zmalloc("bnxt_filter_info",
				 max_filters * sizeof(struct bnxt_filter_info),
				 0);
	if (filter_mem == NULL) {
		RTE_LOG(ERR, PMD, "Failed to alloc memory for %d filters\n",
			max_filters);
		return -ENOMEM;
	}
	bp->filter_info = filter_mem;
	return 0;
}

struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;

	/* Find the 1st unused filter from the free_filter_list pool */
	filter = STAILQ_FIRST(&bp->free_filter_list);
	if (filter == NULL) {
		RTE_LOG(ERR, PMD, "No more free filter resources\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

	return filter;
}

void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
	STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}
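
/*
 * Filter pool life cycle, for reference: bnxt_alloc_filter_mem()
 * reserves the backing array, bnxt_init_filters() threads every entry
 * onto free_filter_list, bnxt_alloc_filter()/bnxt_get_unused_filter()
 * pop entries, bnxt_free_filter() returns them, and
 * bnxt_free_filter_mem() releases the array at teardown.
 */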

static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	return 0;
}

static const struct rte_flow_item *
nxt_non_void_pattern(const struct rte_flow_item *cur)
{
	while (1) {
		if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
			return cur;
		cur++;
	}
}

static const struct rte_flow_action *
nxt_non_void_action(const struct rte_flow_action *cur)
{
	while (1) {
		if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
			return cur;
		cur++;
	}
}
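
/*
 * Illustrative example: given the pattern
 *     [ VOID, ETH, VOID, IPV4, END ]
 * nxt_non_void_pattern() returns the ETH item, and nxt_non_void_action()
 * behaves the same way for actions, so VOID entries are transparent to
 * the parsers below.
 */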

static inline int check_zero_bytes(const uint8_t *bytes, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (bytes[i] != 0x00)
			return 0;
	return 1;
}

static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
		       struct rte_flow_error *error)
{
	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
	int use_ntuple = 1;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			use_ntuple = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			use_ntuple = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_TCP:
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* These need an ntuple match; a preceding VLAN item
			 * already forced exact match, and the two cannot mix.
			 */
			if (!use_ntuple) {
				RTE_LOG(ERR, PMD,
					"VLAN flow cannot use NTUPLE filter\n");
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Cannot use VLAN with NTUPLE");
				return -rte_errno;
			}
			use_ntuple = 1;
			break;
		default:
			RTE_LOG(ERR, PMD, "Unknown Flow type\n");
			use_ntuple = 1;
		}
		item++;
	}
	return use_ntuple;
}
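
/*
 * Filter-type selection at a glance (illustrative patterns):
 *     ETH                  -> ntuple filter
 *     ETH / IPV4 / TCP     -> ntuple filter (address/port masks allowed)
 *     ETH / VLAN           -> exact-match (EM) filter
 *     ETH / VLAN / IPV4    -> rejected: VLAN cannot combine with the
 *                             ntuple-only L3/L4 items
 */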

static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
				  const struct rte_flow_item pattern[],
				  struct rte_flow_error *error,
				  struct bnxt_filter_info *filter)
{
	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	const struct rte_flow_item_vf *vf_spec;
	uint32_t tenant_id_be = 0;
	uint32_t vni_masked = 0;
	uint32_t tni_masked = 0;
	uint32_t en = 0;
	uint32_t vf = 0;
	int use_ntuple;
	int dflt_vnic;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	RTE_LOG(DEBUG, PMD, "Use NTUPLE %d\n", use_ntuple);
	if (use_ntuple < 0)
		return use_ntuple;

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->last) {
			/* last or range is NOT supported as match criteria */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "No support for range");
			return -rte_errno;
		}
		if (!item->spec || !item->mask) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "spec/mask is NULL");
			return -rte_errno;
		}
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Source and destination MAC address masks cannot be
			 * partially set: each must be all 0's (ignored) or
			 * all 1's (exact match).
			 */
			if ((!is_zero_ether_addr(&eth_mask->src) &&
			     !is_broadcast_ether_addr(&eth_mask->src)) ||
			    (!is_zero_ether_addr(&eth_mask->dst) &&
			     !is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "MAC_addr mask not valid");
				return -rte_errno;
			}

			/* Mask is not allowed. Only exact matches are */
			if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "ethertype mask not valid");
				return -rte_errno;
			}

			if (is_broadcast_ether_addr(&eth_mask->dst)) {
				rte_memcpy(filter->dst_macaddr,
					   &eth_spec->dst, ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
			}
			if (is_broadcast_ether_addr(&eth_mask->src)) {
				rte_memcpy(filter->src_macaddr,
					   &eth_spec->src, ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
			}
			if (eth_spec->type) {
				filter->ethertype =
					rte_be_to_cpu_16(eth_spec->type);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
					EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;
			if (vlan_mask->tci & 0xFFFF && !vlan_mask->tpid) {
				/* Only the VLAN ID can be matched; convert
				 * the TCI to host order before masking.
				 */
				filter->l2_ovlan =
					rte_be_to_cpu_16(vlan_spec->tci) &
					0xFFF;
				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
			} else {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN mask is invalid");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* If mask is not involved, we could use EM filters. */
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			ipv4_mask =
				(const struct rte_flow_item_ipv4 *)item->mask;
			/* Only IP DST and SRC fields are maskable. */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.type_of_service ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.time_to_live ||
			    ipv4_mask->hdr.next_proto_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return -rte_errno;
			}
			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			if (ipv4_mask->hdr.src_addr) {
				filter->src_ipaddr_mask[0] =
					ipv4_mask->hdr.src_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}
			if (ipv4_mask->hdr.dst_addr) {
				filter->dst_ipaddr_mask[0] =
					ipv4_mask->hdr.dst_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}
			filter->ip_addr_type = use_ntuple ?
			 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
			 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
			if (ipv4_spec->hdr.next_proto_id) {
				filter->ip_protocol =
					ipv4_spec->hdr.next_proto_id;
				if (use_ntuple)
					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
				else
					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec =
				(const struct rte_flow_item_ipv6 *)item->spec;
			ipv6_mask =
				(const struct rte_flow_item_ipv6 *)item->mask;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv6_mask->hdr.vtc_flow ||
			    ipv6_mask->hdr.payload_len ||
			    ipv6_mask->hdr.proto ||
			    ipv6_mask->hdr.hop_limits) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask.");
				return -rte_errno;
			}

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			rte_memcpy(filter->src_ipaddr,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->dst_ipaddr,
				   ipv6_spec->hdr.dst_addr, 16);
			if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
				rte_memcpy(filter->src_ipaddr_mask,
					   ipv6_mask->hdr.src_addr, 16);
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}
			if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
				rte_memcpy(filter->dst_ipaddr_mask,
					   ipv6_mask->hdr.dst_addr, 16);
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}
			filter->ip_addr_type = use_ntuple ?
				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

			/* Check TCP mask. Only DST & SRC ports are maskable */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return -rte_errno;
			}
			filter->src_port = tcp_spec->hdr.src_port;
			filter->dst_port = tcp_spec->hdr.dst_port;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
			if (tcp_mask->hdr.dst_port) {
				filter->dst_port_mask = tcp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}
			if (tcp_mask->hdr.src_port) {
				filter->src_port_mask = tcp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			udp_mask = (const struct rte_flow_item_udp *)item->mask;

			/* Only DST & SRC ports are maskable */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return -rte_errno;
			}

			filter->src_port = udp_spec->hdr.src_port;
			filter->dst_port = udp_spec->hdr.dst_port;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (udp_mask->hdr.dst_port) {
				filter->dst_port_mask = udp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}
			if (udp_mask->hdr.src_port) {
				filter->src_port_mask = udp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec =
				(const struct rte_flow_item_vxlan *)item->spec;
			vxlan_mask =
				(const struct rte_flow_item_vxlan *)item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
			    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
			    vxlan_spec->flags != 0x8) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (vni_masked) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			nvgre_mask =
				(const struct rte_flow_item_nvgre *)item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
			    nvgre_spec->protocol != 0x6558) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			/* Check if TNI is masked. */
			if (nvgre_spec && nvgre_mask) {
				tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (tni_masked) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = (const struct rte_flow_item_vf *)item->spec;
			vf = vf_spec->id;
			if (!BNXT_PF(bp)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Configuring on a VF!");
				return -rte_errno;
			}

			if (vf >= bp->pdev->max_vfs) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Incorrect VF id!");
				return -rte_errno;
			}

			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
			if (dflt_vnic < 0) {
				/* This simply means the VF driver is not
				 * loaded yet, so there is no default VNIC to
				 * mirror to. Reject the flow; the device
				 * itself is fine.
				 */
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Unable to get default VNIC for VF");
				return -rte_errno;
			}
			filter->mirror_vnic_id = dflt_vnic;
			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
			break;
		default:
			break;
		}
		item++;
	}
	filter->enables = en;

	return 0;
}

/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "No support for egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "No support for priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   attr, "No support for group.");
		return -rte_errno;
	}

	return 0;
}
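
/*
 * Only plain ingress flows are accepted; e.g. (illustrative):
 *     struct rte_flow_attr ok  = { .ingress = 1 };
 *     struct rte_flow_attr bad = { .ingress = 1, .priority = 3 };
 * "ok" passes the checks above, while "bad" fails the priority test.
 */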

struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		   struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter1, *f0;
	struct bnxt_vnic_info *vnic0;
	int rc;

	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	f0 = STAILQ_FIRST(&vnic0->filter);

	/* This flow has the same DST MAC as the port/L2 filter. */
	if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
		return f0;

	/* This flow needs a DST MAC that differs from the port/L2 filter. */
	RTE_LOG(DEBUG, PMD, "Create L2 filter for DST MAC\n");
	filter1 = bnxt_get_unused_filter(bp);
	if (filter1 == NULL)
		return NULL;
	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
	memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
	memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
				     filter1);
	if (rc) {
		bnxt_free_filter(bp, filter1);
		return NULL;
	}
	STAILQ_INSERT_TAIL(&vnic->filter, filter1, next);
	return filter1;
}
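
/*
 * Design note: flows whose destination MAC equals the port's default
 * L2 filter reuse f0 rather than allocating a new HWRM L2 context, so
 * only flows steering a different DST MAC consume an extra filter.
 */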

static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error,
			     struct bnxt_filter_info *filter)
{
	const struct rte_flow_action *act = nxt_non_void_action(actions);
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	struct bnxt_vnic_info *vnic, *vnic0;
	struct bnxt_filter_info *filter1;
	uint32_t vf = 0;
	int dflt_vnic;
	int rc;

	if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		RTE_LOG(ERR, PMD, "Cannot create flow on RSS queues\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Cannot create flow on RSS queues");
		rc = -rte_errno;
		goto ret;
	}

	rc = bnxt_validate_and_parse_flow_type(bp, pattern, error, filter);
	if (rc != 0)
		goto ret;

	rc = bnxt_flow_parse_attr(attr, error);
	if (rc != 0)
		goto ret;

	/* We support the ingress attribute only, for now. */
	filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		/* Allow this flow. Redirect to a VNIC. */
		act_q = (const struct rte_flow_action_queue *)act->conf;
		if (act_q->index >= bp->rx_nr_rings) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Invalid queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		RTE_LOG(DEBUG, PMD, "Queue index %d\n", act_q->index);

		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
		if (vnic == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "No matching VNIC for queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		RTE_LOG(DEBUG, PMD, "VNIC found\n");
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			filter->flags =
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
		else
			filter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		vf = act_vf->id;
		if (!BNXT_PF(bp)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Configuring on a VF!");
			rc = -rte_errno;
			goto ret;
		}

		if (vf >= bp->pdev->max_vfs) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Incorrect VF id!");
			rc = -rte_errno;
			goto ret;
		}

		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
		if (dflt_vnic < 0) {
			/* This simply means the VF driver is not loaded yet,
			 * so there is no default VNIC to redirect to.
			 */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Unable to get default VNIC for VF");
			rc = -rte_errno;
			goto ret;
		}
		filter->mirror_vnic_id = dflt_vnic;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		break;
	default:
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

	act = nxt_non_void_action(++act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}
ret:
	return rc;
}

static int
bnxt_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter;
	int ret = 0;

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0)
		return ret;

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
		return -ENOMEM;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	/* No need to hold on to this filter if we are just validating flow */
	filter->fw_l2_filter_id = -1;
	bnxt_free_filter(bp, filter);

	return ret;
}

static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf;
	struct rte_flow *flow;
	int i;

	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;

			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask)))
				return -EEXIST;
		}
	}
	return 0;
}
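
/*
 * bnxt_match_filter() returns -EEXIST when an identical filter is
 * already attached to some VNIC, and 0 otherwise; flow create uses this
 * to reject duplicates, flow destroy to warn about unknown flows.
 */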

static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter;
	struct bnxt_vnic_info *vnic = NULL;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0) {
		RTE_LOG(ERR, PMD, "Not a valid flow.\n");
		goto free_flow;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
		goto free_flow;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	if (ret != 0)
		goto free_filter;

	ret = bnxt_match_filter(bp, filter);
	if (ret == -EEXIST) {
		RTE_LOG(DEBUG, PMD, "Flow already exists.\n");
		goto free_filter;
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
		filter->enables |=
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
	}
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
		filter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
	}

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (filter->dst_id == vnic->fw_vnic_id)
			break;
	}

	if (!ret) {
		flow->filter = filter;
		flow->vnic = vnic;
		RTE_LOG(DEBUG, PMD, "Successfully created flow.\n");
		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
		return flow;
	}
free_filter:
	filter->fw_l2_filter_id = -1;
	bnxt_free_filter(bp, filter);
free_flow:
	if (ret == -EEXIST)
		rte_flow_error_set(error, EEXIST,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Matching Flow exists.");
	else if (ret < 0)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");
	rte_free(flow);
	flow = NULL;
	return flow;
}

static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter = flow->filter;
	struct bnxt_vnic_info *vnic = flow->vnic;
	int ret = 0;

	/* bnxt_match_filter() returns -EEXIST when the flow is known */
	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		RTE_LOG(ERR, PMD, "Could not find matching flow\n");
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

	if (!ret) {
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}

static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			struct bnxt_filter_info *filter = flow->filter;

			if (filter->filter_type == HWRM_CFA_EM_FILTER)
				ret = bnxt_hwrm_clear_em_filter(bp, filter);
			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
				ret = bnxt_hwrm_clear_ntuple_filter(bp,
								    filter);
			if (ret) {
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Failed to flush flow in HW.");
				return -rte_errno;
			}

			STAILQ_REMOVE(&vnic->flow_list, flow,
				      rte_flow, next);
			rte_free(flow);
		}
	}

	return ret;
}

const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};
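
/*
 * A minimal application-side sketch of exercising these callbacks
 * through the generic rte_flow API (port_id, eth_spec and eth_mask are
 * placeholders the caller must provide; error handling trimmed):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	...
 *	if (flow)
 *		rte_flow_destroy(port_id, flow, &err);
 */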