/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
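
/*
 * Filter Functions
 *
 * L2/EM/ntuple filters are drawn from a single pre-allocated pool
 * (bp->filter_info); unused entries are kept on bp->free_filter_list.
 */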

struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;

	/* Find the 1st unused filter from the free_filter_list pool */
	filter = STAILQ_FIRST(&bp->free_filter_list);
	if (!filter) {
		RTE_LOG(ERR, PMD, "No more free filter resources\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

	/* Default to L2 MAC Addr filter */
	filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
	memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
	       ETHER_ADDR_LEN);
	memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
	return filter;
}
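
/*
 * VF filters are not drawn from the shared pool; each one is allocated
 * from heap memory and linked onto the owning VF's filter list.
 */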
struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
{
	struct bnxt_filter_info *filter;

	filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
	if (!filter) {
		RTE_LOG(ERR, PMD, "Failed to alloc memory for VF %hu filters\n",
			vf);
		return NULL;
	}

	filter->fw_l2_filter_id = UINT64_MAX;
	STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
	return filter;
}
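
/*
 * Reset the filter pool: every entry gets invalid (-1) firmware filter
 * IDs and is placed back on the free list.
 */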
void bnxt_init_filters(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;
	int i, max_filters;

	max_filters = bp->max_l2_ctx;
	STAILQ_INIT(&bp->free_filter_list);
	for (i = 0; i < max_filters; i++) {
		filter = &bp->filter_info[i];
		filter->fw_l2_filter_id = -1;
		filter->fw_em_filter_id = -1;
		filter->fw_ntuple_filter_id = -1;
		STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
	}
}
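
/*
 * Return every filter attached to a VNIC to the free pool and clear the
 * L2 filters programmed on behalf of VFs.
 */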
void bnxt_free_all_filters(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	int i;

	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);
				STAILQ_REMOVE(&vnic->filter, filter,
					      bnxt_filter_info, next);
				STAILQ_INSERT_TAIL(&bp->free_filter_list,
						   filter, next);
				filter = temp_filter;
			}
			STAILQ_INIT(&vnic->filter);
		}
	}

	for (i = 0; i < bp->pf.max_vfs; i++) {
		STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next)
			bnxt_hwrm_clear_l2_filter(bp, filter);
	}
}
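
/*
 * Release the filter pool itself. Any filter still holding a valid
 * firmware ID is cleared via HWRM first, so no HW resource is leaked.
 */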
void bnxt_free_filter_mem(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;
	uint16_t max_filters, i;
	int rc = 0;

	if (bp->filter_info == NULL)
		return;

	/* Ensure that all filters are freed */
	max_filters = bp->max_l2_ctx;
	for (i = 0; i < max_filters; i++) {
		filter = &bp->filter_info[i];
		if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
			RTE_LOG(ERR, PMD, "HWRM filter is not freed??\n");
			/* Call HWRM to try to free filter again */
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
			if (rc)
				RTE_LOG(ERR, PMD,
					"HWRM filter cannot be freed rc = %d\n",
					rc);
		}
		filter->fw_l2_filter_id = UINT64_MAX;
	}
	STAILQ_INIT(&bp->free_filter_list);

	rte_free(bp->filter_info);
	bp->filter_info = NULL;
}
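
/*
 * One-time allocation of the filter pool; sized by the number of L2
 * contexts the firmware granted this function.
 */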
int bnxt_alloc_filter_mem(struct bnxt *bp)
{
	struct bnxt_filter_info *filter_mem;
	uint16_t max_filters;

	max_filters = bp->max_l2_ctx;
	/* Allocate memory for the filter pool */
	filter_mem = rte_zmalloc("bnxt_filter_info",
				 max_filters * sizeof(struct bnxt_filter_info),
				 0);
	if (filter_mem == NULL) {
		RTE_LOG(ERR, PMD, "Failed to alloc memory for %d filters\n",
			max_filters);
		return -ENOMEM;
	}
	bp->filter_info = filter_mem;
	return 0;
}
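
/*
 * Like bnxt_alloc_filter(), but the caller sets up flags/enables itself;
 * used by the rte_flow path below.
 */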
struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;

	/* Find the 1st unused filter from the free_filter_list pool */
	filter = STAILQ_FIRST(&bp->free_filter_list);
	if (!filter) {
		RTE_LOG(ERR, PMD, "No more free filter resources\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

	return filter;
}
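
/* Return a filter to the free pool; does not touch HW state. */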
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
	STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}
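
/*
 * rte_flow support: basic sanity checks on the attr/pattern/actions
 * triplet before any parsing is attempted.
 */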
static int
bnxt_flow_agrs_validate(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	return 0;
}
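
/* Helpers to skip VOID items/actions in a pattern or action list. */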
static const struct rte_flow_item *
nxt_non_void_pattern(const struct rte_flow_item *cur)
{
	while (1) {
		if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
			return cur;
		cur++;
	}
}

static const struct rte_flow_action *
nxt_non_void_action(const struct rte_flow_action *cur)
{
	while (1) {
		if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
			return cur;
		cur++;
	}
}
static inline int check_zero_bytes(const uint8_t *bytes, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (bytes[i] != 0x00)
			return 0;
	return 1;
}
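
/*
 * Decide which HW filter type a pattern needs: ntuple (IP/TCP/UDP
 * matching, possibly masked) or exact match. VLAN forces exact match
 * and therefore cannot be combined with the ntuple-only items.
 */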
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
		       struct rte_flow_error *error __rte_unused)
{
	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
	int use_ntuple = 1;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			use_ntuple = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			use_ntuple = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_TCP:
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* need ntuple match, reset exact match */
			if (!use_ntuple) {
				RTE_LOG(ERR, PMD,
					"VLAN flow cannot use NTUPLE filter\n");
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Cannot use VLAN with NTUPLE");
				return -rte_errno;
			}
			use_ntuple |= 1;
			break;
		default:
			RTE_LOG(ERR, PMD, "Unknown Flow type\n");
			use_ntuple |= 1;
		}
		item++;
	}
	return use_ntuple;
}
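
/*
 * Walk the pattern once and translate each item into the corresponding
 * HWRM filter fields, accumulating the "enables" bitmap as we go. The
 * enable macros differ between the ntuple and exact-match HWRM requests,
 * so most assignments are conditional on use_ntuple.
 */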
static int
bnxt_validate_and_parse_flow_type(const struct rte_flow_item pattern[],
				  struct rte_flow_error *error,
				  struct bnxt_filter_info *filter)
{
	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	uint32_t tenant_id_be = 0;
	uint32_t vni_masked = 0;
	uint32_t tni_masked = 0;
	uint32_t en = 0;
	int use_ntuple;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	RTE_LOG(DEBUG, PMD, "Use NTUPLE %d\n", use_ntuple);
	if (use_ntuple < 0)
		return use_ntuple;

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->last) {
			/* last or range is NOT supported as match criteria */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "No support for range");
			return -rte_errno;
		}
		if (!item->spec || !item->mask) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "spec/mask is NULL");
			return -rte_errno;
		}
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Source MAC address mask cannot be partially set.
			 * Should be all 0's or all 1's.
			 * Destination MAC address mask must not be partially
			 * set. Should be all 1's or all 0's.
			 */
			if ((!is_zero_ether_addr(&eth_mask->src) &&
			     !is_broadcast_ether_addr(&eth_mask->src)) ||
			    (!is_zero_ether_addr(&eth_mask->dst) &&
			     !is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "MAC_addr mask not valid");
				return -rte_errno;
			}

			/* Mask is not allowed. Only exact matches are */
			if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "ethertype mask not valid");
				return -rte_errno;
			}

			if (is_broadcast_ether_addr(&eth_mask->dst)) {
				rte_memcpy(filter->dst_macaddr,
					   &eth_spec->dst, ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
			}
			if (is_broadcast_ether_addr(&eth_mask->src)) {
				rte_memcpy(filter->src_macaddr,
					   &eth_spec->src, ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
			} /* else {
			   *	RTE_LOG(ERR, PMD, "Handle this condition\n");
			   * }
			   */
			if (eth_spec->type) {
				filter->ethertype =
					rte_be_to_cpu_16(eth_spec->type);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
					EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;
			if (vlan_mask->tci & 0xFFFF && !vlan_mask->tpid) {
				/* Only the VLAN ID can be matched. */
				filter->l2_ovlan =
					rte_be_to_cpu_16(vlan_spec->tci &
							 0xFFF);
				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
			} else {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN mask is invalid");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* If mask is not involved, we could use EM filters. */
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			ipv4_mask =
				(const struct rte_flow_item_ipv4 *)item->mask;
			/* Only IP DST and SRC fields are maskable. */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.type_of_service ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.time_to_live ||
			    ipv4_mask->hdr.next_proto_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return -rte_errno;
			}
			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			if (ipv4_mask->hdr.src_addr) {
				filter->src_ipaddr_mask[0] =
					ipv4_mask->hdr.src_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}
			if (ipv4_mask->hdr.dst_addr) {
				filter->dst_ipaddr_mask[0] =
					ipv4_mask->hdr.dst_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}
			filter->ip_addr_type = use_ntuple ?
			 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
			 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
			if (ipv4_spec->hdr.next_proto_id) {
				filter->ip_protocol =
					ipv4_spec->hdr.next_proto_id;
				if (use_ntuple)
					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
				else
					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec =
				(const struct rte_flow_item_ipv6 *)item->spec;
			ipv6_mask =
				(const struct rte_flow_item_ipv6 *)item->mask;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv6_mask->hdr.vtc_flow ||
			    ipv6_mask->hdr.payload_len ||
			    ipv6_mask->hdr.proto ||
			    ipv6_mask->hdr.hop_limits) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask.");
				return -rte_errno;
			}

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			rte_memcpy(filter->src_ipaddr,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->dst_ipaddr,
				   ipv6_spec->hdr.dst_addr, 16);
			if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
				rte_memcpy(filter->src_ipaddr_mask,
					   ipv6_mask->hdr.src_addr, 16);
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}
			if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
				rte_memcpy(filter->dst_ipaddr_mask,
					   ipv6_mask->hdr.dst_addr, 16);
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}
			filter->ip_addr_type = use_ntuple ?
				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

			/* Check TCP mask. Only DST & SRC ports are maskable */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return -rte_errno;
			}
			filter->src_port = tcp_spec->hdr.src_port;
			filter->dst_port = tcp_spec->hdr.dst_port;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
			if (tcp_mask->hdr.dst_port) {
				filter->dst_port_mask = tcp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				      NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}
			if (tcp_mask->hdr.src_port) {
				filter->src_port_mask = tcp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				      NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			udp_mask = (const struct rte_flow_item_udp *)item->mask;

			/* Check UDP mask. Only DST & SRC ports are maskable */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return -rte_errno;
			}

			filter->src_port = udp_spec->hdr.src_port;
			filter->dst_port = udp_spec->hdr.dst_port;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (udp_mask->hdr.dst_port) {
				filter->dst_port_mask = udp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				      NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}
			if (udp_mask->hdr.src_port) {
				filter->src_port_mask = udp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				      NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec =
				(const struct rte_flow_item_vxlan *)item->spec;
			vxlan_mask =
				(const struct rte_flow_item_vxlan *)item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
			    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
			    vxlan_spec->flags != 0x8) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (vni_masked) {
					rte_flow_error_set(error, EINVAL,
						       RTE_FLOW_ERROR_TYPE_ITEM,
						       item,
						       "Invalid VNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			nvgre_mask =
				(const struct rte_flow_item_nvgre *)item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
			    nvgre_spec->protocol != 0x6558) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec && nvgre_mask) {
				tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (tni_masked) {
					rte_flow_error_set(error, EINVAL,
						       RTE_FLOW_ERROR_TYPE_ITEM,
						       item,
						       "Invalid TNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
			}
			break;
		default:
			break;
		}
		item++;
	}
	filter->enables = en;

	return 0;
}

/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "No support for egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "No support for priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   attr, "No support for group.");
		return -rte_errno;
	}

	return 0;
}
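
/*
 * Every EM/ntuple flow hangs off an L2 filter. If the flow's destination
 * MAC matches the port's default L2 filter, reuse it; otherwise allocate
 * and program a new L2 filter for that MAC on the target VNIC.
 */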
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		   struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter1, *f0;
	struct bnxt_vnic_info *vnic0;
	int rc;

	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	f0 = STAILQ_FIRST(&vnic0->filter);

	/* This flow has same DST MAC as the port/l2 filter. */
	if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
		return f0;

	/* This flow needs DST MAC which is not same as port/l2 */
	RTE_LOG(DEBUG, PMD, "Create L2 filter for DST MAC\n");
	filter1 = bnxt_get_unused_filter(bp);
	if (filter1 == NULL)
		return NULL;
	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
	memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
	memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
				     filter1);
	if (rc) {
		bnxt_free_filter(bp, filter1);
		return NULL;
	}
	STAILQ_INSERT_TAIL(&vnic->filter, filter1, next);
	return filter1;
}
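
/*
 * Parse the action list and finish building the filter: QUEUE maps the
 * flow to the VNIC backing that RX queue, DROP and COUNT are programmed
 * via the corresponding HWRM flags (COUNT is mapped onto the meter flag
 * here). Exactly one action is accepted.
 */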
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error,
			     struct bnxt_filter_info *filter)
{
	const struct rte_flow_action *act = nxt_non_void_action(actions);
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	const struct rte_flow_action_queue *act_q;
	struct bnxt_vnic_info *vnic, *vnic0;
	struct bnxt_filter_info *filter1;
	int rc;

	if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		RTE_LOG(ERR, PMD, "Cannot create flow on RSS queues\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Cannot create flow on RSS queues");
		rc = -rte_errno;
		goto ret;
	}

	rc = bnxt_validate_and_parse_flow_type(pattern, error, filter);
	if (rc != 0)
		goto ret;

	rc = bnxt_flow_parse_attr(attr, error);
	if (rc != 0)
		goto ret;

	/* Since we support ingress attribute only - right now */
	filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		/* Allow this flow. Redirect to a VNIC. */
		act_q = (const struct rte_flow_action_queue *)act->conf;
		if (act_q->index >= bp->rx_nr_rings) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Invalid queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		RTE_LOG(DEBUG, PMD, "Queue index %d\n", act_q->index);

		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
		if (vnic == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "No matching VNIC for queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		RTE_LOG(DEBUG, PMD, "VNIC found\n");
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			filter->flags =
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
		else
			filter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
		break;
	default:
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

	act = nxt_non_void_action(++act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}
ret:
	return rc;
}
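
/*
 * rte_flow validate: run the full parse against a scratch filter, then
 * return it to the pool without programming anything into the HW.
 */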
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter;
	int ret = 0;

	ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
	if (ret != 0)
		return ret;

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
		return -ENOMEM;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	/* No need to hold on to this filter if we are just validating flow */
	filter->fw_l2_filter_id = -1;
	bnxt_free_filter(bp, filter);

	return ret;
}
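
/*
 * Scan all flows on all VNICs for a filter whose match fields are
 * identical to the candidate; used to reject duplicate flows.
 */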
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf;
	struct rte_flow *flow;
	int i;

	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;
			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask)))
				return -EEXIST;
		}
	}
	return 0;
}
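
/*
 * rte_flow create: validate and parse the flow, reject duplicates, then
 * program the EM or ntuple filter via HWRM and attach the flow to the
 * destination VNIC's flow list.
 */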
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter;
	struct bnxt_vnic_info *vnic = NULL;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
	if (ret != 0) {
		RTE_LOG(ERR, PMD, "Not a valid flow.\n");
		goto free_flow;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
		goto free_flow;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	if (ret != 0)
		goto free_filter;

	ret = bnxt_match_filter(bp, filter);
	if (ret != 0) {
		RTE_LOG(DEBUG, PMD, "Flow already exists.\n");
		goto free_filter;
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
		filter->enables |=
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
	}
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
		filter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
	}

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (filter->dst_id == vnic->fw_vnic_id)
			break;
	}

	if (!ret) {
		flow->filter = filter;
		flow->vnic = vnic;
		RTE_LOG(DEBUG, PMD, "Successfully created flow.\n");
		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
		return flow;
	}
free_filter:
	filter->fw_l2_filter_id = -1;
	bnxt_free_filter(bp, filter);
free_flow:
	if (ret == -EEXIST)
		rte_flow_error_set(error, ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Matching Flow exists.");
	else
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");
	rte_free(flow);
	flow = NULL;
	return flow;
}
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter = flow->filter;
	struct bnxt_vnic_info *vnic = flow->vnic;
	int ret = 0;

	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		RTE_LOG(ERR, PMD, "Could not find matching flow\n");
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

	if (!ret) {
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			struct bnxt_filter_info *filter = flow->filter;

			if (filter->filter_type == HWRM_CFA_EM_FILTER)
				ret = bnxt_hwrm_clear_em_filter(bp, filter);
			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
				ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

			if (ret) {
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Failed to flush flow in HW.");
				return -rte_errno;
			}

			STAILQ_REMOVE(&vnic->flow_list, flow,
				      rte_flow, next);
			rte_free(flow);
		}
	}

	return ret;
}

const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};
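
/*
 * Illustrative usage (not part of the driver): an application reaches the
 * ops above through the generic rte_flow API. The port ID and queue index
 * below are hypothetical; this is a minimal sketch steering IPv4 traffic
 * to RX queue 1, assuming RSS is not enabled on the port.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_eth eth_spec = { .type = RTE_BE16(0x0800) };
 *	struct rte_flow_item_eth eth_mask = { .type = RTE_BE16(0xFFFF) };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(0, &attr, pattern,
 *						actions, &err);
 */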