/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;

	/* Find the 1st unused filter from the free_filter_list pool */
	filter = STAILQ_FIRST(&bp->free_filter_list);
	if (!filter) {
		PMD_DRV_LOG(ERR, "No more free filter resources\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

	/* Default to L2 MAC Addr filter */
	filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
	memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
	       ETHER_ADDR_LEN);
	memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
	return filter;
}
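
/*
 * bnxt_alloc_vf_filter - allocate a filter tracking structure for the
 * given VF and link it into that VF's filter list. The firmware filter
 * id stays UINT64_MAX (invalid) until firmware assigns one.
 */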
struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
{
	struct bnxt_filter_info *filter;

	filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
	if (!filter) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
			    vf);
		return NULL;
	}

	filter->fw_l2_filter_id = UINT64_MAX;
	STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
	return filter;
}
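
/*
 * bnxt_init_filters - mark every preallocated filter as unowned by
 * firmware and chain them all onto the free list.
 */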
void bnxt_init_filters(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;
	int i, max_filters;

	max_filters = bp->max_l2_ctx;
	STAILQ_INIT(&bp->free_filter_list);
	for (i = 0; i < max_filters; i++) {
		filter = &bp->filter_info[i];
		filter->fw_l2_filter_id = UINT64_MAX;
		filter->fw_em_filter_id = UINT64_MAX;
		filter->fw_ntuple_filter_id = UINT64_MAX;
		STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
	}
}
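
/*
 * bnxt_free_all_filters - detach all filters from their VNICs and return
 * them to the free list, then clear the L2 filters programmed for VFs.
 */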
void bnxt_free_all_filters(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	int i;

	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);
				STAILQ_REMOVE(&vnic->filter, filter,
					      bnxt_filter_info, next);
				STAILQ_INSERT_TAIL(&bp->free_filter_list,
						   filter, next);
				filter = temp_filter;
			}
			STAILQ_INIT(&vnic->filter);
		}
	}

	for (i = 0; i < bp->pf.max_vfs; i++) {
		STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
			bnxt_hwrm_clear_l2_filter(bp, filter);
		}
	}
}
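
/*
 * bnxt_free_filter_mem - release the filter array. A filter that still
 * holds a firmware L2 filter id here was leaked by an earlier teardown
 * path, so one more HWRM free is attempted before the memory goes away.
 */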
void bnxt_free_filter_mem(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;
	uint16_t max_filters, i;
	int rc = 0;

	if (bp->filter_info == NULL)
		return;

	/* Ensure that all filters are freed */
	max_filters = bp->max_l2_ctx;
	for (i = 0; i < max_filters; i++) {
		filter = &bp->filter_info[i];
		if (filter->fw_l2_filter_id != UINT64_MAX) {
			PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n");
			/* Call HWRM to try to free filter again */
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
			if (rc)
				PMD_DRV_LOG(ERR,
					    "HWRM filter cannot be freed rc = %d\n",
					    rc);
		}
		filter->fw_l2_filter_id = UINT64_MAX;
	}
	STAILQ_INIT(&bp->free_filter_list);

	rte_free(bp->filter_info);
	bp->filter_info = NULL;

	for (i = 0; i < bp->pf.max_vfs; i++) {
		/* Unlink each VF filter before freeing it so the list is
		 * never walked through freed memory.
		 */
		filter = STAILQ_FIRST(&bp->pf.vf_info[i].filter);
		while (filter) {
			struct bnxt_filter_info *temp_filter =
				STAILQ_NEXT(filter, next);

			STAILQ_REMOVE(&bp->pf.vf_info[i].filter, filter,
				      bnxt_filter_info, next);
			rte_free(filter);
			filter = temp_filter;
		}
	}
}
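
/* bnxt_alloc_filter_mem - reserve one zeroed filter slot per L2 context. */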
int bnxt_alloc_filter_mem(struct bnxt *bp)
{
	struct bnxt_filter_info *filter_mem;
	uint16_t max_filters;

	max_filters = bp->max_l2_ctx;
	/* Allocate memory for VNIC pool and filter pool */
	filter_mem = rte_zmalloc("bnxt_filter_info",
				 max_filters * sizeof(struct bnxt_filter_info),
				 0);
	if (filter_mem == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters",
			    max_filters);
		return -ENOMEM;
	}
	bp->filter_info = filter_mem;
	return 0;
}
struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;

	/* Find the 1st unused filter from the free_filter_list pool */
	filter = STAILQ_FIRST(&bp->free_filter_list);
	if (!filter) {
		PMD_DRV_LOG(ERR, "No more free filter resources\n");
		return NULL;
	}

	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
	return filter;
}
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
	STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}
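
/*
 * bnxt_flow_agrs_validate - reject rte_flow requests whose pattern, action
 * list, or attribute pointer is NULL before any parsing is attempted.
 */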
static int
bnxt_flow_agrs_validate(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	return 0;
}
/* Skip any number of VOID items preceding the next real pattern item. */
static const struct rte_flow_item *
nxt_non_void_pattern(const struct rte_flow_item *cur)
{
	while (true) {
		if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
			return cur;
		cur++;
	}
}

/* Skip any number of VOID actions preceding the next real action. */
static const struct rte_flow_action *
nxt_non_void_action(const struct rte_flow_action *cur)
{
	while (true) {
		if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
			return cur;
		cur++;
	}
}
/* Return 1 if all 'len' bytes are zero, 0 otherwise. */
int check_zero_bytes(const uint8_t *bytes, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (bytes[i] != 0x00)
			return 0;
	return 1;
}
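
/*
 * bnxt_filter_type_check - walk the pattern once to choose between an
 * ntuple filter and an exact-match (EM) filter: VLAN items force exact
 * match, while combining VLAN with L3/L4 items is rejected outright.
 */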
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
		       struct rte_flow_error *error)
{
	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
	int use_ntuple = 1;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			use_ntuple = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			use_ntuple = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_TCP:
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* need ntuple match, reset exact match */
			if (!use_ntuple) {
				PMD_DRV_LOG(ERR,
					    "VLAN flow cannot use NTUPLE filter\n");
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Cannot use VLAN with NTUPLE");
				return -rte_errno;
			}
			use_ntuple |= 1;
			break;
		default:
			PMD_DRV_LOG(ERR, "Unknown Flow type");
			use_ntuple |= 1;
		}
		item++;
	}
	return use_ntuple;
}
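
/*
 * bnxt_validate_and_parse_flow_type - translate the rte_flow pattern into
 * bnxt_filter_info fields for the HWRM ntuple/EM filter alloc commands,
 * accumulating the 'enables' bitmap as match fields are recognized.
 */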
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
				  const struct rte_flow_item pattern[],
				  struct rte_flow_error *error,
				  struct bnxt_filter_info *filter)
{
	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	const struct rte_flow_item_vf *vf_spec;
	uint32_t tenant_id_be = 0;
	bool vni_masked = false;
	bool tni_masked = false;
	uint32_t en = 0;
	uint32_t vf = 0;
	int use_ntuple;
	int dflt_vnic;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
	if (use_ntuple < 0)
		return use_ntuple;

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;

320 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
322 /* last or range is NOT supported as match criteria */
323 rte_flow_error_set(error, EINVAL,
324 RTE_FLOW_ERROR_TYPE_ITEM,
326 "No support for range");
329 if (!item->spec || !item->mask) {
330 rte_flow_error_set(error, EINVAL,
331 RTE_FLOW_ERROR_TYPE_ITEM,
333 "spec/mask is NULL");
336 switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			/* Source MAC address mask cannot be partially set.
			 * Should be all 0's or all 1's.
			 * Destination MAC address mask must not be partially
			 * set. Should be all 1's or all 0's.
			 */
			if ((!is_zero_ether_addr(&eth_mask->src) &&
			     !is_broadcast_ether_addr(&eth_mask->src)) ||
			    (!is_zero_ether_addr(&eth_mask->dst) &&
			     !is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "MAC_addr mask not valid");
				return -rte_errno;
			}

			/* Mask is not allowed. Only exact matches are */
			if (eth_mask->type &&
			    eth_mask->type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "ethertype mask not valid");
				return -rte_errno;
			}

			if (is_broadcast_ether_addr(&eth_mask->dst)) {
				rte_memcpy(filter->dst_macaddr,
					   &eth_spec->dst, ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
			}
			if (is_broadcast_ether_addr(&eth_mask->src)) {
				rte_memcpy(filter->src_macaddr,
					   &eth_spec->src, ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
			}
			if (eth_mask->type) {
				filter->ethertype =
					rte_be_to_cpu_16(eth_spec->type);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
					EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			if (vlan_mask->tci &&
			    vlan_mask->tci == RTE_BE16(0x0fff) &&
			    !vlan_mask->tpid) {
				/* Only the VLAN ID can be matched. */
				filter->l2_ovlan =
					rte_be_to_cpu_16(vlan_spec->tci &
							 RTE_BE16(0x0fff));
				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
			} else if (vlan_mask->tci || vlan_mask->tpid) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN mask is invalid");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* If mask is not involved, we could use EM filters. */
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;
			/* Only IP DST and SRC fields are maskable. */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.type_of_service ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.time_to_live ||
			    ipv4_mask->hdr.next_proto_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return -rte_errno;
			}
			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			if (ipv4_mask->hdr.src_addr) {
				filter->src_ipaddr_mask[0] =
					ipv4_mask->hdr.src_addr;
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}
			if (ipv4_mask->hdr.dst_addr) {
				filter->dst_ipaddr_mask[0] =
					ipv4_mask->hdr.dst_addr;
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}
			filter->ip_addr_type = use_ntuple ?
			 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
			 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
			if (ipv4_spec->hdr.next_proto_id) {
				filter->ip_protocol =
					ipv4_spec->hdr.next_proto_id;
				if (use_ntuple)
					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
				else
					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv6_mask->hdr.vtc_flow ||
			    ipv6_mask->hdr.payload_len ||
			    ipv6_mask->hdr.proto ||
			    ipv6_mask->hdr.hop_limits) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask.");
				return -rte_errno;
			}

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			rte_memcpy(filter->src_ipaddr,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->dst_ipaddr,
				   ipv6_spec->hdr.dst_addr, 16);
			if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
				rte_memcpy(filter->src_ipaddr_mask,
					   ipv6_mask->hdr.src_addr, 16);
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}
			if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
				rte_memcpy(filter->dst_ipaddr_mask,
					   ipv6_mask->hdr.dst_addr, 16);
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}
			filter->ip_addr_type = use_ntuple ?
				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			/* Check TCP mask. Only DST & SRC ports are maskable */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return -rte_errno;
			}
			filter->src_port = tcp_spec->hdr.src_port;
			filter->dst_port = tcp_spec->hdr.dst_port;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
			if (tcp_mask->hdr.dst_port) {
				filter->dst_port_mask = tcp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}
			if (tcp_mask->hdr.src_port) {
				filter->src_port_mask = tcp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			/* Only DST & SRC ports are maskable. */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return -rte_errno;
			}

			filter->src_port = udp_spec->hdr.src_port;
			filter->dst_port = udp_spec->hdr.dst_port;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (udp_mask->hdr.dst_port) {
				filter->dst_port_mask = udp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}
			if (udp_mask->hdr.src_port) {
				filter->src_port_mask = udp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
			    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
			    vxlan_spec->flags != 0x8) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (vni_masked) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid VNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			/* Header fields are big-endian; compare accordingly.
			 * 0x2000 = key present, 0x6558 = transparent
			 * Ethernet bridging.
			 */
			if (nvgre_spec->c_k_s_rsvd0_ver != RTE_BE16(0x2000) ||
			    nvgre_spec->protocol != RTE_BE16(0x6558)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			/* Check if TNI is masked. */
			if (nvgre_spec && nvgre_mask) {
				tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (tni_masked) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid TNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = item->spec;
			vf = vf_spec->id;
			if (!BNXT_PF(bp)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Configuring on a VF!");
				return -rte_errno;
			}

			if (vf >= bp->pdev->max_vfs) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Incorrect VF id!");
				return -rte_errno;
			}

			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
			if (dflt_vnic < 0) {
				/* This simply indicates there's no driver
				 * loaded. This is not an error.
				 */
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Unable to get default VNIC for VF");
				return -rte_errno;
			}
			filter->mirror_vnic_id = dflt_vnic;
			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
			break;
		default:
			break;
		}
		item++;
	}
	filter->enables = en;

	return 0;
}
/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "No support for egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "No support for priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   attr, "No support for group.");
		return -rte_errno;
	}

	return 0;
}
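
/*
 * bnxt_get_l2_filter - find or create the L2 filter that the flow 'nf'
 * must reference. The port's default L2 filter is reused when the
 * destination MAC matches; otherwise a new L2 filter is programmed on
 * the given VNIC.
 */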
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		   struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter1, *f0;
	struct bnxt_vnic_info *vnic0;
	int rc;

	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	f0 = STAILQ_FIRST(&vnic0->filter);

	/* This flow has the same DST MAC as the port/L2 filter. */
	if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
		return f0;

	/* This flow needs a DST MAC that differs from the port/L2 filter. */
	PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
	filter1 = bnxt_get_unused_filter(bp);
	if (filter1 == NULL)
		return NULL;
	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
	memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
	memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
				     filter1);
	if (rc) {
		bnxt_free_filter(bp, filter1);
		return NULL;
	}
	return filter1;
}
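
/*
 * bnxt_validate_and_parse_flow - finish building the filter from the
 * action list: resolve the destination VNIC (queue or VF), apply drop or
 * meter flags, and attach the backing L2 filter. Flows are refused while
 * RSS owns the Rx queues.
 */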
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error,
			     struct bnxt_filter_info *filter)
{
	const struct rte_flow_action *act = nxt_non_void_action(actions);
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	struct bnxt_vnic_info *vnic, *vnic0;
	struct bnxt_filter_info *filter1;
	uint32_t vf = 0;
	int dflt_vnic;
	int rc;

	if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Cannot create flow on RSS queues");
		rc = -rte_errno;
		goto ret;
	}

	rc = bnxt_validate_and_parse_flow_type(bp, pattern, error, filter);
	if (rc != 0)
		goto ret;

	rc = bnxt_flow_parse_attr(attr, error);
	if (rc != 0)
		goto ret;

	/* Only the ingress attribute is supported at the moment. */
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		/* Allow this flow. Redirect to a VNIC. */
		act_q = (const struct rte_flow_action_queue *)act->conf;
		if (act_q->index >= bp->rx_nr_rings) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Invalid queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
		if (vnic == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "No matching VNIC for queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		PMD_DRV_LOG(DEBUG, "VNIC found\n");
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			filter->flags =
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
		else
			filter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		vf = act_vf->id;
		if (!BNXT_PF(bp)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Configuring on a VF!");
			rc = -rte_errno;
			goto ret;
		}

		if (vf >= bp->pdev->max_vfs) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Incorrect VF id!");
			rc = -rte_errno;
			goto ret;
		}

		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
		if (dflt_vnic < 0) {
			/* This simply indicates there's no driver loaded.
			 * This is not an error.
			 */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Unable to get default VNIC for VF");
			rc = -rte_errno;
			goto ret;
		}
		filter->mirror_vnic_id = dflt_vnic;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		break;
	default:
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

	act = nxt_non_void_action(++act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}
ret:
	return rc;
}
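
/*
 * bnxt_flow_validate - rte_flow .validate callback: run the same argument
 * and parse checks as create, using a scratch filter that is returned to
 * the free pool before returning.
 */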
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter;
	int ret = 0;

	ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
	if (ret != 0)
		return ret;

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		return -ENOMEM;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	/* No need to hold on to this filter if we are just validating flow */
	filter->fw_l2_filter_id = UINT64_MAX;
	bnxt_free_filter(bp, filter);

	return ret;
}
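
/*
 * bnxt_match_filter - scan every VNIC's flow list for a filter with the
 * same match criteria as 'nf'. Returns -EEXIST for an exact duplicate,
 * or -EXDEV when only the destination differs, in which case the old HW
 * filter is cleared and the flow is repointed at 'nf'.
 */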
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf;
	struct rte_flow *flow;
	int i;

	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;

			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask))) {
				if (mf->dst_id == nf->dst_id)
					return -EEXIST;
				/* Same flow, different queue:
				 * clear the old HW filter.
				 */
				if (nf->filter_type == HWRM_CFA_EM_FILTER)
					bnxt_hwrm_clear_em_filter(bp, mf);
				if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
					bnxt_hwrm_clear_ntuple_filter(bp, mf);
				/* Free the old filter and update the flow
				 * to point at the new one.
				 */
				bnxt_free_filter(bp, mf);
				flow->filter = nf;
				return -EXDEV;
			}
		}
	}
	return 0;
}
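
/*
 * bnxt_flow_create - rte_flow .create callback: parse the flow, program
 * the EM or ntuple filter through HWRM, and attach the new rte_flow to
 * its destination VNIC's flow list. A flow that duplicates an existing
 * pattern with a new destination updates the old flow instead.
 */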
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter;
	struct bnxt_vnic_info *vnic = NULL;
	bool update_flow = false;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	ret = bnxt_flow_agrs_validate(attr, pattern, actions, error);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Not a valid flow.\n");
		goto free_flow;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		ret = -ENOSPC;
		goto free_flow;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	if (ret != 0)
		goto free_filter;

	ret = bnxt_match_filter(bp, filter);
	if (ret == -EEXIST) {
		PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
		/* Clear the filter that was created as part of
		 * validate_and_parse_flow() above.
		 */
		bnxt_hwrm_clear_l2_filter(bp, filter);
		goto free_filter;
	} else if (ret == -EXDEV) {
		PMD_DRV_LOG(DEBUG, "Flow with same pattern exists.\n");
		PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
		update_flow = true;
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
		filter->enables |=
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
	}
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
		filter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
	}

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (filter->dst_id == vnic->fw_vnic_id)
			break;
	}

	if (!ret) {
		flow->filter = filter;
		flow->vnic = vnic;
		if (update_flow) {
			ret = -EXDEV;
			goto free_flow;
		}
		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
		return flow;
	}
free_filter:
	bnxt_free_filter(bp, filter);
free_flow:
	if (ret == -EEXIST)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Matching Flow exists.");
	else if (ret == -EXDEV)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Flow with pattern exists, updating destination queue");
	else
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");
	rte_free(flow);
	flow = NULL;
	return flow;
}
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter = flow->filter;
	struct bnxt_vnic_info *vnic = flow->vnic;
	int ret = 0;

	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		PMD_DRV_LOG(ERR, "Could not find matching flow\n");
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
	else
		ret = bnxt_hwrm_clear_l2_filter(bp, filter);

	if (ret == 0) {
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		/* Grab the next entry before freeing the current flow so the
		 * list is never walked through freed memory.
		 */
		flow = STAILQ_FIRST(&vnic->flow_list);
		while (flow) {
			struct rte_flow *temp_flow = STAILQ_NEXT(flow, next);
			struct bnxt_filter_info *filter = flow->filter;

			if (filter->filter_type == HWRM_CFA_EM_FILTER)
				ret = bnxt_hwrm_clear_em_filter(bp, filter);
			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
				ret = bnxt_hwrm_clear_ntuple_filter(bp,
								    filter);

			if (ret) {
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Failed to flush flow in HW.");
				return -rte_errno;
			}

			STAILQ_REMOVE(&vnic->flow_list, flow,
				      rte_flow, next);
			rte_free(flow);
			flow = temp_flow;
		}
	}

	return ret;
}
const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};
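
/*
 * Example usage (illustrative sketch only, not compiled into the driver):
 * how an application might program a rule that this PMD turns into an
 * ntuple filter. The port id, queue index, and UDP port below are
 * arbitrary example values. Every pattern item carries a spec and mask
 * because the parser above rejects NULL spec/mask, and the port must not
 * be in RSS mode since bnxt_validate_and_parse_flow() refuses flows on
 * RSS queues.
 */
#ifdef BNXT_FLOW_EXAMPLE
static struct rte_flow *
bnxt_flow_example_create(uint16_t port_id, struct rte_flow_error *error)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item_eth eth_any;	 /* wildcard L2 */
	static const struct rte_flow_item_ipv4 ipv4_any; /* wildcard L3 */
	static const struct rte_flow_item_udp udp_spec = {
		.hdr = { .dst_port = RTE_BE16(4789) },
	};
	static const struct rte_flow_item_udp udp_mask = {
		.hdr = { .dst_port = RTE_BE16(0xffff) },
	};
	static const struct rte_flow_action_queue queue = { .index = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_any, .mask = &eth_any },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_any, .mask = &ipv4_any },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Steer ingress UDP traffic with dst port 4789 to Rx queue 1. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, error))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, error);
}
#endif /* BNXT_FLOW_EXAMPLE */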