/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
/*
 * Filter Functions
 */

struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;

	/* Find the 1st unused filter from the free_filter_list pool */
	filter = STAILQ_FIRST(&bp->free_filter_list);
	if (!filter) {
		PMD_DRV_LOG(ERR, "No more free filter resources\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

	/* Default to L2 MAC Addr filter */
	filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
	memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
	       ETHER_ADDR_LEN);
	memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
	return filter;
}
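/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * takes a pre-initialized L2 filter from the pool and must return it
 * with bnxt_free_filter() on any error path. This is a minimal sketch,
 * assuming a valid "bp" and "vnic" are in scope:
 *
 *	struct bnxt_filter_info *f = bnxt_alloc_filter(bp);
 *
 *	if (f == NULL)
 *		return -ENOMEM;
 *	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id, f);
 *	if (rc) {
 *		bnxt_free_filter(bp, f);
 *		return rc;
 *	}
 */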
struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
{
	struct bnxt_filter_info *filter;

	filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
	if (!filter) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
			    vf);
		return NULL;
	}

	filter->fw_l2_filter_id = UINT64_MAX;
	STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
	return filter;
}
void bnxt_init_filters(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;
	int i, max_filters;

	max_filters = bp->max_l2_ctx;
	STAILQ_INIT(&bp->free_filter_list);
	for (i = 0; i < max_filters; i++) {
		filter = &bp->filter_info[i];
		filter->fw_l2_filter_id = UINT64_MAX;
		filter->fw_em_filter_id = UINT64_MAX;
		filter->fw_ntuple_filter_id = UINT64_MAX;
		STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
	}
}
void bnxt_free_all_filters(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	int i;

	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);
				STAILQ_REMOVE(&vnic->filter, filter,
					      bnxt_filter_info, next);
				STAILQ_INSERT_TAIL(&bp->free_filter_list,
						   filter, next);
				filter = temp_filter;
			}
			STAILQ_INIT(&vnic->filter);
		}
	}

	for (i = 0; i < bp->pf.max_vfs; i++) {
		STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
			bnxt_hwrm_clear_l2_filter(bp, filter);
		}
	}
}
void bnxt_free_filter_mem(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;
	uint16_t max_filters, i;
	int rc = 0;

	if (bp->filter_info == NULL)
		return;

	/* Ensure that all filters are freed */
	max_filters = bp->max_l2_ctx;
	for (i = 0; i < max_filters; i++) {
		filter = &bp->filter_info[i];
		if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
			PMD_DRV_LOG(ERR, "HWRM L2 filter was not freed\n");
			/* Call HWRM to try to free the filter again */
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
			if (rc)
				PMD_DRV_LOG(ERR,
					    "HWRM filter cannot be freed rc = %d\n",
					    rc);
		}
		filter->fw_l2_filter_id = UINT64_MAX;
	}
	STAILQ_INIT(&bp->free_filter_list);

	rte_free(bp->filter_info);
	bp->filter_info = NULL;
}
int bnxt_alloc_filter_mem(struct bnxt *bp)
{
	struct bnxt_filter_info *filter_mem;
	uint16_t max_filters;

	max_filters = bp->max_l2_ctx;
	/* Allocate memory for the filter pool */
	filter_mem = rte_zmalloc("bnxt_filter_info",
				 max_filters * sizeof(struct bnxt_filter_info),
				 0);
	if (filter_mem == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for %u filters\n",
			    max_filters);
		return -ENOMEM;
	}
	bp->filter_info = filter_mem;
	return 0;
}
struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;

	/* Find the 1st unused filter from the free_filter_list pool */
	filter = STAILQ_FIRST(&bp->free_filter_list);
	if (!filter) {
		PMD_DRV_LOG(ERR, "No more free filter resources\n");
		return NULL;
	}

	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);
	return filter;
}
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
	STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}
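/*
 * Pool lifecycle sketch (illustrative only): bnxt_init_filters() seeds
 * free_filter_list from the filter_info array, bnxt_get_unused_filter()
 * pops an entry, and bnxt_free_filter() pushes it back. Note that
 * bnxt_free_filter() does not touch HWRM state; callers clear the
 * firmware filter (e.g. bnxt_hwrm_clear_l2_filter()) first.
 *
 *	struct bnxt_filter_info *f = bnxt_get_unused_filter(bp);
 *
 *	if (f != NULL) {
 *		(program f via HWRM, use it, then tear it down)
 *		bnxt_hwrm_clear_l2_filter(bp, f);
 *		bnxt_free_filter(bp, f);
 *	}
 */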
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	return 0;
}
static const struct rte_flow_item *
nxt_non_void_pattern(const struct rte_flow_item *cur)
{
	while (1) {
		if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
			return cur;
		cur++;
	}
}

static const struct rte_flow_action *
nxt_non_void_action(const struct rte_flow_action *cur)
{
	while (1) {
		if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
			return cur;
		cur++;
	}
}
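/*
 * Example (illustrative only): for a pattern such as
 *	{ VOID, VOID, ETH, END }
 * nxt_non_void_pattern(pattern) returns a pointer to the ETH item, so
 * the parsers below never see VOID entries. Both helpers rely on the
 * caller-provided array being terminated with an END entry, which
 * rte_flow guarantees for patterns and actions passed to the driver.
 */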
int check_zero_bytes(const uint8_t *bytes, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (bytes[i] != 0x00)
			return 0;
	return 1;
}
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
		       struct rte_flow_error *error)
{
	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
	int use_ntuple = 1;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			use_ntuple = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			use_ntuple = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_TCP:
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* These need an ntuple match. A VLAN item forces
			 * an exact-match (EM) filter, and the two cannot
			 * be combined.
			 */
			if (!use_ntuple) {
				PMD_DRV_LOG(ERR,
					    "VLAN flow cannot use NTUPLE filter\n");
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Cannot use VLAN with NTUPLE");
				return -rte_errno;
			}
			use_ntuple |= 1;
			break;
		default:
			PMD_DRV_LOG(ERR, "Unknown Flow type\n");
			use_ntuple |= 1;
		}
		item++;
	}
	return use_ntuple;
}
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
				  const struct rte_flow_item pattern[],
				  struct rte_flow_error *error,
				  struct bnxt_filter_info *filter)
{
	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	const struct rte_flow_item_vf *vf_spec;
	uint32_t tenant_id_be = 0;
	uint32_t vni_masked = 0;
	uint32_t tni_masked = 0;
	uint32_t vf = 0;
	int use_ntuple;
	uint32_t en = 0;
	int dflt_vnic;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
	if (use_ntuple < 0)
		return use_ntuple;

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->last) {
			/* last or range is NOT supported as match criteria */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "No support for range");
			return -rte_errno;
		}
		if (!item->spec || !item->mask) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "spec/mask is NULL");
			return -rte_errno;
		}
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			/* Each MAC address mask must be all 0's or all
			 * 1's; partially-set source or destination MAC
			 * masks are not supported.
			 */
			if ((!is_zero_ether_addr(&eth_mask->src) &&
			     !is_broadcast_ether_addr(&eth_mask->src)) ||
			    (!is_zero_ether_addr(&eth_mask->dst) &&
			     !is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "MAC_addr mask not valid");
				return -rte_errno;
			}

			/* Masking the ethertype is not allowed.
			 * Only exact matches are supported.
			 */
			if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "ethertype mask not valid");
				return -rte_errno;
			}

			if (is_broadcast_ether_addr(&eth_mask->dst)) {
				rte_memcpy(filter->dst_macaddr,
					   &eth_spec->dst, ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
			}
			if (is_broadcast_ether_addr(&eth_mask->src)) {
				rte_memcpy(filter->src_macaddr,
					   &eth_spec->src, ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
			}

			if (eth_spec->type) {
				filter->ethertype =
					rte_be_to_cpu_16(eth_spec->type);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
					EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			if (vlan_mask->tci & 0xFFFF && !vlan_mask->tpid) {
				/* Only the VLAN ID can be matched. */
				filter->l2_ovlan =
					rte_be_to_cpu_16(vlan_spec->tci) &
					0xFFF;
				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
			} else {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN mask is invalid");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* If no mask is involved, an exact-match (EM)
			 * filter could be used.
			 */
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;
			/* Only IP DST and SRC fields are maskable. */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.type_of_service ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.time_to_live ||
			    ipv4_mask->hdr.next_proto_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return -rte_errno;
			}
			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			if (ipv4_mask->hdr.src_addr) {
				filter->src_ipaddr_mask[0] =
					ipv4_mask->hdr.src_addr;
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}
			if (ipv4_mask->hdr.dst_addr) {
				filter->dst_ipaddr_mask[0] =
					ipv4_mask->hdr.dst_addr;
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}
			filter->ip_addr_type = use_ntuple ?
			 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
			 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
			if (ipv4_spec->hdr.next_proto_id) {
				filter->ip_protocol =
					ipv4_spec->hdr.next_proto_id;
				if (use_ntuple)
					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
				else
					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv6_mask->hdr.vtc_flow ||
			    ipv6_mask->hdr.payload_len ||
			    ipv6_mask->hdr.proto ||
			    ipv6_mask->hdr.hop_limits) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask.");
				return -rte_errno;
			}

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			rte_memcpy(filter->src_ipaddr,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->dst_ipaddr,
				   ipv6_spec->hdr.dst_addr, 16);
			if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
				rte_memcpy(filter->src_ipaddr_mask,
					   ipv6_mask->hdr.src_addr, 16);
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}
			if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
				rte_memcpy(filter->dst_ipaddr_mask,
					   ipv6_mask->hdr.dst_addr, 16);
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}
			filter->ip_addr_type = use_ntuple ?
				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			/* Check TCP mask. Only DST & SRC ports are maskable */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return -rte_errno;
			}
			filter->src_port = tcp_spec->hdr.src_port;
			filter->dst_port = tcp_spec->hdr.dst_port;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
			if (tcp_mask->hdr.dst_port) {
				filter->dst_port_mask = tcp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}
			if (tcp_mask->hdr.src_port) {
				filter->src_port_mask = tcp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			/* Check UDP mask. Only DST & SRC ports are maskable */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return -rte_errno;
			}

			filter->src_port = udp_spec->hdr.src_port;
			filter->dst_port = udp_spec->hdr.dst_port;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (udp_mask->hdr.dst_port) {
				filter->dst_port_mask = udp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}
			if (udp_mask->hdr.src_port) {
				filter->src_port_mask = udp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
			    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
			    vxlan_spec->flags != 0x8) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (vni_masked) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid VNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
			    nvgre_spec->protocol != 0x6558) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			/* Check if TNI is masked. */
			if (nvgre_spec && nvgre_mask) {
				tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (tni_masked) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid TNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = item->spec;
			vf = vf_spec->id;
			if (!BNXT_PF(bp)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Configuring on a VF!");
				return -rte_errno;
			}

			if (vf >= bp->pdev->max_vfs) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Incorrect VF id!");
				return -rte_errno;
			}

			filter->mirror_vnic_id =
			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
			if (dflt_vnic < 0) {
				/* This simply indicates there's no driver
				 * loaded on the VF. This is not an error.
				 */
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Unable to get default VNIC for VF");
				return -rte_errno;
			}
			filter->mirror_vnic_id = dflt_vnic;
			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
			break;
		default:
			break;
		}
		item++;
	}
	filter->enables = en;

	return 0;
}
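/*
 * Example (illustrative only, hypothetical application code): a pattern
 * that bnxt_filter_type_check() classifies as an ntuple filter (no VLAN
 * item) and that fills the IPv4 destination address fields of
 * bnxt_filter_info:
 *
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.dst_addr = rte_cpu_to_be_32(0xc0a80101), (192.168.1.1)
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.dst_addr = rte_cpu_to_be_32(UINT32_MAX),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{
 *			.type = RTE_FLOW_ITEM_TYPE_IPV4,
 *			.spec = &ip_spec,
 *			.mask = &ip_mask,
 *		},
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 * Every non-END item must carry both spec and mask; item->last (range
 * matching) is rejected above.
 */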
/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "No support for egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "No support for priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   attr, "No support for group.");
		return -rte_errno;
	}

	return 0;
}
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		   struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter1, *f0;
	struct bnxt_vnic_info *vnic0;
	int rc;

	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	f0 = STAILQ_FIRST(&vnic0->filter);

	/* This flow has the same DST MAC as the port/L2 filter. */
	if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
		return f0;

	/* This flow needs a DST MAC that differs from the port/L2 filter. */
	PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
	filter1 = bnxt_get_unused_filter(bp);
	if (filter1 == NULL)
		return NULL;

	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
	memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
	memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
				     filter1);
	if (rc) {
		bnxt_free_filter(bp, filter1);
		return NULL;
	}
	return filter1;
}
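/*
 * Note (illustrative): a flow whose DST MAC equals the port's own MAC
 * reuses the default L2 filter f0; only flows steering a different DST
 * MAC allocate a second L2 filter, which callers then reference through
 * filter->fw_l2_filter_id when programming EM/ntuple filters.
 */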
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error,
			     struct bnxt_filter_info *filter)
{
	const struct rte_flow_action *act = nxt_non_void_action(actions);
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	struct bnxt_vnic_info *vnic, *vnic0;
	struct bnxt_filter_info *filter1;
	uint32_t vf = 0;
	int dflt_vnic;
	int rc;

	if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Cannot create flow on RSS queues");
		rc = -rte_errno;
		goto ret;
	}

	rc = bnxt_validate_and_parse_flow_type(bp, pattern, error, filter);
	if (rc != 0)
		goto ret;

	rc = bnxt_flow_parse_attr(attr, error);
	if (rc != 0)
		goto ret;

	/* Only the ingress attribute is supported right now. */
	filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		/* Allow this flow. Redirect to a VNIC. */
		act_q = (const struct rte_flow_action_queue *)act->conf;
		if (act_q->index >= bp->rx_nr_rings) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Invalid queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
		if (vnic == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "No matching VNIC for queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		PMD_DRV_LOG(DEBUG, "VNIC found\n");
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			filter->flags =
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
		else
			filter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		vf = act_vf->id;
		if (!BNXT_PF(bp)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Configuring on a VF!");
			rc = -rte_errno;
			goto ret;
		}

		if (vf >= bp->pdev->max_vfs) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Incorrect VF id!");
			rc = -rte_errno;
			goto ret;
		}

		filter->mirror_vnic_id =
		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
		if (dflt_vnic < 0) {
			/* This simply indicates there's no driver loaded
			 * on the VF. This is not an error.
			 */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Unable to get default VNIC for VF");
			rc = -rte_errno;
			goto ret;
		}
		filter->mirror_vnic_id = dflt_vnic;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		break;
	default:
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

	if (filter1) {
		bnxt_free_filter(bp, filter1);
		filter1->fw_l2_filter_id = -1;
	}

	act = nxt_non_void_action(++act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}
ret:
	return rc;
}
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter;
	int ret = 0;

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0)
		return ret;

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		return -ENOMEM;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	/* No need to hold on to this filter if we are just validating flow */
	filter->fw_l2_filter_id = UINT64_MAX;
	bnxt_free_filter(bp, filter);

	return ret;
}
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf;
	struct rte_flow *flow;
	int i;

	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;

			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask))) {
				if (mf->dst_id == nf->dst_id)
					return -EEXIST;
				/* Same flow, different queue: clear the
				 * old ntuple filter
				 */
				if (nf->filter_type == HWRM_CFA_EM_FILTER)
					bnxt_hwrm_clear_em_filter(bp, mf);
				if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
					bnxt_hwrm_clear_ntuple_filter(bp, mf);
				/* Free the old filter and update the flow
				 * with the new one
				 */
				bnxt_free_filter(bp, mf);
				flow->filter = nf;
				return -EXDEV;
			}
		}
	}
	return 0;
}
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter;
	struct bnxt_vnic_info *vnic = NULL;
	bool update_flow = false;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Not a valid flow.\n");
		goto free_flow;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		goto free_flow;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	if (ret != 0)
		goto free_filter;

	ret = bnxt_match_filter(bp, filter);
	if (ret == -EEXIST) {
		PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
		/* Clear the filter that was created as part of
		 * validate_and_parse_flow() above
		 */
		bnxt_hwrm_clear_l2_filter(bp, filter);
		goto free_filter;
	} else if (ret == -EXDEV) {
		PMD_DRV_LOG(DEBUG, "Flow with same pattern exists");
		PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
		update_flow = true;
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
		filter->enables |=
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
	}
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
		filter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
	}

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (filter->dst_id == vnic->fw_vnic_id)
			break;
	}

	if (!ret) {
		flow->filter = filter;
		flow->vnic = vnic;
		if (update_flow) {
			ret = -EXDEV;
			goto free_flow;
		}
		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
		return flow;
	}
free_filter:
	bnxt_free_filter(bp, filter);
free_flow:
	if (ret == -EEXIST)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Matching Flow exists.");
	else if (ret == -EXDEV)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Flow with pattern exists, updating destination queue");
	else
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");
	rte_free(flow);
	flow = NULL;
	return flow;
}
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter = flow->filter;
	struct bnxt_vnic_info *vnic = flow->vnic;
	int ret = 0;

	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		PMD_DRV_LOG(ERR, "Could not find matching flow\n");
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

	bnxt_hwrm_clear_l2_filter(bp, filter);
	if (ret == 0) {
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			struct bnxt_filter_info *filter = flow->filter;

			if (filter->filter_type == HWRM_CFA_EM_FILTER)
				ret = bnxt_hwrm_clear_em_filter(bp, filter);
			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
				ret = bnxt_hwrm_clear_ntuple_filter(bp,
								    filter);

			if (ret) {
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Failed to flush flow in HW.");
				return -rte_errno;
			}

			STAILQ_REMOVE(&vnic->flow_list, flow,
				      rte_flow, next);
			rte_free(flow);
		}
	}

	return ret;
}
const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};
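/*
 * Usage sketch (illustrative only): applications reach this ops table
 * through the generic rte_flow API; the ethdev layer looks it up via
 * the driver's filter_ctrl hook for RTE_ETH_FILTER_GENERIC.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *flow;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern,
 *				       actions, &err);
 *	(later)
 *	rte_flow_destroy(port_id, flow, &err);
 */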