/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_vnic.h"
#include "bnxt_util.h"
#include "hsi_struct_def_dpdk.h"
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL,
                                   "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL,
                                   "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL,
                                   "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}
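
/*
 * Illustrative application-side usage (not part of this driver): the
 * checks above back rte_flow_validate()/rte_flow_create(), e.g.
 *
 *      struct rte_flow_error err;
 *      int rc = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 *
 * A NULL attr, pattern, or actions array fails here with -rte_errno and
 * err.message set to one of the strings above.
 */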
static const struct rte_flow_item *
bnxt_flow_non_void_item(const struct rte_flow_item *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return cur;
                cur++;
        }
}

static const struct rte_flow_action *
bnxt_flow_non_void_action(const struct rte_flow_action *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return cur;
                cur++;
        }
}
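
/*
 * Note: because the helpers above skip VOID entries, a pattern such as
 * { VOID, ETH, VOID, END } is parsed exactly like { ETH, END }.
 */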
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
                       struct rte_flow_error *error)
{
        const struct rte_flow_item *item =
                bnxt_flow_non_void_item(pattern);
        int use_ntuple = 1;
        bool has_vlan = false;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        use_ntuple = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        use_ntuple = 0;
                        has_vlan = true;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
                case RTE_FLOW_ITEM_TYPE_TCP:
                case RTE_FLOW_ITEM_TYPE_UDP:
                        /* FALLTHROUGH */
                        /* need ntuple match, reset exact match */
                        use_ntuple |= 1;
                        if (has_vlan) {
                                PMD_DRV_LOG(ERR,
                                            "VLAN flow cannot use NTUPLE filter\n");
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Cannot use VLAN with NTUPLE");
                                return -rte_errno;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_ANY:
                        use_ntuple = 0;
                        break;
                default:
                        PMD_DRV_LOG(DEBUG, "Unknown Flow type\n");
                        use_ntuple |= 0;
                }
                item++;
        }

        return use_ntuple;
}
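
/*
 * Example of the selection above (illustrative testpmd flow syntax):
 *
 *      flow create 0 ingress pattern eth / ipv4 / tcp dst is 80 / end
 *              actions queue index 1 / end
 *
 * carries L3/L4 items and therefore takes the ntuple path, while a plain
 * "pattern eth / end" (or any pattern containing a VLAN item) selects an
 * exact-match (EM) filter instead.
 */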
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                                  const struct rte_flow_attr *attr,
                                  const struct rte_flow_item pattern[],
                                  struct rte_flow_error *error,
                                  struct bnxt_filter_info *filter)
{
        const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;
        const struct rte_flow_item_gre *gre_spec;
        const struct rte_flow_item_gre *gre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;
        uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
        uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
        const struct rte_flow_item_vf *vf_spec;
        uint32_t tenant_id_be = 0;
        bool vni_masked = false;
        bool tni_masked = false;
        uint32_t en_ethertype;
        uint32_t vf = 0;
        uint32_t en = 0;
        int use_ntuple;
        int dflt_vnic;

        use_ntuple = bnxt_filter_type_check(pattern, error);
        PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
        if (use_ntuple < 0)
                return use_ntuple;

        filter->filter_type = use_ntuple ?
                HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
        en_ethertype = use_ntuple ?
                NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
                EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (item->last) {
                        /* last or range is NOT supported as match criteria */
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "No support for range");
                        return -rte_errno;
                }

                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        if (!item->spec || !item->mask)
                                break;

                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        /* Source MAC address mask cannot be partially set.
                         * Should be all 0's or all 1's.
                         * Destination MAC address mask must not be partially
                         * set. Should be all 1's or all 0's.
                         */
                        if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
                             !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
                            (!rte_is_zero_ether_addr(&eth_mask->dst) &&
                             !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "MAC_addr mask not valid");
                                return -rte_errno;
                        }

                        /* Mask is not allowed. Only exact matches are */
                        if (eth_mask->type &&
                            eth_mask->type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "ethertype mask not valid");
                                return -rte_errno;
                        }

                        if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                                rte_memcpy(filter->dst_macaddr,
                                           &eth_spec->dst, RTE_ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
                        }

                        if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
                                rte_memcpy(filter->src_macaddr,
                                           &eth_spec->src, RTE_ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
                        }
                        /* else {
                         *      PMD_DRV_LOG(ERR, "Handle this condition\n");
                         * }
                         */
                        if (eth_mask->type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(eth_spec->type);
                                en |= en_ethertype;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec = item->spec;
                        vlan_mask = item->mask;
                        if (en & en_ethertype) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN TPID matching is not"
                                                   " supported");
                                return -rte_errno;
                        }
                        if (vlan_mask->tci &&
                            vlan_mask->tci == RTE_BE16(0x0fff)) {
                                /* Only the VLAN ID can be matched. */
                                filter->l2_ovlan =
                                        rte_be_to_cpu_16(vlan_spec->tci &
                                                         RTE_BE16(0x0fff));
                                en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
                        } else {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN mask is invalid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type &&
                            vlan_mask->inner_type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "inner ethertype mask not"
                                                   " valid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(vlan_spec->inner_type);
                                en |= en_ethertype;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        /* If mask is not involved, we could use EM filters. */
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.type_of_service ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.time_to_live ||
                            ipv4_mask->hdr.next_proto_id ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                return -rte_errno;
                        }

                        filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
                        filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

                        if (ipv4_mask->hdr.src_addr) {
                                filter->src_ipaddr_mask[0] =
                                        ipv4_mask->hdr.src_addr;
                                en |= !use_ntuple ? 0 :
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }

                        if (ipv4_mask->hdr.dst_addr) {
                                filter->dst_ipaddr_mask[0] =
                                        ipv4_mask->hdr.dst_addr;
                                en |= !use_ntuple ? 0 :
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }

                        filter->ip_addr_type = use_ntuple ?
                                HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
                                HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;

                        if (ipv4_spec->hdr.next_proto_id) {
                                filter->ip_protocol =
                                        ipv4_spec->hdr.next_proto_id;
                                if (use_ntuple)
                                        en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
                                else
                                        en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv6_mask->hdr.vtc_flow ||
                            ipv6_mask->hdr.payload_len ||
                            ipv6_mask->hdr.proto ||
                            ipv6_mask->hdr.hop_limits) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

                        rte_memcpy(filter->src_ipaddr,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->dst_ipaddr,
                                   ipv6_spec->hdr.dst_addr, 16);

                        if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
                                                   16)) {
                                rte_memcpy(filter->src_ipaddr_mask,
                                           ipv6_mask->hdr.src_addr, 16);
                                en |= !use_ntuple ? 0 :
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }

                        if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
                                                   16)) {
                                rte_memcpy(filter->dst_ipaddr_mask,
                                           ipv6_mask->hdr.dst_addr, 16);
                                en |= !use_ntuple ? 0 :
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }

                        filter->ip_addr_type = use_ntuple ?
                                NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
                                EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        /* Check TCP mask. Only DST & SRC ports are maskable */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return -rte_errno;
                        }

                        filter->src_port = tcp_spec->hdr.src_port;
                        filter->dst_port = tcp_spec->hdr.dst_port;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (tcp_mask->hdr.dst_port) {
                                filter->dst_port_mask = tcp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }

                        if (tcp_mask->hdr.src_port) {
                                filter->src_port_mask = tcp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        if (!item->spec || !item->mask)
                                break;

                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return -rte_errno;
                        }

                        filter->src_port = udp_spec->hdr.src_port;
                        filter->dst_port = udp_spec->hdr.dst_port;

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (udp_mask->hdr.dst_port) {
                                filter->dst_port_mask = udp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }

                        if (udp_mask->hdr.src_port) {
                                filter->src_port_mask = udp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        if (!vxlan_spec && !vxlan_mask) {
                                filter->tunnel_type =
                                        CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                                break;
                        }

                        if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
                            vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
                            vxlan_spec->flags != 0x8) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        /* Check if VNI is masked. */
                        if (vxlan_spec && vxlan_mask) {
                                vni_masked =
                                        !!memcmp(vxlan_mask->vni, vni_mask,
                                                 sizeof(vni_mask));
                                if (vni_masked) {
                                        rte_flow_error_set
                                                (error,
                                                 EINVAL,
                                                 RTE_FLOW_ERROR_TYPE_ITEM,
                                                 item,
                                                 "Invalid VNI mask");
                                        return -rte_errno;
                                }

                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           vxlan_spec->vni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                        CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (!nvgre_spec && !nvgre_mask) {
                                filter->tunnel_type =
                                        CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                                break;
                        }

                        if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
                            nvgre_spec->protocol != 0x6558) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (nvgre_spec && nvgre_mask) {
                                tni_masked =
                                        !!memcmp(nvgre_mask->tni, tni_mask,
                                                 sizeof(tni_mask));
                                if (tni_masked) {
                                        rte_flow_error_set
                                                (error,
                                                 EINVAL,
                                                 RTE_FLOW_ERROR_TYPE_ITEM,
                                                 item,
                                                 "Invalid TNI mask");
                                        return -rte_errno;
                                }

                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           nvgre_spec->tni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                        CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                        gre_spec = (const struct rte_flow_item_gre *)item->spec;
                        gre_mask = (const struct rte_flow_item_gre *)item->mask;

                        /*
                         * Check if GRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if (!!gre_spec ^ !!gre_mask) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid GRE item");
                                return -rte_errno;
                        }

                        if (!gre_spec && !gre_mask) {
                                filter->tunnel_type =
                                        CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE;
                                break;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = item->spec;
                        vf = vf_spec->id;
                        if (!BNXT_PF(bp)) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Configuring on a VF!");
                                return -rte_errno;
                        }

                        if (vf >= bp->pdev->max_vfs) {
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Incorrect VF id!");
                                return -rte_errno;
                        }

                        if (!attr->transfer) {
                                rte_flow_error_set(error,
                                                   ENOTSUP,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Matching VF traffic without"
                                                   " affecting it (transfer attribute)"
                                                   " is unsupported");
                                return -rte_errno;
                        }

                        dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                        if (dflt_vnic < 0) {
                                /* A negative value simply means no driver is
                                 * loaded on the VF, so its default VNIC cannot
                                 * be queried; fail the flow request.
                                 */
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Unable to get default VNIC for VF");
                                return -rte_errno;
                        }

                        filter->mirror_vnic_id = dflt_vnic;
                        en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
                        break;
                default:
                        break;
                }
                item++;
        }

        filter->enables = en;

        return 0;
}
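
/*
 * The "en" bits accumulated above become the "enables" bitmap of the
 * eventual HWRM filter-alloc request. For example (illustrative), an
 * ntuple flow matching IPv4 addresses plus TCP ports ends up with
 * NTUPLE_FLTR_ALLOC_INPUT_EN_{SRC,DST}_IPADDR and
 * NTUPLE_FLTR_ALLOC_INPUT_EN_{SRC,DST}_PORT set in filter->enables.
 */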
/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr,
                                   "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr,
                                   "No support for egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr,
                                   "No support for priority.");
                return -rte_errno;
        }

        return 0;
}
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
                   struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter1, *f0;
        struct bnxt_vnic_info *vnic0;
        int rc;

        vnic0 = &bp->vnic_info[0];
        f0 = STAILQ_FIRST(&vnic0->filter);

        /* This flow has same DST MAC as the port/l2 filter. */
        if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
                return f0;

        /* This flow needs DST MAC which is not same as port/l2 */
        PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
        filter1 = bnxt_get_unused_filter(bp);
        if (filter1 == NULL)
                return NULL;

        filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
        memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
        memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
                                     filter1);
        if (rc) {
                bnxt_free_filter(bp, filter1);
                return NULL;
        }
        return filter1;
}
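
/*
 * In short: a flow whose destination MAC equals the MAC of the existing
 * port-level L2 filter reuses that filter (f0); only a flow steering to
 * a different destination MAC allocates a fresh L2 filter over HWRM.
 */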
static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        uint64_t rx_offloads = dev_conf->rxmode.offloads;
        int rc;

        rc = bnxt_vnic_grp_alloc(bp, vnic);
        if (rc)
                goto ret;

        rc = bnxt_hwrm_vnic_alloc(bp, vnic);
        if (rc) {
                PMD_DRV_LOG(ERR, "HWRM vnic alloc failure rc: %x\n", rc);
                goto ret;
        }
        bp->nr_vnics++;

        /* RSS context is required only when there is more than one RSS ring */
        if (vnic->rx_queue_cnt > 1) {
                rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0 /* ctx_idx 0 */);
                if (rc) {
                        PMD_DRV_LOG(ERR,
                                    "HWRM vnic ctx alloc failure: %x\n", rc);
                        goto ret;
                }
        } else {
                PMD_DRV_LOG(DEBUG, "No RSS context required\n");
        }

        if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                vnic->vlan_strip = true;
        else
                vnic->vlan_strip = false;

        rc = bnxt_hwrm_vnic_cfg(bp, vnic);
        if (rc)
                goto ret;

        bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
ret:
        return rc;
}
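
/*
 * bnxt_vnic_prep() performs the HWRM bring-up sequence for a VNIC that
 * backs a queue action: ring-group alloc, VNIC alloc, an RSS context
 * only when more than one Rx ring is involved, then VNIC config and
 * placement-mode config.
 */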
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             const struct rte_flow_attr *attr,
                             struct rte_flow_error *error,
                             struct bnxt_filter_info *filter)
{
        const struct rte_flow_action *act =
                bnxt_flow_non_void_action(actions);
        struct bnxt *bp = dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_vf *act_vf;
        struct bnxt_vnic_info *vnic, *vnic0;
        struct bnxt_filter_info *filter1;
        struct bnxt_rx_queue *rxq = NULL;
        int dflt_vnic, vnic_id;
        uint32_t vf = 0;
        int rc;

        rc = bnxt_validate_and_parse_flow_type(bp, attr, pattern, error,
                                               filter);
        if (rc != 0)
                goto ret;

        rc = bnxt_flow_parse_attr(attr, error);
        if (rc != 0)
                goto ret;

        /* Since we support ingress attribute only - right now. */
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                /* Allow this flow. Redirect to a VNIC. */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                if (!act_q->index || act_q->index >= bp->rx_nr_rings) {
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Invalid queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

                vnic_id = attr->group;
                if (!vnic_id) {
                        PMD_DRV_LOG(DEBUG, "Group id is 0\n");
                        vnic_id = act_q->index;
                }

                vnic = &bp->vnic_info[vnic_id];
                if (vnic == NULL) {
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "No matching VNIC found.");
                        rc = -rte_errno;
                        goto ret;
                }
                if (vnic->rx_queue_cnt) {
                        if (vnic->start_grp_id != act_q->index) {
                                PMD_DRV_LOG(ERR,
                                            "VNIC already in use\n");
                                rte_flow_error_set(error,
                                                   EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   act,
                                                   "VNIC already in use");
                                rc = -rte_errno;
                                goto ret;
                        }
                        goto use_vnic;
                }

                rxq = bp->rx_queues[act_q->index];

                if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) && rxq)
                        goto use_vnic;

                if (!rxq ||
                    bp->vnic_info[0].fw_grp_ids[act_q->index] !=
                    INVALID_HW_RING_ID ||
                    !rxq->rx_deferred_start) {
                        PMD_DRV_LOG(ERR,
                                    "Queue invalid or used with other VNIC\n");
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Queue invalid or in use");
                        rc = -rte_errno;
                        goto ret;
                }

                rxq->vnic = vnic;
                vnic->rx_queue_cnt++;
                vnic->start_grp_id = act_q->index;
                vnic->end_grp_id = act_q->index;
                vnic->func_default = 0; /* This is not a default VNIC. */

                PMD_DRV_LOG(DEBUG, "VNIC found\n");

                rc = bnxt_vnic_prep(bp, vnic);
                if (rc)
                        goto ret;

                PMD_DRV_LOG(DEBUG,
                            "vnic[%d] = %p vnic->fw_grp_ids = %p\n",
                            act_q->index, vnic, vnic->fw_grp_ids);

use_vnic:
                vnic->ff_pool_idx = vnic_id;
                PMD_DRV_LOG(DEBUG,
                            "Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
                filter->dst_id = vnic->fw_vnic_id;
                filter1 = bnxt_get_l2_filter(bp, filter, vnic);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }

                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                PMD_DRV_LOG(DEBUG, "VNIC found\n");
                break;
        case RTE_FLOW_ACTION_TYPE_DROP:
                vnic0 = &bp->vnic_info[0];
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }

                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        filter->flags =
                                HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
                else
                        filter->flags =
                                HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
                break;
        case RTE_FLOW_ACTION_TYPE_COUNT:
                vnic0 = &bp->vnic_info[0];
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }

                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
                break;
        case RTE_FLOW_ACTION_TYPE_VF:
                act_vf = (const struct rte_flow_action_vf *)act->conf;
                vf = act_vf->id;

                if (filter->tunnel_type ==
                    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
                    filter->tunnel_type ==
                    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) {
                        /* If issued on a VF, ensure id is 0 and is trusted */
                        if (BNXT_VF(bp)) {
                                if (!BNXT_VF_IS_TRUSTED(bp) || vf) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                act,
                                                "Incorrect VF");
                                        rc = -rte_errno;
                                        goto ret;
                                }
                        }

                        filter->enables |= filter->tunnel_type;
                        filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER;
                        goto done;
                }

                if (vf >= bp->pdev->max_vfs) {
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Incorrect VF id!");
                        rc = -rte_errno;
                        goto ret;
                }

                dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                if (dflt_vnic < 0) {
                        /* A negative value simply means no driver is loaded
                         * on the VF, so its default VNIC cannot be queried;
                         * fail the flow request.
                         */
                        rte_flow_error_set(error,
                                           EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION,
                                           act,
                                           "Unable to get default VNIC for VF");
                        rc = -rte_errno;
                        goto ret;
                }

                filter->mirror_vnic_id = dflt_vnic;
                filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

                vnic0 = &bp->vnic_info[0];
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }

                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                break;
        default:
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }

        if (filter1) {
                bnxt_free_filter(bp, filter1);
                filter1->fw_l2_filter_id = -1;
        }
done:
        act = bnxt_flow_non_void_action(++act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error,
                                   EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }
ret:
        return rc;
}
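
/*
 * Illustrative mapping of the actions handled above (testpmd syntax):
 *
 *      ... actions queue index 1 / end  ->  dst_id = VNIC of Rx queue 1
 *      ... actions drop / end           ->  *_FLAGS_DROP on VNIC 0
 *      ... actions count / end          ->  *_FLAGS_METER on VNIC 0
 *      ... actions vf id 2 / end        ->  mirror_vnic_id of VF 2
 *
 * Exactly one terminating action is accepted; any further non-VOID
 * action before END is rejected above.
 */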
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct bnxt *bp = dev->data->dev_private;
        struct bnxt_filter_info *filter;
        int ret = 0;

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0)
                return ret;

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                return -ENOMEM;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        /* No need to hold on to this filter if we are just validating flow */
        filter->fw_l2_filter_id = UINT64_MAX;
        bnxt_free_filter(bp, filter);

        return ret;
}
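
/*
 * Validation only borrows a scratch filter to run the parser; nothing is
 * programmed into the NIC, which is why the filter is returned to the
 * free list immediately above.
 */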
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
        struct bnxt_filter_info *mf;
        struct rte_flow *flow;
        int i;

        for (i = bp->max_vnics - 1; i >= 0; i--) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                if (vnic->fw_vnic_id == INVALID_VNIC_ID)
                        continue;

                STAILQ_FOREACH(flow, &vnic->flow_list, next) {
                        mf = flow->filter;
                        if (mf->filter_type == nf->filter_type &&
                            mf->flags == nf->flags &&
                            mf->src_port == nf->src_port &&
                            mf->src_port_mask == nf->src_port_mask &&
                            mf->dst_port == nf->dst_port &&
                            mf->dst_port_mask == nf->dst_port_mask &&
                            mf->ip_protocol == nf->ip_protocol &&
                            mf->ip_addr_type == nf->ip_addr_type &&
                            mf->ethertype == nf->ethertype &&
                            mf->vni == nf->vni &&
                            mf->tunnel_type == nf->tunnel_type &&
                            mf->l2_ovlan == nf->l2_ovlan &&
                            mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
                            mf->l2_ivlan == nf->l2_ivlan &&
                            mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
                            !memcmp(mf->l2_addr, nf->l2_addr,
                                    RTE_ETHER_ADDR_LEN) &&
                            !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
                                    RTE_ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_macaddr, nf->src_macaddr,
                                    RTE_ETHER_ADDR_LEN) &&
                            !memcmp(mf->dst_macaddr, nf->dst_macaddr,
                                    RTE_ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_ipaddr, nf->src_ipaddr,
                                    sizeof(nf->src_ipaddr)) &&
                            !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
                                    sizeof(nf->src_ipaddr_mask)) &&
                            !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
                                    sizeof(nf->dst_ipaddr)) &&
                            !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
                                    sizeof(nf->dst_ipaddr_mask))) {
                                if (mf->dst_id == nf->dst_id)
                                        return -EEXIST;
                                /* Clear the new L2 filter that was created
                                 * earlier in bnxt_validate_and_parse_flow.
                                 */
                                bnxt_hwrm_clear_l2_filter(bp, nf);
                                /* Same flow, different queue: clear the old
                                 * ntuple filter and reuse the matching L2
                                 * filter ID for the new filter.
                                 */
                                nf->fw_l2_filter_id = mf->fw_l2_filter_id;
                                if (nf->filter_type == HWRM_CFA_EM_FILTER)
                                        bnxt_hwrm_clear_em_filter(bp, mf);
                                if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                        bnxt_hwrm_clear_ntuple_filter(bp, mf);
                                /* Free the old filter and update the flow
                                 * with the new filter.
                                 */
                                bnxt_free_filter(bp, mf);
                                flow->filter = nf;
                                return -EXDEV;
                        }
                }
        }
        return 0;
}
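
/*
 * Return convention of bnxt_match_filter(): 0 means no duplicate,
 * -EEXIST means an identical flow to the same destination already
 * exists, and -EXDEV means the same pattern exists for a different
 * destination, in which case the old hardware filter was cleared and
 * the new filter inherits the matching L2 filter ID.
 */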
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct bnxt *bp = dev->data->dev_private;
        struct bnxt_vnic_info *vnic = NULL;
        struct bnxt_filter_info *filter;
        bool update_flow = false;
        struct rte_flow *flow;
        uint32_t tun_type;
        unsigned int i;
        int ret = 0;

        if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to create flow, Not a Trusted VF!");
                return NULL;
        }

        flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return flow;
        }

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "Not a valid flow.\n");
                goto free_flow;
        }

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                goto free_flow;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        if (ret != 0)
                goto free_filter;

        ret = bnxt_match_filter(bp, filter);
        if (ret == -EEXIST) {
                PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
                /* Clear the L2 filter that was created as part of
                 * bnxt_validate_and_parse_flow() above.
                 */
                bnxt_hwrm_clear_l2_filter(bp, filter);
                goto free_filter;
        } else if (ret == -EXDEV) {
                PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
                PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
                update_flow = true;
        }
        /* If tunnel redirection to a VF/PF is specified then only tunnel_type
         * is set and enables is set to the tunnel type. Issue hwrm cmd
         * directly in such a case.
         */
        if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
            filter->enables == filter->tunnel_type) {
                ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
                if (ret) {
                        rte_flow_error_set(error, -ret,
                                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                           "Unable to query tunnel to VF");
                        goto free_filter;
                }
                if (tun_type == (1U << filter->tunnel_type)) {
                        ret = bnxt_hwrm_tunnel_redirect_free(bp,
                                                        filter->tunnel_type);
                        if (ret) {
                                PMD_DRV_LOG(ERR,
                                            "Unable to free existing tunnel\n");
                                rte_flow_error_set(error, -ret,
                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
                                                   NULL,
                                                   "Unable to free preexisting "
                                                   "tunnel on VF");
                                goto free_filter;
                        }
                }
                ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type);
                if (ret) {
                        rte_flow_error_set(error, -ret,
                                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                           "Unable to redirect tunnel to VF");
                        goto free_filter;
                }
                vnic = &bp->vnic_info[0];
                goto done;
        }

        if (filter->filter_type == HWRM_CFA_EM_FILTER) {
                filter->enables |=
                        HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
        }

        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
                filter->enables |=
                        HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
        }

        for (i = 0; i < bp->max_vnics; i++) {
                vnic = &bp->vnic_info[i];
                if (vnic->fw_vnic_id != INVALID_VNIC_ID &&
                    filter->dst_id == vnic->fw_vnic_id) {
                        PMD_DRV_LOG(DEBUG, "Found matching VNIC Id %d\n",
                                    vnic->ff_pool_idx);
                        break;
                }
        }
done:
        if (!ret || update_flow) {
                flow->filter = filter;
                flow->vnic = vnic;
                if (update_flow) {
                        ret = -EXDEV;
                        goto free_flow;
                }

                /* VNIC is set only in case of queue or RSS action;
                 * RxQ0 is not used for flow filters.
                 */
                if (vnic)
                        STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

                PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
                STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
                return flow;
        }
free_filter:
        bnxt_free_filter(bp, filter);
free_flow:
        if (ret == -EEXIST)
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Matching Flow exists.");
        else if (ret == -EXDEV)
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Flow with pattern exists, updating destination queue");
        else if (!rte_errno)
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to create flow.");
        rte_free(flow);
        return NULL;
}
static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
                                               struct bnxt_filter_info *filter,
                                               struct rte_flow_error *error)
{
        uint16_t tun_dst_fid;
        uint32_t tun_type;
        int ret = 0;

        ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Unable to query tunnel to VF");
                return ret;
        }
        if (tun_type == (1U << filter->tunnel_type)) {
                ret = bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
                                                     &tun_dst_fid);
                if (ret) {
                        rte_flow_error_set(error, -ret,
                                           RTE_FLOW_ERROR_TYPE_HANDLE,
                                           NULL,
                                           "tunnel_redirect info cmd fail");
                        return ret;
                }
                PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
                            tun_dst_fid + bp->first_vf_id, bp->fw_fid);

                /* Tunnel doesn't belong to this VF, so don't send HWRM
                 * cmd, just delete the flow from driver
                 */
                if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id))
                        PMD_DRV_LOG(ERR,
                                    "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
                else
                        ret = bnxt_hwrm_tunnel_redirect_free(bp,
                                                        filter->tunnel_type);
        }

        return ret;
}
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct bnxt *bp = dev->data->dev_private;
        struct bnxt_filter_info *filter = flow->filter;
        struct bnxt_vnic_info *vnic = flow->vnic;
        int ret = 0;

        /* If tunnel redirection to a VF/PF is specified then only tunnel_type
         * is set and enables is set to the tunnel type. Issue hwrm cmd
         * directly in such a case.
         */
        if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
            filter->enables == filter->tunnel_type) {
                ret = bnxt_handle_tunnel_redirect_destroy(bp,
                                                          filter,
                                                          error);
                if (!ret)
                        goto done;
                else
                        return ret;
        }

        ret = bnxt_match_filter(bp, filter);
        if (ret == 0)
                PMD_DRV_LOG(ERR, "Could not find matching flow\n");
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                ret = bnxt_hwrm_clear_em_filter(bp, filter);
        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
        else
                ret = bnxt_hwrm_clear_l2_filter(bp, filter);

done:
        if (!ret) {
                bnxt_free_filter(bp, filter);
                STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
                rte_free(flow);
        } else {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to destroy flow.");
        }

        return ret;
}
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct bnxt *bp = dev->data->dev_private;
        struct bnxt_vnic_info *vnic;
        struct rte_flow *flow;
        unsigned int i;
        int ret = 0;

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];
                STAILQ_FOREACH(flow, &vnic->flow_list, next) {
                        struct bnxt_filter_info *filter = flow->filter;

                        if (filter->filter_type ==
                            HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
                            filter->enables == filter->tunnel_type) {
                                ret =
                                bnxt_handle_tunnel_redirect_destroy(bp,
                                                                    filter,
                                                                    error);
                                if (!ret)
                                        goto done;
                                else
                                        return ret;
                        }

                        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                                ret = bnxt_hwrm_clear_em_filter(bp, filter);
                        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                ret = bnxt_hwrm_clear_ntuple_filter(bp,
                                                                    filter);
                        ret = bnxt_hwrm_clear_l2_filter(bp, filter);

                        if (ret) {
                                rte_flow_error_set
                                        (error,
                                         -ret,
                                         RTE_FLOW_ERROR_TYPE_HANDLE,
                                         NULL,
                                         "Failed to flush flow in HW.");
                                return -rte_errno;
                        }
done:
                        bnxt_free_filter(bp, filter);
                        STAILQ_REMOVE(&vnic->flow_list, flow,
                                      rte_flow, next);
                        rte_free(flow);
                }
        }

        return ret;
}
const struct rte_flow_ops bnxt_flow_ops = {
        .validate = bnxt_flow_validate,
        .create = bnxt_flow_create,
        .destroy = bnxt_flow_destroy,
        .flush = bnxt_flow_flush,
};
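
/*
 * These callbacks are invoked through the generic rte_flow API once the
 * driver exposes bnxt_flow_ops (hooked up elsewhere in the PMD, via the
 * ethdev flow-ops/filter-ctrl path): rte_flow_create() reaches
 * bnxt_flow_create(), rte_flow_flush() reaches bnxt_flow_flush(), etc.
 */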