/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_vnic.h"
#include "bnxt_util.h"
#include "hsi_struct_def_dpdk.h"
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	return 0;
}
static const struct rte_flow_item *
bnxt_flow_non_void_item(const struct rte_flow_item *cur)
{
	while (1) {
		if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
			return cur;
		cur++;
	}
}

static const struct rte_flow_action *
bnxt_flow_non_void_action(const struct rte_flow_action *cur)
{
	while (1) {
		if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
			return cur;
		cur++;
	}
}
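
/* Decide which HWRM filter type a pattern needs: patterns built from
 * L2/ANY items can use the exact-match (EM) filter table, while matching
 * on IPv4/IPv6 or TCP/UDP fields requires an ntuple filter.  Returns
 * nonzero when an ntuple filter is needed, negative on an invalid mix.
 */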
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
		       struct rte_flow_error *error)
{
	const struct rte_flow_item *item =
		bnxt_flow_non_void_item(pattern);
	int use_ntuple = 1;
	bool has_vlan = 0;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			use_ntuple = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			use_ntuple = 0;
			has_vlan = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_TCP:
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* need ntuple match, reset exact match */
			use_ntuple |= 1;
			if (has_vlan) {
				PMD_DRV_LOG(ERR,
					    "VLAN flow cannot use NTUPLE filter\n");
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   NULL,
						   "Cannot use VLAN with NTUPLE");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_ANY:
			use_ntuple = 0;
			break;
		default:
			PMD_DRV_LOG(DEBUG, "Unknown Flow type\n");
			use_ntuple |= 0;
		}
		item++;
	}

	return use_ntuple;
}
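
/* Translate the pattern items into bnxt_filter_info match fields,
 * accumulating the corresponding HWRM "enables" bits in 'en' as each
 * field is populated.  Tunnel items with NULL spec and mask only select
 * a tunnel type rather than adding match fields.
 */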
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
				  const struct rte_flow_attr *attr,
				  const struct rte_flow_item pattern[],
				  struct rte_flow_error *error,
				  struct bnxt_filter_info *filter)
{
	const struct rte_flow_item *item = bnxt_flow_non_void_item(pattern);
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_gre *gre_spec;
	const struct rte_flow_item_gre *gre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	const struct rte_flow_item_vf *vf_spec;
	uint32_t tenant_id_be = 0;
	bool vni_masked = 0;
	bool tni_masked = 0;
	uint32_t en = 0;
	uint32_t vf = 0;
	int use_ntuple;
	uint32_t en_ethertype;
	int dflt_vnic, rc = 0;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
	if (use_ntuple < 0)
		return use_ntuple;

	if (use_ntuple && (bp->eth_dev->data->dev_conf.rxmode.mq_mode &
	    ETH_MQ_RX_RSS)) {
		PMD_DRV_LOG(ERR, "Cannot create ntuple flow on RSS queues\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Cannot create flow on RSS queues");
		rc = -rte_errno;
		goto ret;
	}

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
	en_ethertype = use_ntuple ?
		NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
		EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->last) {
			/* last or range is NOT supported as match criteria */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "No support for range");
			rc = -rte_errno;
			goto ret;
		}

		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			if (!item->spec || !item->mask)
				break;

			eth_spec = item->spec;
			eth_mask = item->mask;

			/* Source MAC address mask cannot be partially set.
			 * Should be All 0's or all 1's.
			 * Destination MAC address mask must not be partially
			 * set. Should be all 1's or all 0's.
			 */
			if ((!rte_is_zero_ether_addr(&eth_mask->src) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->src)) ||
			    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
			     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "MAC_addr mask not valid");
				rc = -rte_errno;
				goto ret;
			}

			/* Mask is not allowed. Only exact matches are */
			if (eth_mask->type &&
			    eth_mask->type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "ethertype mask not valid");
				rc = -rte_errno;
				goto ret;
			}

			if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
				rte_memcpy(filter->dst_macaddr,
					   &eth_spec->dst, RTE_ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
			}

			if (rte_is_broadcast_ether_addr(&eth_mask->src)) {
				rte_memcpy(filter->src_macaddr,
					   &eth_spec->src, RTE_ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
			} /*
			   * else {
			   *	PMD_DRV_LOG(ERR, "Handle this condition\n");
			   * }
			   */
			if (eth_mask->type) {
				filter->ethertype =
					rte_be_to_cpu_16(eth_spec->type);
				en |= en_ethertype;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			if (en & en_ethertype) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN TPID matching is not"
						   " supported");
				rc = -rte_errno;
				goto ret;
			}
			if (vlan_mask->tci &&
			    vlan_mask->tci == RTE_BE16(0x0fff)) {
				/* Only the VLAN ID can be matched. */
				filter->l2_ovlan =
					rte_be_to_cpu_16(vlan_spec->tci &
							 RTE_BE16(0x0fff));
				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
			} else {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN mask is invalid");
				rc = -rte_errno;
				goto ret;
			}
			if (vlan_mask->inner_type &&
			    vlan_mask->inner_type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "inner ethertype mask not"
						   " valid");
				rc = -rte_errno;
				goto ret;
			}
			if (vlan_mask->inner_type) {
				filter->ethertype =
					rte_be_to_cpu_16(vlan_spec->inner_type);
				en |= en_ethertype;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* If mask is not involved, we could use EM filters. */
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.type_of_service ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.time_to_live ||
			    ipv4_mask->hdr.next_proto_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				rc = -rte_errno;
				goto ret;
			}

			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

			if (ipv4_mask->hdr.src_addr) {
				filter->src_ipaddr_mask[0] =
					ipv4_mask->hdr.src_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}

			if (ipv4_mask->hdr.dst_addr) {
				filter->dst_ipaddr_mask[0] =
					ipv4_mask->hdr.dst_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}

			filter->ip_addr_type = use_ntuple ?
			 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
			 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;

			if (ipv4_spec->hdr.next_proto_id) {
				filter->ip_protocol =
					ipv4_spec->hdr.next_proto_id;
				if (use_ntuple)
					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
				else
					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv6_mask->hdr.vtc_flow ||
			    ipv6_mask->hdr.payload_len ||
			    ipv6_mask->hdr.proto ||
			    ipv6_mask->hdr.hop_limits) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask.");
				rc = -rte_errno;
				goto ret;
			}

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;

			rte_memcpy(filter->src_ipaddr,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->dst_ipaddr,
				   ipv6_spec->hdr.dst_addr, 16);

			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
						   16)) {
				rte_memcpy(filter->src_ipaddr_mask,
					   ipv6_mask->hdr.src_addr, 16);
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}

			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
						   16)) {
				rte_memcpy(filter->dst_ipaddr_mask,
					   ipv6_mask->hdr.dst_addr, 16);
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}

			filter->ip_addr_type = use_ntuple ?
				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			/* Check TCP mask. Only DST & SRC ports are maskable */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				rc = -rte_errno;
				goto ret;
			}

			filter->src_port = tcp_spec->hdr.src_port;
			filter->dst_port = tcp_spec->hdr.dst_port;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (tcp_mask->hdr.dst_port) {
				filter->dst_port_mask = tcp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				      NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}

			if (tcp_mask->hdr.src_port) {
				filter->src_port_mask = tcp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				      NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (!item->spec || !item->mask)
				break;

			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				rc = -rte_errno;
				goto ret;
			}

			filter->src_port = udp_spec->hdr.src_port;
			filter->dst_port = udp_spec->hdr.dst_port;

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (udp_mask->hdr.dst_port) {
				filter->dst_port_mask = udp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				      NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}

			if (udp_mask->hdr.src_port) {
				filter->src_port_mask = udp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				      NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				rc = -rte_errno;
				goto ret;
			}

			if (!vxlan_spec && !vxlan_mask) {
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
				break;
			}

			if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
			    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
			    vxlan_spec->flags != 0x8) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				rc = -rte_errno;
				goto ret;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (vni_masked) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid VNI mask");
					rc = -rte_errno;
					goto ret;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				rc = -rte_errno;
				goto ret;
			}

			if (!nvgre_spec && !nvgre_mask) {
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
				break;
			}

			if (nvgre_spec->c_k_s_rsvd0_ver != 0x2000 ||
			    nvgre_spec->protocol != 0x6558) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				rc = -rte_errno;
				goto ret;
			}

			if (nvgre_spec && nvgre_mask) {
				tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (tni_masked) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid TNI mask");
					rc = -rte_errno;
					goto ret;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			gre_spec = (const struct rte_flow_item_gre *)item->spec;
			gre_mask = (const struct rte_flow_item_gre *)item->mask;

			/* Check if GRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if (!!gre_spec ^ !!gre_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GRE item");
				return -rte_errno;
			}

			if (!gre_spec && !gre_mask) {
				filter->tunnel_type =
				CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE;
				break;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = item->spec;
			vf = vf_spec->id;
			if (!BNXT_PF(bp)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Configuring on a VF!");
				rc = -rte_errno;
				goto ret;
			}

			if (vf >= bp->pdev->max_vfs) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Incorrect VF id!");
				rc = -rte_errno;
				goto ret;
			}

			if (!attr->transfer) {
				rte_flow_error_set(error, ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Matching VF traffic without"
						   " affecting it (transfer attribute)"
						   " is unsupported");
				rc = -rte_errno;
				goto ret;
			}

			filter->mirror_vnic_id =
			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
			if (dflt_vnic < 0) {
				/* This simply indicates there's no driver
				 * loaded. This is not an error.
				 */
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Unable to get default VNIC for VF");
				rc = -rte_errno;
				goto ret;
			}

			filter->mirror_vnic_id = dflt_vnic;
			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
			break;
		default:
			break;
		}
		item = bnxt_flow_non_void_item(item + 1);
	}

	filter->enables = en;

ret:
	return rc;
}
/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr,
				   "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr,
				   "No support for egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr,
				   "No support for priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   attr,
				   "No support for group.");
		return -rte_errno;
	}

	return 0;
}
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		   struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter1, *f0;
	struct bnxt_vnic_info *vnic0;
	int rc;

	vnic0 = &bp->vnic_info[0];
	f0 = STAILQ_FIRST(&vnic0->filter);

	/* This flow has same DST MAC as the port/l2 filter. */
	if (memcmp(f0->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN) == 0)
		return f0;

	/* This flow needs DST MAC which is not same as port/l2 */
	PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
	filter1 = bnxt_get_unused_filter(bp);
	if (filter1 == NULL)
		return NULL;

	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
	memcpy(filter1->l2_addr, nf->dst_macaddr, RTE_ETHER_ADDR_LEN);
	memset(filter1->l2_addr_mask, 0xff, RTE_ETHER_ADDR_LEN);
	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
				     filter1);
	if (rc) {
		bnxt_free_filter(bp, filter1);
		return NULL;
	}
	return filter1;
}
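
/* Parse the single supported action and finish building the filter:
 * QUEUE redirects to the VNIC backing the queue, DROP and COUNT set the
 * matching HWRM allocation flags, and VF either mirrors to the VF's
 * default VNIC or, for tunnel flows, converts the filter into a
 * tunnel-redirect request.  Anything after the first action must be END.
 */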
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error,
			     struct bnxt_filter_info *filter)
{
	const struct rte_flow_action *act =
		bnxt_flow_non_void_action(actions);
	struct bnxt *bp = dev->data->dev_private;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	struct bnxt_vnic_info *vnic, *vnic0;
	struct bnxt_filter_info *filter1;
	uint32_t vf = 0;
	int dflt_vnic;
	int rc;

	rc =
	bnxt_validate_and_parse_flow_type(bp, attr, pattern, error, filter);
	if (rc != 0)
		goto ret;

	rc = bnxt_flow_parse_attr(attr, error);
	if (rc != 0)
		goto ret;

	/* We currently support the ingress attribute only. */
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		/* Allow this flow. Redirect to a VNIC. */
		act_q = (const struct rte_flow_action_queue *)act->conf;
		if (act_q->index >= bp->rx_nr_rings) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Invalid queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

		vnic0 = &bp->vnic_info[0];
		vnic = &bp->vnic_info[act_q->index];
		if (vnic == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "No matching VNIC for queue ID.");
			rc = -rte_errno;
			goto ret;
		}

		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		PMD_DRV_LOG(DEBUG, "VNIC found\n");
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			filter->flags =
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
		else
			filter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		vf = act_vf->id;

		if (filter->tunnel_type ==
		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN ||
		    filter->tunnel_type ==
		    CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE) {
			/* If issued on a VF, ensure id is 0 and is trusted */
			if (BNXT_VF(bp)) {
				if (!BNXT_VF_IS_TRUSTED(bp) || vf) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						act,
						"Incorrect VF");
					rc = -rte_errno;
					goto ret;
				}
			}

			filter->enables |= filter->tunnel_type;
			filter->filter_type = HWRM_CFA_TUNNEL_REDIRECT_FILTER;
			goto done;
		}

		if (vf >= bp->pdev->max_vfs) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Incorrect VF id!");
			rc = -rte_errno;
			goto ret;
		}

		filter->mirror_vnic_id =
		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
		if (dflt_vnic < 0) {
			/* This simply indicates there's no driver loaded.
			 * This is not an error.
			 */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Unable to get default VNIC for VF");
			rc = -rte_errno;
			goto ret;
		}

		filter->mirror_vnic_id = dflt_vnic;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

		vnic0 = &bp->vnic_info[0];
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}

		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		break;
	default:
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

	if (filter1) {
		bnxt_free_filter(bp, filter1);
		filter1->fw_l2_filter_id = -1;
	}

done:
	act = bnxt_flow_non_void_action(++act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}
ret:
	return rc;
}
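
/* For reference, a minimal application-side sketch (not part of this
 * driver) that exercises the ntuple QUEUE path validated below; the
 * port_id and queue index are placeholders:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.dst_port = RTE_BE16(4789) };
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.dst_port = RTE_BE16(0xffff) };
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 */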
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_filter_info *filter;
	int ret = 0;

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0)
		return ret;

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		return -ENOMEM;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	/* No need to hold on to this filter if we are just validating flow */
	filter->fw_l2_filter_id = UINT64_MAX;
	bnxt_free_filter(bp, filter);

	return ret;
}
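
/* Look for an installed flow with the same match criteria.  Returns
 * -EEXIST if an identical flow (same destination) already exists,
 * -EXDEV if only the destination differs (the old HW filter is cleared
 * and its L2 filter id inherited so the flow can be re-pointed), and 0
 * when nothing matches.
 */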
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf;
	struct rte_flow *flow;
	int i;

	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;

			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    RTE_ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask))) {
				if (mf->dst_id == nf->dst_id)
					return -EEXIST;
				/* Same Flow, Different queue
				 * Clear the old ntuple filter
				 * Reuse the matching L2 filter
				 * ID for the new filter
				 */
				nf->fw_l2_filter_id = mf->fw_l2_filter_id;
				if (nf->filter_type == HWRM_CFA_EM_FILTER)
					bnxt_hwrm_clear_em_filter(bp, mf);
				if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
					bnxt_hwrm_clear_ntuple_filter(bp, mf);
				/* Free the old filter, update flow
				 * with new filter
				 */
				bnxt_free_filter(bp, mf);
				flow->filter = nf;
				return -EXDEV;
			}
		}
	}
	return 0;
}
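
/* rte_flow create handler: validate and parse the request, resolve
 * duplicates via bnxt_match_filter(), then program the EM/ntuple filter
 * (or issue the tunnel-redirect HWRM commands) and link the flow into
 * the owning VNIC's flow list.
 */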
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_filter_info *filter;
	struct bnxt_vnic_info *vnic = NULL;
	bool update_flow = false;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;
	uint32_t tun_type;

	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Not a valid flow.\n");
		goto free_flow;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		goto free_flow;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	if (ret != 0)
		goto free_filter;

	ret = bnxt_match_filter(bp, filter);
	if (ret == -EEXIST) {
		PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
		/* Clear the filter that was created as part of
		 * validate_and_parse_flow() above
		 */
		bnxt_hwrm_clear_l2_filter(bp, filter);
		goto free_filter;
	} else if (ret == -EXDEV) {
		PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
		PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
		update_flow = true;
	}

	/* If tunnel redirection to a VF/PF is specified then only tunnel_type
	 * is set and enable is set to the tunnel type. Issue hwrm cmd directly
	 * in such a case.
	 */
	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Unable to query tunnel to VF");
			goto free_filter;
		}
		if (tun_type == (1U << filter->tunnel_type)) {
			ret =
			bnxt_hwrm_tunnel_redirect_free(bp,
						       filter->tunnel_type);
			if (ret) {
				PMD_DRV_LOG(ERR,
					    "Unable to free existing tunnel\n");
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Unable to free preexisting "
						   "tunnel on VF");
				goto free_filter;
			}
		}
		ret = bnxt_hwrm_tunnel_redirect(bp, filter->tunnel_type);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Unable to redirect tunnel to VF");
			goto free_filter;
		}
		vnic = &bp->vnic_info[0];
		goto done;
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
		filter->enables |=
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
	}

	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
		filter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
	}

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (filter->dst_id == vnic->fw_vnic_id)
			break;
	}

done:
	if (!ret) {
		flow->filter = filter;
		flow->vnic = vnic;
		if (update_flow) {
			ret = -EXDEV;
			goto free_flow;
		}
		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
		return flow;
	}
free_filter:
	bnxt_free_filter(bp, filter);
free_flow:
	if (ret == -EEXIST)
		rte_flow_error_set(error, ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Matching Flow exists.");
	else if (ret == -EXDEV)
		rte_flow_error_set(error, ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Flow with pattern exists, updating destination queue");
	else
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");
	rte_free(flow);
	flow = NULL;
	return flow;
}
static int bnxt_handle_tunnel_redirect_destroy(struct bnxt *bp,
					       struct bnxt_filter_info *filter,
					       struct rte_flow_error *error)
{
	uint16_t tun_dst_fid;
	uint32_t tun_type;
	int ret = 0;

	ret = bnxt_hwrm_tunnel_redirect_query(bp, &tun_type);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Unable to query tunnel to VF");
		return ret;
	}
	if (tun_type == (1U << filter->tunnel_type)) {
		ret =
		bnxt_hwrm_tunnel_redirect_info(bp, filter->tunnel_type,
					       &tun_dst_fid);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   NULL,
					   "tunnel_redirect info cmd fail");
			return ret;
		}
		PMD_DRV_LOG(INFO, "Pre-existing tunnel fid = %x vf->fid = %x\n",
			    tun_dst_fid + bp->first_vf_id, bp->fw_fid);

		/* Tunnel doesn't belong to this VF, so don't send HWRM
		 * cmd, just delete the flow from driver
		 */
		if (bp->fw_fid != (tun_dst_fid + bp->first_vf_id))
			PMD_DRV_LOG(ERR,
				    "Tunnel does not belong to this VF, skip hwrm_tunnel_redirect_free\n");
		else
			ret = bnxt_hwrm_tunnel_redirect_free(bp,
							filter->tunnel_type);
	}
	return ret;
}
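
/* rte_flow destroy handler: clear the HW filter backing the flow, then
 * unlink and free the driver-side flow state.
 */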
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_filter_info *filter = flow->filter;
	struct bnxt_vnic_info *vnic = flow->vnic;
	int ret = 0;

	if (filter->filter_type == HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
	    filter->enables == filter->tunnel_type) {
		ret = bnxt_handle_tunnel_redirect_destroy(bp,
							  filter,
							  error);
		if (!ret)
			goto done;
		else
			return ret;
	}

	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		PMD_DRV_LOG(ERR, "Could not find matching flow\n");
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
	else
		ret = bnxt_hwrm_clear_l2_filter(bp, filter);

done:
	if (!ret) {
		bnxt_free_filter(bp, filter);
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			struct bnxt_filter_info *filter = flow->filter;

			if (filter->filter_type ==
			    HWRM_CFA_TUNNEL_REDIRECT_FILTER &&
			    filter->enables == filter->tunnel_type) {
				ret =
				bnxt_handle_tunnel_redirect_destroy(bp,
								    filter,
								    error);
				if (!ret)
					goto done;
				else
					return ret;
			}

			if (filter->filter_type == HWRM_CFA_EM_FILTER)
				ret = bnxt_hwrm_clear_em_filter(bp, filter);
			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
				ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
			else
				ret = bnxt_hwrm_clear_l2_filter(bp, filter);

			if (ret) {
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Failed to flush flow in HW.");
				return -rte_errno;
			}
done:
			bnxt_free_filter(bp, filter);
			STAILQ_REMOVE(&vnic->flow_list, flow,
				      rte_flow, next);
		}
	}

	return ret;
}
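
/* Flow ops table exposed through the rte_flow API.  The driver is
 * assumed to hand this table back from its generic filter-control
 * callback; that wiring lives elsewhere and is not shown here.
 */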
const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};