diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
index 499dcdf6b8..844bf1520f 100644
--- a/drivers/net/bnxt/bnxt_flow.c
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2018 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
  * All rights reserved.
  */
 
@@ -188,11 +188,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 			PMD_DRV_LOG(DEBUG, "Parse inner header\n");
 			break;
 		case RTE_FLOW_ITEM_TYPE_ETH:
-			if (!item->spec || !item->mask)
+			if (!item->spec)
 				break;
 
 			eth_spec = item->spec;
-			eth_mask = item->mask;
+
+			if (item->mask)
+				eth_mask = item->mask;
+			else
+				eth_mask = &rte_flow_item_eth_mask;
 
 			/* Source MAC address mask cannot be partially set.
 			 * Should be All 0's or all 1's.
@@ -281,7 +285,12 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
 			vlan_spec = item->spec;
-			vlan_mask = item->mask;
+
+			if (item->mask)
+				vlan_mask = item->mask;
+			else
+				vlan_mask = &rte_flow_item_vlan_mask;
+
 			if (en & en_ethertype) {
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
@@ -324,11 +333,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			/* If mask is not involved, we could use EM filters. */
 			ipv4_spec = item->spec;
-			ipv4_mask = item->mask;
 
-			if (!item->spec || !item->mask)
+			if (!item->spec)
 				break;
 
+			if (item->mask)
+				ipv4_mask = item->mask;
+			else
+				ipv4_mask = &rte_flow_item_ipv4_mask;
+
 			/* Only IP DST and SRC fields are maskable. */
 			if (ipv4_mask->hdr.version_ihl ||
 			    ipv4_mask->hdr.type_of_service ||
@@ -385,11 +398,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			ipv6_spec = item->spec;
-			ipv6_mask = item->mask;
 
-			if (!item->spec || !item->mask)
+			if (!item->spec)
 				break;
 
+			if (item->mask)
+				ipv6_mask = item->mask;
+			else
+				ipv6_mask = &rte_flow_item_ipv6_mask;
+
 			/* Only IP DST and SRC fields are maskable. */
 			if (ipv6_mask->hdr.vtc_flow ||
 			    ipv6_mask->hdr.payload_len ||
@@ -437,11 +454,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			tcp_spec = item->spec;
-			tcp_mask = item->mask;
 
-			if (!item->spec || !item->mask)
+			if (!item->spec)
 				break;
 
+			if (item->mask)
+				tcp_mask = item->mask;
+			else
+				tcp_mask = &rte_flow_item_tcp_mask;
+
 			/* Check TCP mask. Only DST & SRC ports are maskable */
 			if (tcp_mask->hdr.sent_seq ||
 			    tcp_mask->hdr.recv_ack ||
@@ -482,11 +503,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 			break;
 		case RTE_FLOW_ITEM_TYPE_UDP:
 			udp_spec = item->spec;
-			udp_mask = item->mask;
 
-			if (!item->spec || !item->mask)
+			if (!item->spec)
 				break;
 
+			if (item->mask)
+				udp_mask = item->mask;
+			else
+				udp_mask = &rte_flow_item_udp_mask;
+
 			if (udp_mask->hdr.dgram_len ||
 			    udp_mask->hdr.dgram_cksum) {
 				rte_flow_error_set(error,
@@ -554,7 +579,7 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 			}
 
 			/* Check if VNI is masked. */
-			if (vxlan_spec && vxlan_mask) {
+			if (vxlan_mask != NULL) {
 				vni_masked =
 					!!memcmp(vxlan_mask->vni, vni_mask,
 						 RTE_DIM(vni_mask));
@@ -1056,6 +1081,13 @@ start:
 		}
 		PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
 
+		if (use_ntuple && !BNXT_RFS_NEEDS_VNIC(bp)) {
+			filter->flags =
+				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DEST_RFS_RING_IDX;
+			filter->dst_id = act_q->index;
+			goto skip_vnic_alloc;
+		}
+
 		vnic_id = attr->group;
 		if (!vnic_id) {
 			PMD_DRV_LOG(DEBUG, "Group id is 0\n");
@@ -1127,7 +1159,7 @@ use_vnic:
 		PMD_DRV_LOG(DEBUG, "Setting vnic ff_idx %d\n",
 			    vnic->ff_pool_idx);
 		filter->dst_id = vnic->fw_vnic_id;
-
+skip_vnic_alloc:
 		/* For ntuple filter, create the L2 filter with default VNIC.
 		 * The user specified redirect queue will be set while creating
 		 * the ntuple filter in hardware.
@@ -1365,13 +1397,15 @@ use_vnic:
 		if (vnic->rx_queue_cnt > 1) {
 			vnic->hash_type =
 				bnxt_rte_to_hwrm_hash_types(rss->types);
+			vnic->hash_mode =
+			bnxt_rte_to_hwrm_hash_level(bp, rss->types, rss->level);
 
 			if (!rss->key_len) {
 				/* If hash key has not been specified,
 				 * use random hash key.
 				 */
-				prandom_bytes(vnic->rss_hash_key,
-					      HW_HASH_KEY_SIZE);
+				bnxt_prandom_bytes(vnic->rss_hash_key,
+						   HW_HASH_KEY_SIZE);
 			} else {
 				if (rss->key_len > HW_HASH_KEY_SIZE)
 					memcpy(vnic->rss_hash_key,
@@ -1806,7 +1840,10 @@ bnxt_flow_create(struct rte_eth_dev *dev,
 		}
 	}
 
-	vnic = find_matching_vnic(bp, filter);
+	if (BNXT_RFS_NEEDS_VNIC(bp))
+		vnic = find_matching_vnic(bp, filter);
+	else
+		vnic = BNXT_GET_DEFAULT_VNIC(bp);
 done:
 	if (!ret || update_flow) {
 		flow->filter = filter;
@@ -1816,9 +1853,6 @@ done:
 			goto free_flow;
 		}
 
-		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
-		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
-		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
 		if (filter->valid_flags & BNXT_FLOW_MARK_FLAG) {
 			PMD_DRV_LOG(DEBUG,
 				    "Mark action: mark id 0x%x, flow id 0x%x\n",
@@ -1833,15 +1867,21 @@ done:
 					RTE_FLOW_ERROR_TYPE_HANDLE,
 					NULL,
 					"Flow with mark id exists");
+				bnxt_clear_one_vnic_filter(bp, filter);
 				goto free_filter;
 			}
 			bp->mark_table[flow_id].valid = true;
 			bp->mark_table[flow_id].mark_id = filter->mark;
 		}
+
+		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
+		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
+
 		if (BNXT_FLOW_XSTATS_EN(bp))
 			bp->flow_stat->flow_count++;
 		bnxt_release_flow_lock(bp);
 		bnxt_setup_flow_counter(bp);
+		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
 		return flow;
 	}
 
@@ -1940,11 +1980,7 @@ _bnxt_flow_destroy(struct bnxt *bp,
 		filter->flow_id = 0;
 	}
 
-	if (filter->filter_type == HWRM_CFA_EM_FILTER)
-		ret = bnxt_hwrm_clear_em_filter(bp, filter);
-	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
-		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
-	ret = bnxt_hwrm_clear_l2_filter(bp, filter);
+	ret = bnxt_clear_one_vnic_filter(bp, filter);
 
 done:
 	if (!ret) {
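
Note on the recurring item-parsing hunks above: instead of bailing out when a flow item carries a spec but no mask, the parser now falls back to the default masks that rte_flow.h defines per item type (rte_flow_item_eth_mask, rte_flow_item_vlan_mask, rte_flow_item_ipv4_mask, and so on). A minimal sketch of that pattern outside the driver, with a hypothetical helper name that is not part of this patch:

#include <rte_flow.h>

/* Hypothetical helper showing the fallback used in each hunk: return the
 * caller-supplied mask when present, otherwise the default mask rte_flow.h
 * provides for the ETH item. The same shape repeats for VLAN, IPv4, IPv6,
 * TCP and UDP items.
 */
static const struct rte_flow_item_eth *
eth_mask_or_default(const struct rte_flow_item *item)
{
	if (item->mask)
		return item->mask;
	return &rte_flow_item_eth_mask;
}

Relying on the rte_flow defaults keeps the accepted semantics identical to what an application would get by passing the default mask explicitly, while allowing the common shorthand of a NULL mask.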