X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_flow.c;h=73fd24cd9a126dd0920ecbd43aa2ee5771ddb131;hb=a0c2315a2a5f0a83d5d77ec745ac1de04a1e962b;hp=07d359edf69c2a03e6984e7f87f31227ec81a2ac;hpb=5d47d06b2c837bf98c6f365019c8daa2a069c1a5;p=dpdk.git

diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
index 07d359edf6..73fd24cd9a 100644
--- a/drivers/net/bnxt/bnxt_flow.c
+++ b/drivers/net/bnxt/bnxt_flow.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2018 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
  * All rights reserved.
  */
 
@@ -188,11 +188,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 			PMD_DRV_LOG(DEBUG, "Parse inner header\n");
 			break;
 		case RTE_FLOW_ITEM_TYPE_ETH:
-			if (!item->spec || !item->mask)
+			if (!item->spec)
 				break;
 
 			eth_spec = item->spec;
-			eth_mask = item->mask;
+
+			if (item->mask)
+				eth_mask = item->mask;
+			else
+				eth_mask = &rte_flow_item_eth_mask;
 
 			/* Source MAC address mask cannot be partially set.
 			 * Should be All 0's or all 1's.
@@ -281,7 +285,12 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 			break;
 		case RTE_FLOW_ITEM_TYPE_VLAN:
 			vlan_spec = item->spec;
-			vlan_mask = item->mask;
+
+			if (item->mask)
+				vlan_mask = item->mask;
+			else
+				vlan_mask = &rte_flow_item_vlan_mask;
+
 			if (en & en_ethertype) {
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ITEM,
@@ -324,11 +333,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			/* If mask is not involved, we could use EM filters. */
 			ipv4_spec = item->spec;
-			ipv4_mask = item->mask;
 
-			if (!item->spec || !item->mask)
+			if (!item->spec)
 				break;
 
+			if (item->mask)
+				ipv4_mask = item->mask;
+			else
+				ipv4_mask = &rte_flow_item_ipv4_mask;
+
 			/* Only IP DST and SRC fields are maskable. */
 			if (ipv4_mask->hdr.version_ihl ||
 			    ipv4_mask->hdr.type_of_service ||
@@ -385,11 +398,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 			break;
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			ipv6_spec = item->spec;
-			ipv6_mask = item->mask;
 
-			if (!item->spec || !item->mask)
+			if (!item->spec)
 				break;
 
+			if (item->mask)
+				ipv6_mask = item->mask;
+			else
+				ipv6_mask = &rte_flow_item_ipv6_mask;
+
 			/* Only IP DST and SRC fields are maskable. */
 			if (ipv6_mask->hdr.vtc_flow ||
 			    ipv6_mask->hdr.payload_len ||
@@ -437,11 +454,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			tcp_spec = item->spec;
-			tcp_mask = item->mask;
 
-			if (!item->spec || !item->mask)
+			if (!item->spec)
 				break;
 
+			if (item->mask)
+				tcp_mask = item->mask;
+			else
+				tcp_mask = &rte_flow_item_tcp_mask;
+
 			/* Check TCP mask. Only DST & SRC ports are maskable */
 			if (tcp_mask->hdr.sent_seq ||
 			    tcp_mask->hdr.recv_ack ||
@@ -482,11 +503,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
 			break;
 		case RTE_FLOW_ITEM_TYPE_UDP:
 			udp_spec = item->spec;
-			udp_mask = item->mask;
 
-			if (!item->spec || !item->mask)
+			if (!item->spec)
 				break;
 
+			if (item->mask)
+				udp_mask = item->mask;
+			else
+				udp_mask = &rte_flow_item_udp_mask;
+
 			if (udp_mask->hdr.dgram_len ||
 			    udp_mask->hdr.dgram_cksum) {
 				rte_flow_error_set(error,
@@ -900,6 +925,9 @@ static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 	uint64_t rx_offloads = dev_conf->rxmode.offloads;
 	int rc;
 
+	if (bp->nr_vnics > bp->max_vnics - 1)
+		return -ENOMEM;
+
 	rc = bnxt_vnic_grp_alloc(bp, vnic);
 	if (rc)
 		goto ret;
@@ -1379,8 +1407,8 @@ skip_vnic_alloc:
 			/* If hash key has not been specified,
 			 * use random hash key.
 			 */
-			prandom_bytes(vnic->rss_hash_key,
-				      HW_HASH_KEY_SIZE);
+			bnxt_prandom_bytes(vnic->rss_hash_key,
+					   HW_HASH_KEY_SIZE);
 		} else {
 			if (rss->key_len > HW_HASH_KEY_SIZE)
 				memcpy(vnic->rss_hash_key,
@@ -1525,6 +1553,7 @@ bnxt_flow_validate(struct rte_eth_dev *dev,
 			bnxt_hwrm_vnic_ctx_free(bp, vnic);
 			bnxt_hwrm_vnic_free(bp, vnic);
 			vnic->rx_queue_cnt = 0;
+			bp->nr_vnics--;
 			PMD_DRV_LOG(DEBUG, "Free VNIC\n");
 		}
 	}
@@ -1986,6 +2015,7 @@ done:
 
 			bnxt_hwrm_vnic_free(bp, vnic);
 			vnic->rx_queue_cnt = 0;
+			bp->nr_vnics--;
 		}
 	} else {
 		rte_flow_error_set(error, -ret,
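
The recurring change in the pattern-parsing hunks above is a fallback to the generic rte_flow default item masks when an application supplies a spec without a mask, instead of skipping the item. A minimal sketch of that convention for the Ethernet item follows; the helper name item_eth_mask_or_default() is hypothetical and only struct rte_flow_item, struct rte_flow_item_eth, and rte_flow_item_eth_mask come from rte_flow.h.

#include <rte_flow.h>

/* Hypothetical helper: return the caller-provided mask when present,
 * otherwise fall back to the default mask exported by rte_flow.h
 * (for ETH the default matches the full dst/src MAC addresses).
 */
static const struct rte_flow_item_eth *
item_eth_mask_or_default(const struct rte_flow_item *item)
{
	if (item->mask)
		return item->mask;

	return &rte_flow_item_eth_mask;
}

The driver-side hunks apply the same idea per item type (ETH, VLAN, IPv4, IPv6, TCP, UDP), each using the matching rte_flow_item_*_mask default.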