net/bnxt: fix dynamic VNIC count
diff --git a/drivers/net/bnxt/bnxt_flow.c b/drivers/net/bnxt/bnxt_flow.c
index 320b53d..73fd24c 100644
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2018 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
  * All rights reserved.
  */
 
@@ -188,11 +188,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                                PMD_DRV_LOG(DEBUG, "Parse inner header\n");
                        break;
                case RTE_FLOW_ITEM_TYPE_ETH:
-                       if (!item->spec || !item->mask)
+                       if (!item->spec)
                                break;
 
                        eth_spec = item->spec;
-                       eth_mask = item->mask;
+
+                       if (item->mask)
+                               eth_mask = item->mask;
+                       else
+                               eth_mask = &rte_flow_item_eth_mask;
 
                        /* Source MAC address mask cannot be partially set.
                         * Should be All 0's or all 1's.
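
The hunks for ETH, VLAN, IPv4, IPv6, TCP and UDP all apply the same pattern:
an item that carries a spec but no mask is no longer skipped; the parser now
falls back to the default mask that rte_flow exports for that item type.
A minimal sketch of the pattern, using only the rte_flow_item_*_mask defaults
from <rte_flow.h>; the helper name is illustrative and not part of the driver:

    #include <rte_flow.h>

    /* Return the caller-supplied mask if present, otherwise the default
     * Ethernet item mask exported by rte_flow.h.
     */
    static const struct rte_flow_item_eth *
    eth_item_mask(const struct rte_flow_item *item)
    {
            return item->mask ? item->mask : &rte_flow_item_eth_mask;
    }
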
@@ -281,7 +285,12 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec = item->spec;
-                       vlan_mask = item->mask;
+
+                       if (item->mask)
+                               vlan_mask = item->mask;
+                       else
+                               vlan_mask = &rte_flow_item_vlan_mask;
+
                        if (en & en_ethertype) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
@@ -324,11 +333,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        /* If mask is not involved, we could use EM filters. */
                        ipv4_spec = item->spec;
-                       ipv4_mask = item->mask;
 
-                       if (!item->spec || !item->mask)
+                       if (!item->spec)
                                break;
 
+                       if (item->mask)
+                               ipv4_mask = item->mask;
+                       else
+                               ipv4_mask = &rte_flow_item_ipv4_mask;
+
                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.type_of_service ||
@@ -385,11 +398,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
-                       ipv6_mask = item->mask;
 
-                       if (!item->spec || !item->mask)
+                       if (!item->spec)
                                break;
 
+                       if (item->mask)
+                               ipv6_mask = item->mask;
+                       else
+                               ipv6_mask = &rte_flow_item_ipv6_mask;
+
                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv6_mask->hdr.vtc_flow ||
                            ipv6_mask->hdr.payload_len ||
@@ -437,11 +454,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
-                       tcp_mask = item->mask;
 
-                       if (!item->spec || !item->mask)
+                       if (!item->spec)
                                break;
 
+                       if (item->mask)
+                               tcp_mask = item->mask;
+                       else
+                               tcp_mask = &rte_flow_item_tcp_mask;
+
                        /* Check TCP mask. Only DST & SRC ports are maskable */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
@@ -482,11 +503,15 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
-                       udp_mask = item->mask;
 
-                       if (!item->spec || !item->mask)
+                       if (!item->spec)
                                break;
 
+                       if (item->mask)
+                               udp_mask = item->mask;
+                       else
+                               udp_mask = &rte_flow_item_udp_mask;
+
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error,
@@ -554,7 +579,7 @@ bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                        }
 
                        /* Check if VNI is masked. */
-                       if (vxlan_spec && vxlan_mask) {
+                       if (vxlan_mask != NULL) {
                                vni_masked =
                                        !!memcmp(vxlan_mask->vni, vni_mask,
                                                 RTE_DIM(vni_mask));
@@ -900,6 +925,9 @@ static int bnxt_vnic_prep(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        uint64_t rx_offloads = dev_conf->rxmode.offloads;
        int rc;
 
+       if (bp->nr_vnics > bp->max_vnics - 1)
+               return -ENOMEM;
+
        rc = bnxt_vnic_grp_alloc(bp, vnic);
        if (rc)
                goto ret;
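
The new guard at the top of bnxt_vnic_prep() refuses to build a flow once
every VNIC the firmware advertises is already in use. A sketch of the same
condition in its more direct ">=" form:

    /* Equivalent form of the guard added above:
     * nr_vnics > max_vnics - 1  <=>  nr_vnics >= max_vnics,
     * i.e. no free VNIC is left for this flow.
     */
    if (bp->nr_vnics >= bp->max_vnics)
            return -ENOMEM;
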
@@ -1056,6 +1084,13 @@ start:
                }
                PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);
 
+               if (use_ntuple && !BNXT_RFS_NEEDS_VNIC(bp)) {
+                       filter->flags =
+                               HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DEST_RFS_RING_IDX;
+                       filter->dst_id = act_q->index;
+                       goto skip_vnic_alloc;
+               }
+
                vnic_id = attr->group;
                if (!vnic_id) {
                        PMD_DRV_LOG(DEBUG, "Group id is 0\n");
@@ -1127,7 +1162,7 @@ use_vnic:
                PMD_DRV_LOG(DEBUG,
                            "Setting vnic ff_idx %d\n", vnic->ff_pool_idx);
                filter->dst_id = vnic->fw_vnic_id;
-
+skip_vnic_alloc:
                /* For ntuple filter, create the L2 filter with default VNIC.
                 * The user specified redirect queue will be set while creating
                 * the ntuple filter in hardware.
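
Together, the two hunks above let an ntuple filter skip per-flow VNIC
allocation when BNXT_RFS_NEEDS_VNIC() reports that the device can steer by
ring index: the filter is flagged with DEST_RFS_RING_IDX and its destination
becomes the queue index itself. A condensed sketch of the resulting control
flow, using only names that appear in the diff (surrounding code omitted):

    if (use_ntuple && !BNXT_RFS_NEEDS_VNIC(bp)) {
            /* Steer directly by Rx ring index; no per-flow VNIC. */
            filter->flags =
                    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DEST_RFS_RING_IDX;
            filter->dst_id = act_q->index;
    } else {
            /* Original path: resolve a VNIC for the target queue and
             * point the filter at its firmware VNIC id.
             */
            filter->dst_id = vnic->fw_vnic_id;
    }
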
@@ -1365,13 +1400,15 @@ use_vnic:
                if (vnic->rx_queue_cnt > 1) {
                        vnic->hash_type =
                                bnxt_rte_to_hwrm_hash_types(rss->types);
+                       vnic->hash_mode =
+                       bnxt_rte_to_hwrm_hash_level(bp, rss->types, rss->level);
 
                        if (!rss->key_len) {
                                /* If hash key has not been specified,
                                 * use random hash key.
                                 */
-                               prandom_bytes(vnic->rss_hash_key,
-                                             HW_HASH_KEY_SIZE);
+                               bnxt_prandom_bytes(vnic->rss_hash_key,
+                                                  HW_HASH_KEY_SIZE);
                        } else {
                                if (rss->key_len > HW_HASH_KEY_SIZE)
                                        memcpy(vnic->rss_hash_key,
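
As shown above, when the RSS action supplies no key (key_len == 0) the driver
now fills the VNIC hash key with pseudo-random bytes through
bnxt_prandom_bytes(); a caller-provided key is copied, clamped to
HW_HASH_KEY_SIZE. A sketch of that selection, assuming the prototypes implied
by the diff:

    if (rss->key_len == 0)
            /* No key given: use a random hash key. */
            bnxt_prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE);
    else
            /* User key, truncated to the hardware key size. */
            memcpy(vnic->rss_hash_key, rss->key,
                   RTE_MIN(rss->key_len, (uint32_t)HW_HASH_KEY_SIZE));
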
@@ -1516,6 +1553,7 @@ bnxt_flow_validate(struct rte_eth_dev *dev,
                        bnxt_hwrm_vnic_ctx_free(bp, vnic);
                        bnxt_hwrm_vnic_free(bp, vnic);
                        vnic->rx_queue_cnt = 0;
+                       bp->nr_vnics--;
                        PMD_DRV_LOG(DEBUG, "Free VNIC\n");
                }
        }
@@ -1806,7 +1844,10 @@ bnxt_flow_create(struct rte_eth_dev *dev,
                }
        }
 
-       vnic = find_matching_vnic(bp, filter);
+       if (BNXT_RFS_NEEDS_VNIC(bp))
+               vnic = find_matching_vnic(bp, filter);
+       else
+               vnic = BNXT_GET_DEFAULT_VNIC(bp);
 done:
        if (!ret || update_flow) {
                flow->filter = filter;
@@ -1974,6 +2015,7 @@ done:
 
                        bnxt_hwrm_vnic_free(bp, vnic);
                        vnic->rx_queue_cnt = 0;
+                       bp->nr_vnics--;
                }
        } else {
                rte_flow_error_set(error, -ret,