net/bnxt: fix VF resource allocation
author Ajit Khaparde <ajit.khaparde@broadcom.com>
Tue, 22 May 2018 18:13:45 +0000 (11:13 -0700)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Tue, 22 May 2018 22:35:01 +0000 (00:35 +0200)
If the resource requirement of a VF is more than what the FW has
allotted to it, the VF can request a reallocation of some of those
resources.

This is possible only when the NIC is running a Resource Manager-aware
CHiMP FW.
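
For reference, the HWRM spec-code constants pack the major/minor/update
digits into a single value, so a plain numeric compare gates the new
path. A condensed illustration, using names from the bnxt_hwrm.c hunk
below (not a literal excerpt):

    /* 0x10803 -> HWRM spec 1.8.3, 0x10901 -> 1.9.1 */
    #define HWRM_SPEC_CODE_1_8_3   0x10803
    #define HWRM_VERSION_1_9_1     0x10901

    /* An RM-aware CHiMP FW is assumed from spec 1.8.3 on; the flag is
     * kept only if FUNC_RESOURCE_QCAPS also succeeds (see
     * bnxt_hwrm_func_qcaps() in the diff). */
    if (bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3)
            bp->flags |= BNXT_FLAG_NEW_RM;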

If necessary, calculate the Tx and Rx ring counts using the new RM API;
otherwise use the Tx and Rx ring counts as-is. Update the completion
(cp) ring count based on the Tx and Rx ring counts.
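
Condensed, the resulting configure-time flow looks roughly as follows
(paraphrased from the bnxt_ethdev.c and bnxt_hwrm.c hunks below; the
helper name is illustrative only and error logging is omitted):

    static int bnxt_vf_ring_reservation_sketch(struct bnxt *bp,
                                               struct rte_eth_dev *eth_dev)
    {
            /* Start from the ring counts requested via the ethdev config. */
            bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
            bp->rx_nr_rings = eth_dev->data->nb_rx_queues;

            if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
                    /* Reserve what we need via FUNC_VF_CFG, then
                     * re-query the granted limits. */
                    if (bnxt_hwrm_func_reserve_vf_resc(bp) ||
                        bnxt_hwrm_func_qcaps(bp))
                            return -ENOSPC;
            }

            /* cp ring counts simply follow the Tx and Rx ring counts. */
            bp->rx_cp_nr_rings = bp->rx_nr_rings;
            bp->tx_cp_nr_rings = bp->tx_nr_rings;
            return 0;
    }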

Fixes: b7778e8a1c00 ("net/bnxt: refactor to properly allocate resources for PF/VF")
Signed-off-by: Jay Ding <jay.ding@broadcom.com>
Signed-off-by: Scott Branden <scott.branden@broadcom.com>
Reviewed-by: Ray Jui <ray.jui@broadcom.com>
Reviewed-by: Randy Schacher <stuart.schacher@broadcom.com>
Signed-off-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
drivers/net/bnxt/bnxt.h
drivers/net/bnxt/bnxt_ethdev.c
drivers/net/bnxt/bnxt_hwrm.c
drivers/net/bnxt/bnxt_hwrm.h
drivers/net/bnxt/bnxt_ring.h

diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h
index a93fbf5334041900c3368b3c964e9cce9d5f3963..afaaf8c41b23da61a101795493484b27c36631a6 100644
@@ -220,6 +220,7 @@ struct bnxt {
 #define BNXT_FLAG_UPDATE_HASH  (1 << 5)
 #define BNXT_FLAG_PTP_SUPPORTED        (1 << 6)
 #define BNXT_FLAG_MULTI_HOST    (1 << 7)
+#define BNXT_FLAG_NEW_RM       (1 << 30)
 #define BNXT_FLAG_INIT_DONE    (1 << 31)
 #define BNXT_PF(bp)            (!((bp)->flags & BNXT_FLAG_VF))
 #define BNXT_VF(bp)            ((bp)->flags & BNXT_FLAG_VF)
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 0aba9a3fcd0a62c170c883c2d8311b54a4199cf5..62e02aa8af19df5b67281c0dea86e4fdb40afdd1 100644
@@ -406,9 +406,7 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
        /* PF/VF specifics */
        if (BNXT_PF(bp))
                dev_info->max_vfs = bp->pdev->max_vfs;
-       max_rx_rings = RTE_MIN(bp->max_vnics, RTE_MIN(bp->max_l2_ctx,
-                                               RTE_MIN(bp->max_rsscos_ctx,
-                                               bp->max_stat_ctx)));
+       max_rx_rings = RTE_MIN(bp->max_vnics, bp->max_stat_ctx);
        /* For the sake of symmetry, max_rx_queues = max_tx_queues */
        dev_info->max_rx_queues = max_rx_rings;
        dev_info->max_tx_queues = max_rx_rings;
@@ -494,6 +492,25 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
 
        bp->rx_queues = (void *)eth_dev->data->rx_queues;
        bp->tx_queues = (void *)eth_dev->data->tx_queues;
+       bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
+       bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
+
+       if (BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)) {
+               int rc;
+
+               rc = bnxt_hwrm_func_reserve_vf_resc(bp);
+               if (rc) {
+                       PMD_DRV_LOG(ERR, "HWRM resource alloc fail:%x\n", rc);
+                       return -ENOSPC;
+               }
+
+               /* legacy driver needs to get updated values */
+               rc = bnxt_hwrm_func_qcaps(bp);
+               if (rc) {
+                       PMD_DRV_LOG(ERR, "hwrm func qcaps fail:%d\n", rc);
+                       return -ENOSPC;
+               }
+       }
 
        /* Inherit new configurations */
        if (eth_dev->data->nb_rx_queues > bp->max_rx_rings ||
@@ -516,8 +533,6 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
                return -ENOSPC;
        }
 
-       bp->rx_nr_rings = eth_dev->data->nb_rx_queues;
-       bp->tx_nr_rings = eth_dev->data->nb_tx_queues;
        bp->rx_cp_nr_rings = bp->rx_nr_rings;
        bp->tx_cp_nr_rings = bp->tx_nr_rings;
 
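The nb_rx/tx_queues assignments move ahead of the reservation call
because the FUNC_VF_CFG request is sized from those fields; condensed
from the bnxt_hwrm.c hunk below:

    /* One completion ring and one stat context per Tx or Rx ring;
     * Rx rings are doubled to cover the aggregation rings. */
    req.num_tx_rings     = rte_cpu_to_le_16(bp->tx_nr_rings);
    req.num_rx_rings     = rte_cpu_to_le_16(bp->rx_nr_rings *
                                            AGG_RING_MULTIPLIER);
    req.num_cmpl_rings   = rte_cpu_to_le_16(bp->rx_nr_rings +
                                            bp->tx_nr_rings);
    req.num_stat_ctxs    = rte_cpu_to_le_16(bp->rx_nr_rings +
                                            bp->tx_nr_rings);
    req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
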
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index bf847a828a49986ba2e2fb9fe7d3991dd5af553a..ba4ef16760655321b602b1b555a774e9705d7018 100644
@@ -27,6 +27,7 @@
 #include <rte_io.h>
 
 #define HWRM_CMD_TIMEOUT               10000
+#define HWRM_SPEC_CODE_1_8_3           0x10803
 #define HWRM_VERSION_1_9_1             0x10901
 
 struct bnxt_plcmodes_cfg {
@@ -483,7 +484,7 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
        return 0;
 }
 
-int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
 {
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
@@ -573,6 +574,20 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
        return rc;
 }
 
+int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+{
+       int rc;
+
+       rc = __bnxt_hwrm_func_qcaps(bp);
+       if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
+               rc = bnxt_hwrm_func_resc_qcaps(bp);
+               if (!rc)
+                       bp->flags |= BNXT_FLAG_NEW_RM;
+       }
+
+       return rc;
+}
+
 int bnxt_hwrm_func_reset(struct bnxt *bp)
 {
        int rc = 0;
@@ -642,6 +657,64 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
        return rc;
 }
 
+int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp)
+{
+       int rc;
+       struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_func_vf_cfg_input req = {0};
+
+       HWRM_PREP(req, FUNC_VF_CFG);
+
+       req.enables = rte_cpu_to_le_32
+                       (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
+                       HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
+                       HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
+                       HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
+                       HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
+
+       req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
+       req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
+                                           AGG_RING_MULTIPLIER);
+       req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
+       req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
+                                             bp->tx_nr_rings);
+       req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+       HWRM_CHECK_RESULT();
+       HWRM_UNLOCK();
+       return rc;
+}
+
+int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
+{
+       int rc;
+       struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_func_resource_qcaps_input req = {0};
+
+       HWRM_PREP(req, FUNC_RESOURCE_QCAPS);
+       req.fid = rte_cpu_to_le_16(0xffff);
+
+       rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+       HWRM_CHECK_RESULT();
+
+       if (BNXT_VF(bp)) {
+               bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
+               bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
+               bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
+               bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
+               bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
+               bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+               bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
+               bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
+       }
+
+       HWRM_UNLOCK();
+       return rc;
+}
+
 int bnxt_hwrm_ver_get(struct bnxt *bp)
 {
        int rc = 0;
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 4813c7fb642425b10ef48c4f8a8206549ed69338..60a4ab16a06529d94d77fcd0ceb451a52fc8eda7 100644
@@ -111,6 +111,8 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp);
 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link);
 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up);
 int bnxt_hwrm_func_qcfg(struct bnxt *bp);
+int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp);
+int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp);
 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp);
 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs);
 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf,
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
index 6c86259e84f98226e444faee232eb04d52478a5e..65bf3e2f5ad64ea001a99f5f6c9957972900f5bb 100644
@@ -28,6 +28,7 @@
 
 #define BNXT_TPA_MAX           64
 #define AGG_RING_SIZE_FACTOR   2
+#define AGG_RING_MULTIPLIER    2
 
 /* These assume 4k pages */
 #define MAX_RX_DESC_CNT (8 * 1024)