net/qede/base: support packet pacing
author		Rasesh Mody <rasesh.mody@cavium.com>
		Mon, 9 Apr 2018 04:48:09 +0000 (21:48 -0700)
committer	Ferruh Yigit <ferruh.yigit@intel.com>
		Fri, 13 Apr 2018 22:41:44 +0000 (00:41 +0200)
Add packet pacing support for PFs.
The ecore client can request packet pacing at init time; when pacing is
requested, ecore skips the MCoS and SRIOV configurations.
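
A minimal sketch of how a client would opt in, mirroring the qed_probe()
parameters touched in qede_main.c below (illustrative only, not part of
this patch):

	struct ecore_hw_prepare_params hw_prepare_params;
	enum _ecore_status_t rc;

	/* Request packet pacing before preparing the HW functions; with
	 * b_en_pacing set, ecore skips the MCoS and SRIOV configuration.
	 */
	memset(&hw_prepare_params, 0, sizeof(hw_prepare_params));
	hw_prepare_params.chk_reg_fifo = false;
	hw_prepare_params.initiate_pf_flr = true;
	hw_prepare_params.allow_mdump = false;
	hw_prepare_params.b_en_pacing = true;
	hw_prepare_params.epoch = (u32)time(NULL);
	rc = ecore_hw_prepare(edev, &hw_prepare_params);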

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
drivers/net/qede/base/ecore.h
drivers/net/qede/base/ecore_dev.c
drivers/net/qede/base/ecore_dev_api.h
drivers/net/qede/base/ecore_l2.c
drivers/net/qede/qede_main.c

index c8e6311..b6541dc 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -41,6 +41,9 @@
        ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |  \
         (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
 
+#define IS_ECORE_PACING(p_hwfn)        \
+       (!!(p_hwfn->b_en_pacing))
+
 #define MAX_HWFNS_PER_DEVICE   2
 #define NAME_SIZE 128 /* @DPDK */
 #define ECORE_WFQ_UNIT 100
@@ -680,6 +683,13 @@ struct ecore_hwfn {
        /* Mechanism for recovering from doorbell drop */
        struct ecore_db_recovery_info   db_recovery_info;
 
+       /* Enable/disable pacing. If pacing is enabled, the IOV and MCoS
+        * configurations are skipped. This reflects the value requested
+        * by the ecore client in struct ecore_hw_prepare_params.
+        */
+       bool b_en_pacing;
+
        /* @DPDK */
        struct ecore_ptt                *p_arfs_ptt;
 };
@@ -932,12 +942,16 @@ void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb,
 #define PQ_FLAGS_ACK   (1 << 4)
 #define PQ_FLAGS_OFLD  (1 << 5)
 #define PQ_FLAGS_VFS   (1 << 6)
+#define PQ_FLAGS_LLT   (1 << 7)
 
 /* physical queue index for cm context initialization */
 u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags);
 u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc);
 u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf);
-u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 qpid);
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl);
+
+/* qm vport for rate limit configuration */
+u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl);
 
 const char *ecore_hw_get_resc_name(enum ecore_resources res_id);
 
index 112f854..b1e67e2 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -513,11 +513,14 @@ static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
        /* feature flags */
        if (IS_ECORE_SRIOV(p_hwfn->p_dev))
                flags |= PQ_FLAGS_VFS;
+       if (IS_ECORE_PACING(p_hwfn))
+               flags |= PQ_FLAGS_RLS;
 
        /* protocol flags */
        switch (p_hwfn->hw_info.personality) {
        case ECORE_PCI_ETH:
-               flags |= PQ_FLAGS_MCOS;
+               if (!IS_ECORE_PACING(p_hwfn))
+                       flags |= PQ_FLAGS_MCOS;
                break;
        case ECORE_PCI_FCOE:
                flags |= PQ_FLAGS_OFLD;
@@ -526,11 +529,14 @@ static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
                flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
                break;
        case ECORE_PCI_ETH_ROCE:
-               flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD;
+               flags |= PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
+               if (!IS_ECORE_PACING(p_hwfn))
+                       flags |= PQ_FLAGS_MCOS;
                break;
        case ECORE_PCI_ETH_IWARP:
-               flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
-                        PQ_FLAGS_OFLD;
+               flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
+               if (!IS_ECORE_PACING(p_hwfn))
+                       flags |= PQ_FLAGS_MCOS;
                break;
        default:
                DP_ERR(p_hwfn, "unknown personality %d\n",
@@ -837,7 +843,7 @@ u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
        return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
 }
 
-u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl)
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
 {
        u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);
 
@@ -847,6 +853,23 @@ u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl)
        return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
 }
 
+u16 ecore_get_qm_vport_idx_rl(struct ecore_hwfn *p_hwfn, u16 rl)
+{
+       u16 start_pq, pq, qm_pq_idx;
+
+       pq = ecore_get_cm_pq_idx_rl(p_hwfn, rl);
+       start_pq = p_hwfn->qm_info.start_pq;
+       qm_pq_idx = pq - start_pq - CM_TX_PQ_BASE;
+
+       if (qm_pq_idx > p_hwfn->qm_info.num_pqs) {
+               DP_ERR(p_hwfn,
+                      "qm_pq_idx %d must be smaller than %d\n",
+                       qm_pq_idx, p_hwfn->qm_info.num_pqs);
+       }
+
+       return p_hwfn->qm_info.qm_pq_params[qm_pq_idx].vport_id;
+}
+
 /* Functions for creating specific types of pqs */
 static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn)
 {
@@ -3878,8 +3901,13 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
        bool drv_resc_alloc = p_params->drv_resc_alloc;
        enum _ecore_status_t rc;
 
+       if (IS_ECORE_PACING(p_hwfn)) {
+               DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_IOV,
+                          "Skipping IOV as packet pacing is requested\n");
+       }
+
        /* Since all information is common, only first hwfns should do this */
-       if (IS_LEAD_HWFN(p_hwfn)) {
+       if (IS_LEAD_HWFN(p_hwfn) && !IS_ECORE_PACING(p_hwfn)) {
                rc = ecore_iov_hw_info(p_hwfn);
                if (rc != ECORE_SUCCESS) {
                        if (p_params->b_relaxed_probe)
@@ -3964,7 +3992,10 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
         * that can result in performance penalty in some cases. 4
         * represents a good tradeoff between performance and flexibility.
         */
-       p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
+       if (IS_ECORE_PACING(p_hwfn))
+               p_hwfn->hw_info.num_hw_tc = 1;
+       else
+               p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
 
        /* start out with a single active tc. This can be increased either
         * by dcbx negotiation or by upper layer driver
@@ -4251,6 +4282,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
 
        p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
        p_dev->allow_mdump = p_params->allow_mdump;
+       p_hwfn->b_en_pacing = p_params->b_en_pacing;
 
        if (p_params->b_relaxed_probe)
                p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
@@ -4286,6 +4318,7 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
                                                          BAR_ID_1) / 2;
                p_doorbell = (void OSAL_IOMEM *)addr;
 
+               p_dev->hwfns[1].b_en_pacing = p_params->b_en_pacing;
                /* prepare second hw function */
                rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
                                             p_doorbell, p_params);
index f619683..29fb74b 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -270,6 +270,9 @@ struct ecore_hw_prepare_params {
         */
        bool b_relaxed_probe;
        enum ecore_hw_prepare_result p_relaxed_res;
+
+       /* Enable/disable packet pacing, as requested by the ecore client */
+       bool b_en_pacing;
 };
 
 /**
index 0883fd3..c897fa5 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -1188,11 +1188,20 @@ ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
                            void OSAL_IOMEM * *pp_doorbell)
 {
        enum _ecore_status_t rc;
+       u16 pq_id;
 
-       /* TODO - set tc in the pq_params for multi-cos */
-       rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
-                                       pbl_addr, pbl_size,
-                                       ecore_get_cm_pq_idx_mcos(p_hwfn, tc));
+       /* TODO - set tc in the pq_params for multi-cos.
+        * If pacing is enabled, select the queue according to rate
+        * limiter availability; otherwise select it based on multi-CoS.
+        */
+       if (IS_ECORE_PACING(p_hwfn))
+               pq_id = ecore_get_cm_pq_idx_rl(p_hwfn, p_cid->rel.queue_id);
+       else
+               pq_id = ecore_get_cm_pq_idx_mcos(p_hwfn, tc);
+
+       rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, pbl_addr,
+                                       pbl_size, pq_id);
        if (rc != ECORE_SUCCESS)
                return rc;
 
@@ -2278,3 +2287,22 @@ out:
 
        return rc;
 }
+
+enum _ecore_status_t
+ecore_eth_tx_queue_maxrate(struct ecore_hwfn *p_hwfn,
+                          struct ecore_ptt *p_ptt,
+                          struct ecore_queue_cid *p_cid, u32 rate)
+{
+       struct ecore_mcp_link_state *p_link;
+       u8 vport;
+
+       vport = (u8)ecore_get_qm_vport_idx_rl(p_hwfn, p_cid->rel.queue_id);
+       p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
+                  "About to rate limit qm vport %d for queue %d with rate %d\n",
+                  vport, p_cid->rel.queue_id, rate);
+
+       return ecore_init_vport_rl(p_hwfn, p_ptt, vport, rate,
+                                  p_link->speed);
+}
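
The new ecore_eth_tx_queue_maxrate() is the hook a client can use to cap an
individual Tx queue once pacing is enabled. A minimal usage sketch
(illustrative only; p_hwfn, p_cid and rate come from the caller's context,
and ecore_ptt_acquire()/ecore_ptt_release() are the existing PTT helpers):

	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc;

	/* Grab a PTT window, program the rate limiter for the queue's QM
	 * vport, then release the PTT. The rate is applied through
	 * ecore_init_vport_rl() together with the current link speed.
	 */
	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_AGAIN;

	rc = ecore_eth_tx_queue_maxrate(p_hwfn, p_ptt, p_cid, rate);
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;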
index 650f2cf..2333ca0 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -62,6 +62,7 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
        hw_prepare_params.chk_reg_fifo = false;
        hw_prepare_params.initiate_pf_flr = true;
        hw_prepare_params.allow_mdump = false;
+       hw_prepare_params.b_en_pacing = false;
        hw_prepare_params.epoch = (u32)time(NULL);
        rc = ecore_hw_prepare(edev, &hw_prepare_params);
        if (rc) {