net/qede: add infrastructure support for VF load
author Manish Chopra <manishc@marvell.com>
Fri, 25 Sep 2020 11:55:06 +0000 (04:55 -0700)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Wed, 30 Sep 2020 17:19:11 +0000 (19:19 +0200)
This patch adds the infrastructure support required to handle
messages from a VF and to send ramrods on behalf of the VF's
configuration requests from the alarm handler context, so that a
VF-PMD driver instance can be started/loaded on top of the PF-PMD
driver instance.
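
At its core the patch defers VF mailbox handling to DPDK's alarm API:
the caller only sets a bit in iov_task_flags and arms a one-shot
alarm, and the alarm callback later consumes the bit and does the
actual work. The sketch below is not part of the patch; it is a
minimal illustration of that pattern under hypothetical names
(my_ctx, my_schedule_iov, my_iov_task, MY_MSG_FLAG) and uses plain,
non-atomic bit operations where the patch uses OSAL_SET_BIT,
OSAL_GET_BIT and OSAL_CLEAR_BIT. rte_eal_alarm_set() and
rte_eal_alarm_cancel() are the real DPDK calls relied on by
qed_schedule_iov() and qed_stop_iov_task().

  #include <stdint.h>
  #include <stdio.h>

  #include <rte_alarm.h>

  #define MY_MSG_FLAG 0 /* stands in for QED_IOV_WQ_MSG_FLAG */

  struct my_ctx {
          uint32_t task_flags; /* stands in for p_hwfn->iov_task_flags */
  };

  /* Runs later, in the alarm handler context (like qed_iov_pf_task). */
  static void my_iov_task(void *arg)
  {
          struct my_ctx *ctx = arg;

          if (ctx->task_flags & (1U << MY_MSG_FLAG)) {
                  ctx->task_flags &= ~(1U << MY_MSG_FLAG);
                  printf("handle the pending VF message here\n");
          }
  }

  /* Fast-path side (like qed_schedule_iov): record the work and arm a
   * 1 us one-shot alarm that invokes the task callback. */
  static int my_schedule_iov(struct my_ctx *ctx, uint32_t flag)
  {
          ctx->task_flags |= (1U << flag);
          return rte_eal_alarm_set(1, my_iov_task, ctx);
  }

  /* Teardown side (like qed_stop_iov_task): cancel any pending alarm. */
  static void my_stop_iov(struct my_ctx *ctx)
  {
          rte_eal_alarm_cancel(my_iov_task, ctx);
  }

In the patch itself, qed_schedule_iov() and qed_iov_pf_task() follow
this pattern with p_hwfn->iov_task_flags as the flag word, and the
pending alarm is cancelled from qed_stop_iov_task() on teardown.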

Signed-off-by: Manish Chopra <manishc@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: Rasesh Mody <rmody@marvell.com>
drivers/net/qede/base/bcm_osal.c
drivers/net/qede/base/bcm_osal.h
drivers/net/qede/base/ecore.h
drivers/net/qede/base/ecore_iov_api.h
drivers/net/qede/qede_ethdev.c
drivers/net/qede/qede_main.c
drivers/net/qede/qede_sriov.c
drivers/net/qede/qede_sriov.h

index 65837b5..ef47339 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
 #include "ecore_iov_api.h"
 #include "ecore_mcp_api.h"
 #include "ecore_l2_api.h"
+#include "../qede_sriov.h"
+
+int osal_pf_vf_msg(struct ecore_hwfn *p_hwfn)
+{
+       int rc;
+
+       rc = qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
+       if (rc) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "Failed to schedule alarm handler rc=%d\n", rc);
+       }
+
+       return rc;
+}
+
+void osal_poll_mode_dpc(osal_int_ptr_t hwfn_cookie)
+{
+       struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
+
+       if (!p_hwfn)
+               return;
+
+       OSAL_SPIN_LOCK(&p_hwfn->spq_lock);
+       ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
+       OSAL_SPIN_UNLOCK(&p_hwfn->spq_lock);
+}
 
 /* Array of memzone pointers */
 static const struct rte_memzone *ecore_mz_mapping[RTE_MAX_MEMZONE];
index 681d2d5..e7212f4 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -177,9 +177,12 @@ typedef pthread_mutex_t osal_mutex_t;
 
 /* DPC */
 
+void osal_poll_mode_dpc(osal_int_ptr_t hwfn_cookie);
 #define OSAL_DPC_ALLOC(hwfn) OSAL_ALLOC(hwfn, GFP, sizeof(osal_dpc_t))
-#define OSAL_DPC_INIT(dpc, hwfn) nothing
-#define OSAL_POLL_MODE_DPC(hwfn) nothing
+#define OSAL_DPC_INIT(dpc, hwfn) \
+       OSAL_SPIN_LOCK_INIT(&(hwfn)->spq_lock)
+#define OSAL_POLL_MODE_DPC(hwfn) \
+       osal_poll_mode_dpc((osal_int_ptr_t)(hwfn))
 #define OSAL_DPC_SYNC(hwfn) nothing
 
 /* Lists */
@@ -344,10 +347,12 @@ u32 qede_find_first_zero_bit(u32 *bitmap, u32 length);
 
 /* SR-IOV channel */
 
+int osal_pf_vf_msg(struct ecore_hwfn *p_hwfn);
 #define OSAL_VF_FLR_UPDATE(hwfn) nothing
 #define OSAL_VF_SEND_MSG2PF(dev, done, msg, reply_addr, msg_size, reply_size) 0
 #define OSAL_VF_CQE_COMPLETION(_dev_p, _cqe, _protocol)        (0)
-#define OSAL_PF_VF_MSG(hwfn, vfid) 0
+#define OSAL_PF_VF_MSG(hwfn, vfid) \
+       osal_pf_vf_msg(hwfn)
 #define OSAL_PF_VF_MALICIOUS(hwfn, vfid) nothing
 #define OSAL_IOV_CHK_UCAST(hwfn, vfid, params) 0
 #define OSAL_IOV_POST_START_VPORT(hwfn, vf, vport_id, opaque_fid) nothing
index 750e99a..6c8e6d4 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -714,6 +714,10 @@ struct ecore_hwfn {
 
        /* @DPDK */
        struct ecore_ptt                *p_arfs_ptt;
+
+       /* DPDK specific, not part of vanilla ecore */
+       osal_spinlock_t spq_lock;
+       u32 iov_task_flags;
 };
 
 enum ecore_mf_mode {
index 5450018..bd7c570 100644
--- a/drivers/net/qede/base/ecore_iov_api.h
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -14,6 +14,9 @@
 #define ECORE_ETH_VF_NUM_VLAN_FILTERS 2
 #define ECORE_VF_ARRAY_LENGTH (3)
 
+#define ECORE_VF_ARRAY_GET_VFID(arr, vfid)     \
+       (((arr)[(vfid) / 64]) & (1ULL << ((vfid) % 64)))
+
 #define IS_VF(p_dev)           ((p_dev)->b_is_vf)
 #define IS_PF(p_dev)           (!((p_dev)->b_is_vf))
 #ifdef CONFIG_ECORE_SRIOV
index 27ec42e..3cc1419 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -281,7 +281,9 @@ out:
 
 static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
 {
+       OSAL_SPIN_LOCK(&p_hwfn->spq_lock);
        ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn));
+       OSAL_SPIN_UNLOCK(&p_hwfn->spq_lock);
 }
 
 static void
index c113582..6a7bfc1 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -221,7 +221,9 @@ static void qed_stop_iov_task(struct ecore_dev *edev)
 
        for_each_hwfn(edev, i) {
                p_hwfn = &edev->hwfns[i];
-               if (!IS_PF(edev))
+               if (IS_PF(edev))
+                       rte_eal_alarm_cancel(qed_iov_pf_task, p_hwfn);
+               else
                        rte_eal_alarm_cancel(qede_vf_task, p_hwfn);
        }
 }
index ba4384e..6d620dd 100644
--- a/drivers/net/qede/qede_sriov.c
+++ b/drivers/net/qede/qede_sriov.c
@@ -4,6 +4,14 @@
  * www.marvell.com
  */
 
+#include <rte_alarm.h>
+
+#include "base/bcm_osal.h"
+#include "base/ecore.h"
+#include "base/ecore_sriov.h"
+#include "base/ecore_mcp.h"
+#include "base/ecore_vf.h"
+
 #include "qede_sriov.h"
 
 static void qed_sriov_enable_qid_config(struct ecore_hwfn *hwfn,
@@ -83,3 +91,56 @@ void qed_sriov_configure(struct ecore_dev *edev, int num_vfs_param)
        if (num_vfs_param)
                qed_sriov_enable(edev, num_vfs_param);
 }
+
+static void qed_handle_vf_msg(struct ecore_hwfn *hwfn)
+{
+       u64 events[ECORE_VF_ARRAY_LENGTH];
+       struct ecore_ptt *ptt;
+       int i;
+
+       ptt = ecore_ptt_acquire(hwfn);
+       if (!ptt) {
+               DP_NOTICE(hwfn, true, "PTT acquire failed\n");
+               qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
+               return;
+       }
+
+       ecore_iov_pf_get_pending_events(hwfn, events);
+
+       ecore_for_each_vf(hwfn, i) {
+               /* Skip VFs with no pending messages */
+               if (!ECORE_VF_ARRAY_GET_VFID(events, i))
+                       continue;
+
+               DP_VERBOSE(hwfn, ECORE_MSG_IOV,
+                          "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
+                          i, hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
+
+               /* Copy VF's message to PF's request buffer for that VF */
+               if (ecore_iov_copy_vf_msg(hwfn, ptt, i))
+                       continue;
+
+               ecore_iov_process_mbx_req(hwfn, ptt, i);
+       }
+
+       ecore_ptt_release(hwfn, ptt);
+}
+
+void qed_iov_pf_task(void *arg)
+{
+       struct ecore_hwfn *p_hwfn = arg;
+
+       if (OSAL_GET_BIT(QED_IOV_WQ_MSG_FLAG, &p_hwfn->iov_task_flags)) {
+               OSAL_CLEAR_BIT(QED_IOV_WQ_MSG_FLAG, &p_hwfn->iov_task_flags);
+               qed_handle_vf_msg(p_hwfn);
+       }
+}
+
+int qed_schedule_iov(struct ecore_hwfn *p_hwfn, enum qed_iov_wq_flag flag)
+{
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Scheduling iov task [Flag: %d]\n",
+                  flag);
+
+       OSAL_SET_BIT(flag, &p_hwfn->iov_task_flags);
+       return rte_eal_alarm_set(1, qed_iov_pf_task, p_hwfn);
+}
index 6c85b1d..8b7fa7d 100644
--- a/drivers/net/qede/qede_sriov.h
+++ b/drivers/net/qede/qede_sriov.h
@@ -4,6 +4,18 @@
  * www.marvell.com
  */
 
-#include "qede_ethdev.h"
-
 void qed_sriov_configure(struct ecore_dev *edev, int num_vfs_param);
+
+enum qed_iov_wq_flag {
+       QED_IOV_WQ_MSG_FLAG,
+       QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
+       QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+       QED_IOV_WQ_STOP_WQ_FLAG,
+       QED_IOV_WQ_FLR_FLAG,
+       QED_IOV_WQ_TRUST_FLAG,
+       QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
+       QED_IOV_WQ_DB_REC_HANDLER,
+};
+
+int qed_schedule_iov(struct ecore_hwfn *p_hwfn, enum qed_iov_wq_flag flag);
+void qed_iov_pf_task(void *arg);