Extended stats = Y
Stats per queue = Y
Registers dump = Y
+SR-IOV = Y
Multiprocess aware = Y
Linux UIO = Y
Linux VFIO = Y
- VLAN offload - Filtering and stripping
- N-tuple filter and flow director (limited support)
- NPAR (NIC Partitioning)
-- SR-IOV VF
+- SR-IOV PF and VF
- GRE Tunneling offload
- GENEVE Tunneling offload
- VXLAN Tunneling offload
- MPLSoUDP Tx Tunneling offload
- Generic flow API
-Non-supported Features
-----------------------
-
-- SR-IOV PF
-
Co-existence considerations
---------------------------
	return rc;
}
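
+/* OSAL hook behind OSAL_VF_FLR_UPDATE(); ecore invokes it when the
+ * management FW reports a VF FLR. The cleanup itself is deferred to
+ * the PF's IOV worker task, which can acquire a PTT.
+ */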
+void osal_vf_flr_update(struct ecore_hwfn *p_hwfn)
+{
+	qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
+}
+
void osal_poll_mode_dpc(osal_int_ptr_t hwfn_cookie)
{
	struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
/* SR-IOV channel */
int osal_pf_vf_msg(struct ecore_hwfn *p_hwfn);
-#define OSAL_VF_FLR_UPDATE(hwfn) nothing
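+/* Forward VF FLR notifications from ecore to the PF IOV worker. */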
+void osal_vf_flr_update(struct ecore_hwfn *p_hwfn);
+#define OSAL_VF_FLR_UPDATE(hwfn) \
+	osal_vf_flr_update(hwfn)
#define OSAL_VF_SEND_MSG2PF(dev, done, msg, reply_addr, msg_size, reply_size) 0
#define OSAL_VF_CQE_COMPLETION(_dev_p, _cqe, _protocol) (0)
#define OSAL_PF_VF_MSG(hwfn, vfid) \
	osal_pf_vf_msg(hwfn)
void qed_iov_pf_task(void *arg)
{
	struct ecore_hwfn *p_hwfn = arg;
+	int rc;
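
	/* Drain pending VF->PF mailbox messages and bulletin board updates
	 * before handling any FLR work below.
	 */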
	if (OSAL_GET_BIT(QED_IOV_WQ_MSG_FLAG, &p_hwfn->iov_task_flags)) {
		OSAL_CLEAR_BIT(QED_IOV_WQ_MSG_FLAG, &p_hwfn->iov_task_flags);
		qed_handle_pf_vf_msg(p_hwfn);
	}

	if (OSAL_GET_BIT(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
			 &p_hwfn->iov_task_flags)) {
		OSAL_CLEAR_BIT(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
			       &p_hwfn->iov_task_flags);
		qed_handle_bulletin_post(p_hwfn);
	}
+
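+	/* VF FLR cleanup needs a PTT window on the HW; if none is available,
+	 * or the cleanup fails, re-schedule the FLR work so the next worker
+	 * run retries it.
+	 */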
+	if (OSAL_GET_BIT(QED_IOV_WQ_FLR_FLAG, &p_hwfn->iov_task_flags)) {
+		struct ecore_ptt *p_ptt = ecore_ptt_acquire(p_hwfn);
+
+		OSAL_CLEAR_BIT(QED_IOV_WQ_FLR_FLAG, &p_hwfn->iov_task_flags);
+
+		if (!p_ptt) {
+			qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
+			return;
+		}
+
+		rc = ecore_iov_vf_flr_cleanup(p_hwfn, p_ptt);
+		if (rc)
+			qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
+
+		ecore_ptt_release(p_hwfn, p_ptt);
+	}
}
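
/* qed_schedule_iov() marks the given work type pending for the hwfn's
 * IOV worker; qed_iov_pf_task() above consumes these flags.
 */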
int qed_schedule_iov(struct ecore_hwfn *p_hwfn, enum qed_iov_wq_flag flag)