ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
- switch (p_eqe->protocol_id) {
- case PROTOCOLID_COMMON:
- return ecore_sriov_eqe_event(p_hwfn,
- p_eqe->opcode,
- p_eqe->echo, &p_eqe->data);
- default:
+ ecore_spq_async_comp_cb cb; /* per-protocol async completion handler */
+
+ if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE)) /* guard: SPQ allocated, protocol index in array range */
+ return ECORE_INVAL;
+
+ cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id]; /* look up handler registered for this protocol */
+ if (cb) {
+ return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
+ &p_eqe->data, p_eqe->fw_return_code);
+ } else {
 DP_NOTICE(p_hwfn,
 true, "Unknown Async completion for protocol: %d\n",
 p_eqe->protocol_id);
+ return ECORE_INVAL; /* fix: function returns enum _ecore_status_t; without this the unknown-protocol path falls off the end (UB) */
 }
}
+enum _ecore_status_t
+ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
+ enum protocol_type protocol_id,
+ ecore_spq_async_comp_cb cb) /* install 'cb' as the async EQ-completion handler for protocol_id */
+{
+ if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) /* SPQ must exist and the protocol index must be in array range */
+ return ECORE_INVAL;
+
+ p_hwfn->p_spq->async_comp_cb[protocol_id] = cb; /* NOTE(review): overwrites any previously registered handler silently — confirm intended */
+ return ECORE_SUCCESS;
+}
+
+void
+ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
+ enum protocol_type protocol_id) /* remove the async EQ-completion handler for protocol_id */
+{
+ if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) /* nothing to clear if no SPQ or index out of range */
+ return;
+
+ p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL; /* OSAL_NULL slot means "no handler registered" */
+}
+
/***************************************************************************
* EQ API
***************************************************************************/
struct ecore_chain chain;
};
+typedef enum _ecore_status_t
+(*ecore_spq_async_comp_cb)(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ u16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code); /* per-protocol handler for async event-ring completions */
+
+enum _ecore_status_t
+ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
+ enum protocol_type protocol_id,
+ ecore_spq_async_comp_cb cb); /* returns ECORE_INVAL if SPQ missing or protocol_id >= MAX_PROTOCOL_TYPE */
+
+void
+ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
+ enum protocol_type protocol_id); /* clears the slot back to OSAL_NULL */
+
struct ecore_spq {
osal_spinlock_t lock;
u32 db_addr_offset;
struct core_db_data db_data;
+ ecore_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE]; /* one handler slot per protocol; OSAL_NULL = unregistered */
};
struct ecore_port;
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
+static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code); /* forward decl: handler is now static; registered for PROTOCOLID_COMMON via ecore_spq_register_async_cb() */
+
const char *ecore_channel_tlvs_string[] = {
"CHANNEL_TLV_NONE", /* ends tlv sequence */
"CHANNEL_TLV_ACQUIRE",
p_hwfn->pf_iov_info = p_sriov;
+ ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+ ecore_sriov_eqe_event);
+
return ecore_iov_allocate_vfdb(p_hwfn);
}
void ecore_iov_free(struct ecore_hwfn *p_hwfn)
{
+ ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
+
if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
ecore_iov_free_vfdb(p_hwfn);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
}
-enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
- u8 opcode,
- __le16 echo,
- union event_ring_data *data)
+static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data,
+ u8 OSAL_UNUSED fw_return_code)
{
switch (opcode) {
case COMMON_EVENT_VF_PF_CHANNEL:
*/
void ecore_iov_free_hw_info(struct ecore_dev *p_dev);
-/**
- * @brief ecore_sriov_eqe_event - handle async sriov event arrived on eqe.
- *
- * @param p_hwfn
- * @param opcode
- * @param echo
- * @param data
- */
-enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
- u8 opcode,
- __le16 echo,
- union event_ring_data *data);
-
/**
* @brief Mark structs of vfs that have been FLR-ed.
*