p_hwfn->db_phys_addr = db_phys_addr;
if (IS_VF(p_dev))
- return ecore_vf_hw_prepare(p_hwfn);
+ return ecore_vf_hw_prepare(p_hwfn, p_params);
/* Validate that chip access is feasible */
if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
/* Indicates whether this PF serves a storage target */
bool b_is_target;
+
+ /* retry count for VF acquire on channel timeout */
+ u8 acquire_retry_cnt;
};
/**
PFVF_STATUS_NO_RESOURCE,
PFVF_STATUS_FORCED,
PFVF_STATUS_MALICIOUS,
+ PFVF_STATUS_ACQUIRED,
};
struct ecore_mcp_link_params;
"CHANNEL_TLV_COALESCE_READ",
"CHANNEL_TLV_BULLETIN_UPDATE_MAC",
"CHANNEL_TLV_UPDATE_MTU",
+ "CHANNEL_TLV_RDMA_ACQUIRE",
+ "CHANNEL_TLV_RDMA_START",
+ "CHANNEL_TLV_RDMA_STOP",
+ "CHANNEL_TLV_RDMA_ADD_USER",
+ "CHANNEL_TLV_RDMA_REMOVE_USER",
+ "CHANNEL_TLV_RDMA_QUERY_COUNTERS",
+ "CHANNEL_TLV_RDMA_ALLOC_TID",
+ "CHANNEL_TLV_RDMA_REGISTER_TID",
+ "CHANNEL_TLV_RDMA_DEREGISTER_TID",
+ "CHANNEL_TLV_RDMA_FREE_TID",
+ "CHANNEL_TLV_RDMA_CREATE_CQ",
+ "CHANNEL_TLV_RDMA_RESIZE_CQ",
+ "CHANNEL_TLV_RDMA_DESTROY_CQ",
+ "CHANNEL_TLV_RDMA_CREATE_QP",
+ "CHANNEL_TLV_RDMA_MODIFY_QP",
+ "CHANNEL_TLV_RDMA_QUERY_QP",
+ "CHANNEL_TLV_RDMA_DESTROY_QP",
+ "CHANNEL_TLV_RDMA_CREATE_SRQ",
+ "CHANNEL_TLV_RDMA_MODIFY_SRQ",
+ "CHANNEL_TLV_RDMA_DESTROY_SRQ",
+ "CHANNEL_TLV_RDMA_QUERY_PORT",
+ "CHANNEL_TLV_RDMA_QUERY_DEVICE",
+ "CHANNEL_TLV_RDMA_IWARP_CONNECT",
+ "CHANNEL_TLV_RDMA_IWARP_ACCEPT",
+ "CHANNEL_TLV_RDMA_IWARP_CREATE_LISTEN",
+ "CHANNEL_TLV_RDMA_IWARP_DESTROY_LISTEN",
+ "CHANNEL_TLV_RDMA_IWARP_PAUSE_LISTEN",
+ "CHANNEL_TLV_RDMA_IWARP_REJECT",
+ "CHANNEL_TLV_RDMA_IWARP_SEND_RTR",
+ "CHANNEL_TLV_ESTABLISH_LL2_CONN",
+ "CHANNEL_TLV_TERMINATE_LL2_CONN",
+ "CHANNEL_TLV_ASYNC_EVENT",
+ "CHANNEL_TLV_SOFT_FLR",
"CHANNEL_TLV_MAX"
};
return _ecore_vf_pf_release(p_hwfn, true);
}
-#define VF_ACQUIRE_THRESH 3
static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
struct vf_pf_resc_request *p_req,
struct pf_vf_resc *p_resp)
p_req->num_cids = p_resp->num_cids;
}
-static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
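+/* Ask the PF to perform a soft FLR of this VF, wait for the FLR to
+ * complete, and then retry the regular acquisition flow.
+ */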
+static enum _ecore_status_t
+ecore_vf_pf_soft_flr_acquire(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_soft_flr_tlv *req;
+ enum _ecore_status_t rc;
+
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_SOFT_FLR, sizeof(*req));
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "rc=0x%x\n", rc);
+
+ /* Release the mutex; ecore_vf_pf_acquire() will take it again */
+ ecore_vf_pf_req_end(p_hwfn, ECORE_AGAIN);
+
+ /* As of today, there is no mechanism in place for the VF to learn the
+ * FLR status, so wait long enough (worst-case time) for the FLR to
+ * complete: the PF's mailbox request to the MFW to initiate the VF FLR,
+ * and the PF's own FLR processing, can both take time.
+ */
+ OSAL_MSLEEP(3000);
+
+ return ecore_vf_pf_acquire(p_hwfn);
+}
+
+enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
struct ecore_vf_acquire_sw_info vf_sw_info;
struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u8 retry_cnt = p_iov->acquire_retry_cnt;
struct vf_pf_resc_request *p_resc;
bool resources_acquired = false;
struct vfpf_acquire_tlv *req;
/* send acquire request */
rc = ecore_send_msg2pf(p_hwfn,
&resp->hdr.status, sizeof(*resp));
+
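+ /* A VF-PF channel timeout is retried up to acquire_retry_cnt times
+ * before failing the acquisition.
+ */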
+ if (retry_cnt && rc == ECORE_TIMEOUT) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF retrying to acquire due to VPC timeout\n");
+ retry_cnt--;
+ continue;
+ }
+
if (rc != ECORE_SUCCESS)
goto exit;
resources_acquired = true;
} /* PF refuses to allocate our resources */
else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
- attempts < VF_ACQUIRE_THRESH) {
+ attempts < ECORE_VF_ACQUIRE_THRESH) {
ecore_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
&resp->resc);
"PF rejected acquisition by VF\n");
rc = ECORE_INVAL;
goto exit;
+ } else if (resp->hdr.status == PFVF_STATUS_ACQUIRED) {
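+ /* PF reports this VF as already acquired; end the pending request
+ * and re-acquire via a soft FLR.
+ */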
+ ecore_vf_pf_req_end(p_hwfn, ECORE_AGAIN);
+ return ecore_vf_pf_soft_flr_acquire(p_hwfn);
} else {
DP_ERR(p_hwfn,
"PF returned err %d to VF acquisition request\n",
return 0;
}
-enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
+enum _ecore_status_t
+ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn,
+ struct ecore_hw_prepare_params *p_params)
{
struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_hwfn->p_dev);
struct ecore_vf_iov *p_iov;
#endif
OSAL_MUTEX_INIT(&p_iov->mutex);
+ p_iov->acquire_retry_cnt = p_params->acquire_retry_cnt;
p_hwfn->vf_iov_info = p_iov;
p_hwfn->hw_info.personality = ECORE_PCI_ETH;
#include "ecore_vf_api.h"
#include "ecore_l2_api.h"
#include "ecore_vfpf_if.h"
+#include "ecore_dev_api.h"
/* Default number of CIDs [total of both Rx and Tx] to be requested
* by default.
* bar or via the doorbell bar.
*/
bool b_doorbell_bar;
+
+ /* retry count for VF acquire on channel timeout */
+ u8 acquire_retry_cnt;
};
/**
enum _ecore_status_t ecore_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
u16 *p_coal,
struct ecore_queue_cid *p_cid);
+
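+/**
+ * @brief VF - acquire resources from the PF by sending an ACQUIRE message
+ *
+ * @param p_hwfn
+ *
+ * @return enum _ecore_status_t
+ */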
+enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn);
/**
* @brief VF - Set Rx/Tx coalesce per VF's relative queue.
* Coalesce value '0' will omit the configuration.
* sends ACQUIRE message
*
* @param p_hwfn
+ * @param p_params
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t
+ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn,
+ struct ecore_hw_prepare_params *p_params);
/**
* @brief VF - start the RX Queue by sending a message to the PF
#include "ecore_mcp_api.h"
#ifdef CONFIG_ECORE_SRIOV
+
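+/* Threshold for VF ACQUIRE retries: used both when shrinking the
+ * resource request after PFVF_STATUS_NO_RESOURCE and as the
+ * channel-timeout retry count passed in the hw-prepare params.
+ */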
+#define ECORE_VF_ACQUIRE_THRESH 3
+
/**
* @brief Read the VF bulletin and act on it if needed
*
u8 padding[3];
};
+/* Soft FLR req */
+struct vfpf_soft_flr_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u32 reserved1;
+ u32 reserved2;
+};
+
/* Setup Queue */
struct vfpf_start_rxq_tlv {
struct vfpf_first_tlv first_tlv;
struct vfpf_read_coal_req_tlv read_coal_req;
struct vfpf_bulletin_update_mac_tlv bulletin_update_mac;
struct vfpf_update_mtu_tlv update_mtu;
+ struct vfpf_soft_flr_tlv soft_flr;
struct tlv_buffer_size tlv_buf_size;
};
CHANNEL_TLV_COALESCE_READ,
CHANNEL_TLV_BULLETIN_UPDATE_MAC,
CHANNEL_TLV_UPDATE_MTU,
+ CHANNEL_TLV_RDMA_ACQUIRE,
+ CHANNEL_TLV_RDMA_START,
+ CHANNEL_TLV_RDMA_STOP,
+ CHANNEL_TLV_RDMA_ADD_USER,
+ CHANNEL_TLV_RDMA_REMOVE_USER,
+ CHANNEL_TLV_RDMA_QUERY_COUNTERS,
+ CHANNEL_TLV_RDMA_ALLOC_TID,
+ CHANNEL_TLV_RDMA_REGISTER_TID,
+ CHANNEL_TLV_RDMA_DEREGISTER_TID,
+ CHANNEL_TLV_RDMA_FREE_TID,
+ CHANNEL_TLV_RDMA_CREATE_CQ,
+ CHANNEL_TLV_RDMA_RESIZE_CQ,
+ CHANNEL_TLV_RDMA_DESTROY_CQ,
+ CHANNEL_TLV_RDMA_CREATE_QP,
+ CHANNEL_TLV_RDMA_MODIFY_QP,
+ CHANNEL_TLV_RDMA_QUERY_QP,
+ CHANNEL_TLV_RDMA_DESTROY_QP,
+ CHANNEL_TLV_RDMA_QUERY_PORT,
+ CHANNEL_TLV_RDMA_QUERY_DEVICE,
+ CHANNEL_TLV_RDMA_IWARP_CONNECT,
+ CHANNEL_TLV_RDMA_IWARP_ACCEPT,
+ CHANNEL_TLV_RDMA_IWARP_CREATE_LISTEN,
+ CHANNEL_TLV_RDMA_IWARP_DESTROY_LISTEN,
+ CHANNEL_TLV_RDMA_IWARP_PAUSE_LISTEN,
+ CHANNEL_TLV_RDMA_IWARP_REJECT,
+ CHANNEL_TLV_RDMA_IWARP_SEND_RTR,
+ CHANNEL_TLV_ESTABLISH_LL2_CONN,
+ CHANNEL_TLV_TERMINATE_LL2_CONN,
+ CHANNEL_TLV_ASYNC_EVENT,
+ CHANNEL_TLV_RDMA_CREATE_SRQ,
+ CHANNEL_TLV_RDMA_MODIFY_SRQ,
+ CHANNEL_TLV_RDMA_DESTROY_SRQ,
+ CHANNEL_TLV_SOFT_FLR,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
/*deprecated don't use*/
#define DRV_MSG_CODE_INITIATE_FLR_DEPRECATED 0x02000000
#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
+#define DRV_MSG_CODE_INITIATE_VF_FLR 0x02020000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
#define DRV_MSG_CODE_CFG_PF_VFS_MSIX 0xc0020000
#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000
#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
+#define FW_MSG_CODE_INITIATE_VF_FLR_OK 0xb0030000
#define FW_MSG_CODE_ERR_RESOURCE_TEMPORARY_UNAVAILABLE 0x008b0000
#define FW_MSG_CODE_ERR_RESOURCE_ALREADY_ALLOCATED 0x008c0000
#define FW_MSG_CODE_ERR_RESOURCE_NOT_ALLOCATED 0x008d0000
qed_init_pci(edev, pci_dev);
memset(&hw_prepare_params, 0, sizeof(hw_prepare_params));
+
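+ /* VFs retry the ACQUIRE mailbox on a VF-PF channel timeout, up to
+ * ECORE_VF_ACQUIRE_THRESH attempts.
+ */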
+ if (is_vf)
+ hw_prepare_params.acquire_retry_cnt = ECORE_VF_ACQUIRE_THRESH;
+
hw_prepare_params.personality = ECORE_PCI_ETH;
hw_prepare_params.drv_resc_alloc = false;
hw_prepare_params.chk_reg_fifo = false;