-/*
+/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
* www.cavium.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
*/
#include "bcm_osal.h"
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"preparing to send %s tlv over vf pf channel\n",
- ecore_channel_tlvs_string[type]);
+ qede_ecore_channel_tlvs_string[type]);
/* Reset Request offset */
p_iov->offset = (u8 *)(p_iov->vf2pf_request);
return _ecore_vf_pf_release(p_hwfn, true);
}
-#define VF_ACQUIRE_THRESH 3
static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
struct vf_pf_resc_request *p_req,
struct pf_vf_resc *p_resp)
p_req->num_cids = p_resp->num_cids;
}
-static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
+static enum _ecore_status_t
+ecore_vf_pf_soft_flr_acquire(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_soft_flr_tlv *req;
+ enum _ecore_status_t rc;
+
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_SOFT_FLR, sizeof(*req));
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "rc=0x%x\n", rc);
+
+	/* Release the mutex, as ecore_vf_pf_acquire() below takes it again */
+ ecore_vf_pf_req_end(p_hwfn, ECORE_AGAIN);
+
+	/* As of today there is no mechanism for the VF to learn the FLR
+	 * status, so wait long enough (worst-case time) for the FLR to
+	 * complete: the mailbox request from the PF to the MFW to initiate
+	 * the VF FLR, and the PF's processing of that FLR, can both take time.
+	 */
+ OSAL_MSLEEP(3000);
+
+ return ecore_vf_pf_acquire(p_hwfn);
+}
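
/* Illustrative only, not part of the patch: a minimal sketch of the vf2pf
 * channel pattern that ecore_vf_pf_soft_flr_acquire() above follows. The
 * helper name and the bare first-TLV request are hypothetical; real
 * requests carry their own request/response TLV types.
 */
static enum _ecore_status_t
example_vf_pf_simple_req(struct ecore_hwfn *p_hwfn, u16 tlv_type)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	enum _ecore_status_t rc;

	/* Clear the mailbox and place the request header TLV;
	 * this also takes the channel mutex until ecore_vf_pf_req_end().
	 */
	ecore_vf_pf_prep(p_hwfn, tlv_type, sizeof(struct vfpf_first_tlv));

	/* Every request is closed by a list termination TLV */
	ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* Send over the VF-PF channel and wait for the PF's reply */
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = ECORE_INVAL;

	/* Release the channel mutex taken in ecore_vf_pf_prep() */
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}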
+
+enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
struct ecore_vf_acquire_sw_info vf_sw_info;
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
+ u8 retry_cnt = p_iov->acquire_retry_cnt;
struct vf_pf_resc_request *p_resc;
bool resources_acquired = false;
struct vfpf_acquire_tlv *req;
/* send acquire request */
rc = ecore_send_msg2pf(p_hwfn,
&resp->hdr.status, sizeof(*resp));
+
+ if (retry_cnt && rc == ECORE_TIMEOUT) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF retrying to acquire due to VPC timeout\n");
+ retry_cnt--;
+ continue;
+ }
+
if (rc != ECORE_SUCCESS)
goto exit;
resources_acquired = true;
} /* PF refuses to allocate our resources */
else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
- attempts < VF_ACQUIRE_THRESH) {
+ attempts < ECORE_VF_ACQUIRE_THRESH) {
ecore_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
&resp->resc);
"PF rejected acquisition by VF\n");
rc = ECORE_INVAL;
goto exit;
+ } else if (resp->hdr.status == PFVF_STATUS_ACQUIRED) {
+ ecore_vf_pf_req_end(p_hwfn, ECORE_AGAIN);
+ return ecore_vf_pf_soft_flr_acquire(p_hwfn);
} else {
DP_ERR(p_hwfn,
"PF returned err %d to VF acquisition request\n",
p_iov->bulletin.size = resp->bulletin_size;
/* get HW info */
- p_hwfn->p_dev->type = resp->pfdev_info.dev_type;
- p_hwfn->p_dev->chip_rev = (u8)resp->pfdev_info.chip_rev;
+ p_dev->type = resp->pfdev_info.dev_type;
+ p_dev->chip_rev = (u8)resp->pfdev_info.chip_rev;
DP_INFO(p_hwfn, "Chip details - %s%d\n",
- ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH",
+ ECORE_IS_BB(p_dev) ? "BB" : "AH",
CHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1);
- p_hwfn->p_dev->chip_num = pfdev_info->chip_num & 0xffff;
+ p_dev->chip_num = pfdev_info->chip_num & 0xffff;
/* Learn of the possibility of CMT */
if (IS_LEAD_HWFN(p_hwfn)) {
if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
DP_INFO(p_hwfn, "100g VF\n");
- p_hwfn->p_dev->num_hwfns = 2;
+ p_dev->num_hwfns = 2;
}
}
/* @DPDK */
- if ((~p_iov->b_pre_fp_hsi &
+ if (((p_iov->b_pre_fp_hsi == true) &
ETH_HSI_VER_MINOR) &&
(resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR))
DP_INFO(p_hwfn,
return 0;
}
-enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
+enum _ecore_status_t
+ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn,
+ struct ecore_hw_prepare_params *p_params)
{
struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_hwfn->p_dev);
struct ecore_vf_iov *p_iov;
phys,
p_iov->bulletin.
size);
+ if (!p_iov->bulletin.p_virt) {
+ DP_NOTICE(p_hwfn, false, "Failed to alloc bulletin memory\n");
+ goto free_pf2vf_reply;
+ }
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF's bulletin Board [%p virt 0x%lx phys 0x%08x bytes]\n",
p_iov->bulletin.p_virt, (unsigned long)p_iov->bulletin.phys,
p_iov->bulletin.size);
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex);
+ if (OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex)) {
+ DP_NOTICE(p_hwfn, false, "Failed to allocate p_iov->mutex\n");
+ goto free_bulletin_mem;
+ }
#endif
OSAL_MUTEX_INIT(&p_iov->mutex);
+ p_iov->acquire_retry_cnt = p_params->acquire_retry_cnt;
p_hwfn->vf_iov_info = p_iov;
p_hwfn->hw_info.personality = ECORE_PCI_ETH;
return rc;
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+free_bulletin_mem:
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->bulletin.p_virt,
+ p_iov->bulletin.phys,
+ p_iov->bulletin.size);
+#endif
+free_pf2vf_reply:
+ OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->pf2vf_reply,
+ p_iov->pf2vf_reply_phys,
+ sizeof(union pfvf_tlvs));
free_vf2pf_request:
OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request,
p_iov->vf2pf_request_phys,
return ECORE_NOMEM;
}
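
/* Illustrative only, not part of the patch: with the new signature of
 * ecore_vf_hw_prepare() above, the caller provides the acquire retry
 * count through struct ecore_hw_prepare_params. The function below and
 * the retry value are hypothetical examples, not the PMD's defaults.
 */
static enum _ecore_status_t example_vf_probe(struct ecore_hwfn *p_hwfn)
{
	struct ecore_hw_prepare_params params;

	OSAL_MEMSET(&params, 0, sizeof(params));

	/* Tolerate a few VF-PF channel timeouts during acquire */
	params.acquire_retry_cnt = 3;

	return ecore_vf_hw_prepare(p_hwfn, &params);
}
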
-#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
-#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
- (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
-
/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
static void
__ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
u32 init_prod_val = 0;
- *pp_prod = (u8 OSAL_IOMEM *)
- p_hwfn->regview +
+ *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
MSTORM_QZONE_START(p_hwfn->p_dev) +
(hw_qid) * MSTORM_QZONE_SIZE;
return !!p_data->sge_tpa_params;
default:
DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d] %s\n",
- tlv, ecore_channel_tlvs_string[tlv]);
+ tlv, qede_ecore_channel_tlvs_string[tlv]);
return false;
}
}
if (p_resp && p_resp->hdr.status)
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"TLV[%d] type %s Configuration %s\n",
- tlv, ecore_channel_tlvs_string[tlv],
+ tlv, qede_ecore_channel_tlvs_string[tlv],
(p_resp && p_resp->hdr.status) ? "succeeded"
: "failed");
}
return rc;
}
+enum _ecore_status_t
+ecore_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn, u16 mtu)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_update_mtu_tlv *p_req;
+ struct pfvf_def_resp_tlv *p_resp;
+ enum _ecore_status_t rc;
+
+ if (!mtu)
+ return ECORE_INVAL;
+
+ /* clear mailbox and prep header tlv */
+ p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_MTU,
+ sizeof(*p_req));
+ p_req->mtu = mtu;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Requesting MTU update to %d\n", mtu);
+
+ /* add list termination tlv */
+ ecore_add_tlv(&p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ p_resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+ if (p_resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED)
+ rc = ECORE_INVAL;
+
+ ecore_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
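
/* Illustrative only, not part of the patch: a hypothetical caller of the
 * new MTU update request; the wrapper name and logging are assumptions.
 */
static enum _ecore_status_t
example_vf_set_mtu(struct ecore_hwfn *p_hwfn, u16 new_mtu)
{
	enum _ecore_status_t rc;

	rc = ecore_vf_pf_update_mtu(p_hwfn, new_mtu);
	if (rc == ECORE_INVAL)
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "MTU update to %d failed\n", new_mtu);

	return rc;
}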
+
u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
u16 sb_id)
{