#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
+static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code);
+
const char *ecore_channel_tlvs_string[] = {
"CHANNEL_TLV_NONE", /* ends tlv sequence */
"CHANNEL_TLV_ACQUIRE",
"CHANNEL_TLV_UPDATE_TUNN_PARAM",
"CHANNEL_TLV_COALESCE_UPDATE",
"CHANNEL_TLV_QID",
+ "CHANNEL_TLV_COALESCE_READ",
"CHANNEL_TLV_MAX"
};
-static u8 ecore_vf_calculate_legacy(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf)
+static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
{
u8 legacy = 0;
}
static struct ecore_queue_cid *
-ecore_iov_get_vf_rx_queue_cid(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf,
- struct ecore_vf_queue *p_queue)
+ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
{
int i;
ECORE_IOV_VALIDATE_Q_DISABLE,
};
-static bool ecore_iov_validate_queue_mode(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf,
+static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
u16 qid,
enum ecore_iov_validate_q_mode mode,
bool b_is_tx)
return false;
}
- return ecore_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid,
- mode, false);
+ return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);
}
static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
return false;
}
- return ecore_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid,
- mode, true);
+ return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);
}
static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
}
/* Is there at least 1 queue open? */
-static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf)
+static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
{
u8 i;
for (i = 0; i < p_vf->num_rxqs; i++)
- if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ if (ecore_iov_validate_queue_mode(p_vf, i,
ECORE_IOV_VALIDATE_Q_ENABLE,
false))
return true;
return false;
}
-static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf)
+static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
{
u8 i;
for (i = 0; i < p_vf->num_txqs; i++)
- if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ if (ecore_iov_validate_queue_mode(p_vf, i,
ECORE_IOV_VALIDATE_Q_ENABLE,
true))
return true;
p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
if (!p_sriov) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate `struct ecore_sriov'\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n");
return ECORE_NOMEM;
}
p_hwfn->pf_iov_info = p_sriov;
+ ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+ ecore_sriov_eqe_event);
+
return ecore_iov_allocate_vfdb(p_hwfn);
}
void ecore_iov_free(struct ecore_hwfn *p_hwfn)
{
+ ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
+
if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
ecore_iov_free_vfdb(p_hwfn);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
sizeof(*p_dev->p_iov_info));
if (!p_dev->p_iov_info) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Can't support IOV due to lack of memory\n");
return ECORE_NOMEM;
}
ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
}
+/* Request the MFW to (re)configure the MSI-X table of VF @abs_vf_id so it
+ * covers @num_sbs status blocks. Skipped entirely when the client has set
+ * b_dont_override_vf_msix. On non-BB chips ("AH onward") the configuration
+ * is per-PF, so the request is only forwarded when @num_sbs exceeds the
+ * maximum num_sbs of all currently enabled child VFs.
+ */
+static enum _ecore_status_t
+ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 abs_vf_id,
+ u8 num_sbs)
+{
+ u8 current_max = 0;
+ int i;
+
+ /* If client overrides this, don't do anything */
+ if (p_hwfn->p_dev->b_dont_override_vf_msix)
+ return ECORE_SUCCESS;
+
+ /* For AH onward, configuration is per-PF. Find maximum of all
+ * the currently enabled child VFs, and set the number to be that.
+ */
+ if (!ECORE_IS_BB(p_hwfn->p_dev)) {
+ ecore_for_each_vf(p_hwfn, i) {
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)i, true);
+ if (!p_vf)
+ continue;
+
+ current_max = OSAL_MAX_T(u8, current_max,
+ p_vf->num_sbs);
+ }
+ }
+
+ /* Only bother the MFW when a larger allocation is actually needed */
+ if (num_sbs > current_max)
+ return ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
+ abs_vf_id, num_sbs);
+
+ return ECORE_SUCCESS;
+}
+
static enum _ecore_status_t
ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
{
u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
- enum _ecore_status_t rc;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* It's possible VF was previously considered malicious -
+ * clear the indication even if we're only going to disable VF.
+ */
+ vf->b_malicious = false;
if (vf->to_disable)
return ECORE_SUCCESS;
ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
- /* It's possible VF was previously considered malicious */
- vf->b_malicious = false;
-
- rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
- vf->abs_vf_id, vf->num_sbs);
+ rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
+ vf->abs_vf_id, vf->num_sbs);
if (rc != ECORE_SUCCESS)
return rc;
}
/* place a given tlv on the tlv buffer, continuing current tlv list */
-void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
- u8 **offset, u16 type, u16 length)
+void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
{
struct channel_tlv *tl = (struct channel_tlv *)*offset;
static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *p_vf,
- u16 length, u8 status)
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ u16 length,
+#else
+ u16 OSAL_UNUSED length,
+#endif
+ u8 status)
{
struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
struct ecore_dmae_params params;
mbx->sw_mbx.response_size =
length + sizeof(struct channel_list_end_tlv);
- if (!p_hwfn->p_dev->b_hw_channel)
+ if (!p_vf->b_hw_channel)
return;
#endif
(sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
¶ms);
+ /* Once PF copies the rc to the VF, the latter can continue
+ * and send an additional message. So we have to make sure the
+ * channel would be re-set to ready prior to that.
+ */
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+
ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
mbx->req_virt->first_tlv.reply_address,
sizeof(u64) / 4, ¶ms);
- REG_WR(p_hwfn,
- GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+ OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
}
-static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
- enum ecore_iov_vport_update_flag flag)
+static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
{
switch (flag) {
case ECORE_IOV_VP_UPDATE_ACTIVATE:
size = sizeof(struct pfvf_def_resp_tlv);
total_len = size;
- ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
+ ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
/* Prepare response for all extended tlvs if they are found by PF */
for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
if (!(tlvs_mask & (1 << i)))
continue;
- resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
- ecore_iov_vport_to_tlv(p_hwfn, i), size);
+ resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),
+ size);
if (tlvs_accepted & (1 << i))
resp->hdr.status = status;
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[%d] - vport_update resp: TLV %d, status %02x\n",
p_vf->relative_vf_id,
- ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
+ ecore_iov_vport_to_tlv(i),
+ resp->hdr.status);
total_len += size;
}
- ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
return total_len;
mbx->offset = (u8 *)mbx->reply_virt;
- ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&mbx->offset, type, length);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
-
- OSAL_IOV_PF_RESP_TYPE(p_hwfn, vf_info->relative_vf_id, status);
}
struct ecore_public_vf_info
OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
}
+/* Returns either 0 (register reads zero), or log2 of the VF doorbell BAR
+ * size. NOTE(review): the raw register value presumably encodes
+ * log2(size) - 11, hence the "+ 11" — confirm against the register spec.
+ */
+static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
+
+ if (val)
+ return val + 11;
+ return 0;
+}
+
+/* Fill p_resp->num_cids for a VF ACQUIRE request: start from the minimum of
+ * what the VF asked for and what the PF allows (num_vf_cons), then — for
+ * QID-aware VFs — clamp further so that doorbells for every CID fit inside
+ * the VF's doorbell window (physical BAR or regview, as applicable).
+ */
+static void
+ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
+ u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
+ DB_ADDR_VF(0, DQ_DEMS_LEGACY);
+ u32 bar_size;
+
+ p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);
+
+ /* If VF didn't bother asking for QIDs then don't bother limiting
+ * number of CIDs. The VF doesn't care about the number, and this
+ * has the likely result of causing an additional acquisition.
+ */
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+ return;
+
+ /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
+ * that would make sure doorbells for all CIDs fall within the bar.
+ * If it doesn't, make sure regview window is sufficient.
+ */
+ if (p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
+ bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
+ if (bar_size)
+ bar_size = 1 << bar_size;
+
+ /* On CMT devices the bar is split between the two engines */
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
+ bar_size /= 2;
+ } else {
+ bar_size = PXP_VF_BAR0_DQ_LENGTH;
+ }
+
+ /* Clamp only when the window can't hold doorbells for 256 CIDs */
+ if (bar_size / db_size < 256)
+ p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
+ (u8)(bar_size / db_size));
+}
+
static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *p_vf,
p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
p_req->num_vlan_filters);
- p_resp->num_cids =
- OSAL_MIN_T(u8, p_req->num_cids,
- p_hwfn->pf_params.eth_pf_params.num_vf_cons);
+ ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
/* This isn't really needed/enforced, but some legacy VFs might depend
* on the correct filling of this field.
return PFVF_STATUS_SUCCESS;
}
-static void ecore_iov_vf_mbx_acquire_stats(struct ecore_hwfn *p_hwfn,
- struct pfvf_stats_info *p_stats)
+static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats)
{
p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
OFFSETOF(struct mstorm_vf_zone,
}
/* On 100g PFs, prevent old VFs from loading */
- if ((p_hwfn->p_dev->num_hwfns > 1) &&
+ if (ECORE_IS_CMT(p_hwfn->p_dev) &&
!(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
DP_INFO(p_hwfn,
"VF[%d] is running an old driver that doesn't support"
/* fill in pfdev info */
pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */
- pfdev_info->indices_per_sb = PIS_PER_SB;
+ pfdev_info->indices_per_sb = PIS_PER_SB_E4;
pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
- if (p_hwfn->p_dev->num_hwfns > 1)
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
/* Share our ability to use multiple queue-ids only with VFs
if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
- ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
+ /* Share the sizes of the bars with VF */
+ resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn,
+ p_ptt);
+
+ ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);
OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
ETH_ALEN);
if (!p_vf->vport_instance)
return ECORE_INVAL;
- if (events & (1 << MAC_ADDR_FORCED)) {
+ if ((events & (1 << MAC_ADDR_FORCED)) ||
+ p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change) {
/* Since there's no way [currently] of removing the MAC,
* we can always assume this means we need to force it.
*/
return rc;
}
- p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ p_vf->configured_features |=
+ 1 << VFPF_BULLETIN_MAC_ADDR;
+ else
+ p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
}
if (events & (1 << VLAN_ADDR_FORCED)) {
struct ecore_queue_cid *p_cid = OSAL_NULL;
/* There can be at most 1 Rx queue on qzone. Find it */
- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, p_vf,
- p_queue);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
if (p_cid == OSAL_NULL)
continue;
u8 status = PFVF_STATUS_SUCCESS;
enum _ecore_status_t rc;
+ OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf);
vf->vport_instance--;
vf->spoof_chk = false;
- if ((ecore_iov_validate_active_rxq(p_hwfn, vf)) ||
- (ecore_iov_validate_active_txq(p_hwfn, vf))) {
+ if ((ecore_iov_validate_active_rxq(vf)) ||
+ (ecore_iov_validate_active_txq(vf))) {
vf->b_malicious = true;
DP_NOTICE(p_hwfn, false,
"VF [%02x] - considered malicious;"
" Unable to stop RX/TX queuess\n",
vf->abs_vf_id);
+ status = PFVF_STATUS_MALICIOUS;
+ goto out;
}
rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
vf->configured_features = 0;
OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+out:
ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
sizeof(struct pfvf_def_resp_tlv), status);
}
else
length = sizeof(struct pfvf_def_resp_tlv);
- p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
- length);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
if (p_queue->cids[qid_usage_idx].p_cid)
goto out;
- vf_legacy = ecore_vf_calculate_legacy(p_hwfn, vf);
+ vf_legacy = ecore_vf_calculate_legacy(vf);
/* Acquire a new queue-cid */
OSAL_MEMSET(¶ms, 0, sizeof(params));
vf_params.qid_usage_idx = qid_usage_idx;
p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
- ¶ms, &vf_params);
+ ¶ms, true, &vf_params);
if (p_cid == OSAL_NULL)
goto out;
}
send_resp:
- p_resp = ecore_add_tlv(p_hwfn, &mbx->offset,
+ p_resp = ecore_add_tlv(&mbx->offset,
CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
else
length = sizeof(struct pfvf_def_resp_tlv);
- p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
- length);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
if (p_queue->cids[qid_usage_idx].p_cid)
goto out;
- vf_legacy = ecore_vf_calculate_legacy(p_hwfn, vf);
+ vf_legacy = ecore_vf_calculate_legacy(vf);
/* Acquire a new queue-cid */
params.queue_id = p_queue->fw_tx_qid;
vf_params.qid_usage_idx = qid_usage_idx;
p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
- ¶ms, &vf_params);
+ ¶ms, false, &vf_params);
if (p_cid == OSAL_NULL)
goto out;
p_queue->cids[qid_usage_idx].b_is_tx) {
struct ecore_queue_cid *p_cid;
- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf, p_queue);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
vf->relative_vf_id, rxq_id, qid_usage_idx,
goto out;
}
- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
- &vf->vf_queues[q_idx]);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
p_rss->rss_ind_table[i] = p_cid;
}
static void
ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *vf,
struct ecore_sp_vport_update_params *p_data,
struct ecore_sge_tpa_params *p_sge_tpa,
struct ecore_iov_vf_mbx *p_mbx,
ecore_iov_vp_update_mcast_bin_param(p_hwfn, ¶ms, mbx, &tlvs_mask);
ecore_iov_vp_update_accept_flag(p_hwfn, ¶ms, mbx, &tlvs_mask);
ecore_iov_vp_update_accept_any_vlan(p_hwfn, ¶ms, mbx, &tlvs_mask);
- ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, ¶ms,
+ ecore_iov_vp_update_sge_tpa_param(p_hwfn, ¶ms,
&sge_tpa_params, mbx, &tlvs_mask);
tlvs_accepted = tlvs_mask;
"Upper-layer prevents said VF"
" configuration\n");
else
- DP_NOTICE(p_hwfn, true,
- "No feature tlvs found for vport update\n");
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No feature tlvs found for vport update\n");
status = PFVF_STATUS_NOT_SUPPORTED;
goto out;
}
goto out;
}
- /* Update shadow copy of the VF configuration */
+ /* Update shadow copy of the VF configuration. In case shadow indicates
+ * the action should be blocked return success to VF to imitate the
+ * firmware behaviour in such case.
+ */
if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, ¶ms) !=
- ECORE_SUCCESS) {
- status = PFVF_STATUS_FAILURE;
+ ECORE_SUCCESS)
goto out;
- }
/* Determine if the unicast filtering is acceptible by PF */
if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
length, status);
}
+/* Handle a CHANNEL_TLV_COALESCE_READ request from a VF: validate the
+ * requested Rx/Tx queue id, read its current coalescing value, and send it
+ * back in a pfvf_read_coal_resp_tlv (status FAILURE on any validation or
+ * read error).
+ */
+static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_read_coal_resp_tlv *p_resp;
+ struct vfpf_read_coal_req_tlv *req;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct ecore_vf_queue *p_queue;
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 coal = 0, qid, i;
+ bool b_is_rx;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+ req = &mbx->req_virt->read_coal_req;
+
+ qid = req->qid;
+ b_is_rx = req->is_rx ? true : false;
+
+ if (b_is_rx) {
+ if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Invalid Rx queue_id = %d\n",
+ p_vf->abs_vf_id, qid);
+ goto send_resp;
+ }
+
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
+ rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
+ if (rc != ECORE_SUCCESS)
+ goto send_resp;
+ } else {
+ if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Invalid Tx queue_id = %d\n",
+ p_vf->abs_vf_id, qid);
+ goto send_resp;
+ }
+ /* Read from the first enabled Tx cid found on the qzone;
+ * presumably all Tx cids on the zone share one coalescing
+ * value — TODO confirm.
+ */
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ p_queue = &p_vf->vf_queues[qid];
+ if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
+ (!p_queue->cids[i].b_is_tx))
+ continue;
+
+ p_cid = p_queue->cids[i].p_cid;
+
+ rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
+ p_cid, &coal);
+ if (rc != ECORE_SUCCESS)
+ goto send_resp;
+ break;
+ }
+ }
+
+ status = PFVF_STATUS_SUCCESS;
+
+send_resp:
+ p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
+ sizeof(*p_resp));
+ p_resp->coal = coal;
+
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
+}
+
static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
vf->abs_vf_id, rx_coal, tx_coal, qid);
if (rx_coal) {
- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
- &vf->vf_queues[qid]);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
if (rc != ECORE_SUCCESS) {
vf->abs_vf_id, rx_coal, tx_coal, qid);
if (rx_coal) {
- p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
- &vf->vf_queues[qid]);
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
if (rc != ECORE_SUCCESS) {
ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
{
- u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+ u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
int i, cnt;
/* Read initial consumers & producers */
- for (i = 0; i < MAX_NUM_VOQS; i++) {
+ for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
u32 prod;
cons[i] = ecore_rd(p_hwfn, p_ptt,
/* Wait for consumers to pass the producers */
i = 0;
for (cnt = 0; cnt < 50; cnt++) {
- for (; i < MAX_NUM_VOQS; i++) {
+ for (; i < MAX_NUM_VOQS_E4; i++) {
u32 tmp;
tmp = ecore_rd(p_hwfn, p_ptt,
break;
}
- if (i == MAX_NUM_VOQS)
+ if (i == MAX_NUM_VOQS_E4)
break;
OSAL_MSLEEP(20);
p_bulletin = p_vf->bulletin.p_virt;
if (p_params)
- __ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
+ __ecore_vf_get_link_params(p_params, p_bulletin);
if (p_link)
- __ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
+ __ecore_vf_get_link_state(p_link, p_bulletin);
if (p_caps)
- __ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+ __ecore_vf_get_link_caps(p_caps, p_bulletin);
}
void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
case CHANNEL_TLV_COALESCE_UPDATE:
ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
break;
+ case CHANNEL_TLV_COALESCE_READ:
+ ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
+ break;
}
} else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
/* If we've received a message from a VF we consider malicious
{
struct ecore_vf_info *p_vf;
- p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vfId);
+ p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
if (!p_vf)
return;
- DP_INFO(p_hwfn,
- "VF [%d] - Malicious behavior [%02x]\n",
- p_vf->abs_vf_id, p_data->errId);
+ if (!p_vf->b_malicious) {
+ DP_NOTICE(p_hwfn, false,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->err_id);
- p_vf->b_malicious = true;
+ p_vf->b_malicious = true;
+ } else {
+ DP_INFO(p_hwfn,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->err_id);
+ }
OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
}
-enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
- u8 opcode,
- __le16 echo,
- union event_ring_data *data)
+static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data,
+ u8 OSAL_UNUSED fw_return_code)
{
switch (opcode) {
case COMMON_EVENT_VF_PF_CHANNEL:
return i;
out:
- return E4_MAX_NUM_VFS;
+ return MAX_NUM_VFS_E4;
}
enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
return;
}
- feature = 1 << MAC_ADDR_FORCED;
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ feature = 1 << VFPF_BULLETIN_MAC_ADDR;
+ else
+ feature = 1 << MAC_ADDR_FORCED;
+
OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
vf_info->bulletin.p_virt->valid_bitmap |= feature;
vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change)
+ ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+
return ECORE_SUCCESS;
}
+#ifndef LINUX_REMOVE
enum _ecore_status_t
ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
bool b_untagged_only, int vfid)
*opaque_fid = vf_info->opaque_fid;
}
+#endif
void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
u16 pvid, int vfid)
return sizeof(union pfvf_tlvs);
}
+/* Return a pointer to the bulletin (non-forced) MAC of VF @rel_vf_id, or
+ * OSAL_NULL when the VF is invalid, has no bulletin, or the
+ * VFPF_BULLETIN_MAC_ADDR entry is not marked valid in the bitmap.
+ */
+u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return OSAL_NULL;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap &
+ (1 << VFPF_BULLETIN_MAC_ADDR)))
+ return OSAL_NULL;
+
+ return p_vf->bulletin.p_virt->mac;
+}
+
u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
struct ecore_vf_info *p_vf;
struct ecore_ptt *p_ptt,
int vfid, int val)
{
+ struct ecore_mcp_link_state *p_link;
struct ecore_vf_info *vf;
u8 abs_vp_id = 0;
enum _ecore_status_t rc;
if (rc != ECORE_SUCCESS)
return rc;
- return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+ p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
+
+ return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
+ p_link->speed);
}
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
else
return 0;
}
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+/* Select whether VF @vfid uses the HW mailbox channel (@b_is_hw true) or the
+ * SW channel; no-op when the VF info can't be resolved.
+ */
+void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
+ bool b_is_hw)
+{
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return;
+
+ vf_info->b_hw_channel = b_is_hw;
+}
+#endif