-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
* All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
*/
#include "bcm_osal.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
-const char *ecore_channel_tlvs_string[] = {
+static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code);
+
+const char *qede_ecore_channel_tlvs_string[] = {
"CHANNEL_TLV_NONE", /* ends tlv sequence */
"CHANNEL_TLV_ACQUIRE",
"CHANNEL_TLV_VPORT_START",
"CHANNEL_TLV_VPORT_UPDATE_RSS",
"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
+ "CHANNEL_TLV_UPDATE_TUNN_PARAM",
+ "CHANNEL_TLV_COALESCE_UPDATE",
+ "CHANNEL_TLV_QID",
+ "CHANNEL_TLV_COALESCE_READ",
+ "CHANNEL_TLV_BULLETIN_UPDATE_MAC",
+ "CHANNEL_TLV_UPDATE_MTU",
"CHANNEL_TLV_MAX"
};
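+
+/* Derive the legacy-mode flags for a VF from its ACQUIRE request: VFs
+ * running a fastpath HSI older than the packet-length/tunnelling change
+ * keep their Rx producers in the queue zone, and VFs that did not
+ * advertise the QUEUE_QIDS capability use the legacy per-queue CID
+ * layout.
+ */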
+static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
+{
+ u8 legacy = 0;
+
+ if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ legacy |= ECORE_QCID_LEGACY_VF_RX_PROD;
+
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+ legacy |= ECORE_QCID_LEGACY_VF_CID;
+
+ return legacy;
+}
+
/* IOV ramrods */
static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf)
p_ramrod->personality = PERSONALITY_ETH;
break;
case ECORE_PCI_ETH_ROCE:
+ case ECORE_PCI_ETH_IWARP:
p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
break;
default:
return vf;
}
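+/* A VF queue-zone can host up to MAX_QUEUES_PER_QZONE CIDs, but at most
+ * one of them is an Rx CID - return the first non-Tx CID found.
+ */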
+static struct ecore_queue_cid *
+ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
+{
+ u32 i;
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid &&
+ !p_queue->cids[i].b_is_tx)
+ return p_queue->cids[i].p_cid;
+ }
+
+ return OSAL_NULL;
+}
+
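+/* Queue-mode validation: _NA skips the enabled/disabled check entirely,
+ * _ENABLE requires that a matching CID already exists, and _DISABLE
+ * requires that none does.
+ */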
+enum ecore_iov_validate_q_mode {
+ ECORE_IOV_VALIDATE_Q_NA,
+ ECORE_IOV_VALIDATE_Q_ENABLE,
+ ECORE_IOV_VALIDATE_Q_DISABLE,
+};
+
+static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
+ u16 qid,
+ enum ecore_iov_validate_q_mode mode,
+ bool b_is_tx)
+{
+ u32 i;
+
+ if (mode == ECORE_IOV_VALIDATE_Q_NA)
+ return true;
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ struct ecore_vf_queue_cid *p_qcid;
+
+ p_qcid = &p_vf->vf_queues[qid].cids[i];
+
+ if (p_qcid->p_cid == OSAL_NULL)
+ continue;
+
+ if (p_qcid->b_is_tx != b_is_tx)
+ continue;
+
+ /* Found. It's enabled. */
+ return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
+ }
+
+ /* If we haven't found any valid cid, the queue is disabled */
+ return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
+}
+
static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf,
- u16 rx_qid)
+ u16 rx_qid,
+ enum ecore_iov_validate_q_mode mode)
{
- if (rx_qid >= p_vf->num_rxqs)
+ if (rx_qid >= p_vf->num_rxqs) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[0x%02x] - can't touch Rx queue[%04x];"
" Only 0x%04x are allocated\n",
p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
- return rx_qid < p_vf->num_rxqs;
+ return false;
+ }
+
+ return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);
}
static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf,
- u16 tx_qid)
+ u16 tx_qid,
+ enum ecore_iov_validate_q_mode mode)
{
- if (tx_qid >= p_vf->num_txqs)
+ if (tx_qid >= p_vf->num_txqs) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[0x%02x] - can't touch Tx queue[%04x];"
" Only 0x%04x are allocated\n",
p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
- return tx_qid < p_vf->num_txqs;
+ return false;
+ }
+
+ return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);
}
static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
return false;
}
-/* TODO - this is linux crc32; Need a way to ifdef it out for linux */
-u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
+/* Is there at least 1 queue open? */
+static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
{
- int i;
+ u8 i;
- while (length--) {
- crc ^= *ptr++;
- for (i = 0; i < 8; i++)
- crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
- }
- return crc;
+ for (i = 0; i < p_vf->num_rxqs; i++)
+ if (ecore_iov_validate_queue_mode(p_vf, i,
+ ECORE_IOV_VALIDATE_Q_ENABLE,
+ false))
+ return true;
+
+ return false;
+}
+
+static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
+{
+ u8 i;
+
+ for (i = 0; i < p_vf->num_txqs; i++)
+ if (ecore_iov_validate_queue_mode(p_vf, i,
+ ECORE_IOV_VALIDATE_Q_ENABLE,
+ true))
+ return true;
+
+ return false;
}
enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
{
struct ecore_bulletin_content *p_bulletin;
int crc_size = sizeof(p_bulletin->crc);
- struct ecore_dmae_params params;
+ struct dmae_params params;
struct ecore_vf_info *p_vf;
p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
/* Increment bulletin board version and compute crc */
p_bulletin->version++;
- p_bulletin->crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size,
- p_vf->bulletin.size - crc_size);
+ p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size,
+ p_vf->bulletin.size - crc_size);
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
/* propagate bulletin board via dmae to vm memory */
OSAL_MEMSET(¶ms, 0, sizeof(params));
- params.flags = ECORE_DMAE_FLAG_VF_DST;
- params.dst_vfid = p_vf->abs_vf_id;
+ SET_FIELD(params.flags, DMAE_PARAMS_DST_VF_VALID, 0x1);
+ params.dst_vf_id = p_vf->abs_vf_id;
return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
p_vf->vf_bulletin, p_vf->bulletin.size / 4,
¶ms);
return ECORE_SUCCESS;
}
-static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
- struct ecore_igu_block *p_sb;
- u16 sb_id;
- u32 val;
-
- if (!p_hwfn->hw_info.p_igu_info) {
- DP_ERR(p_hwfn,
- "ecore_iov_clear_vf_igu_blocks IGU Info not inited\n");
- return;
- }
-
- for (sb_id = 0;
- sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) {
- p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
- if ((p_sb->status & ECORE_IGU_STATUS_FREE) &&
- !(p_sb->status & ECORE_IGU_STATUS_PF)) {
- val = ecore_rd(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + sb_id * 4);
- SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
- ecore_wr(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
- }
- }
-}
-
static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
{
struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
return;
}
- p_iov_info->base_vport_id = 1; /* @@@TBD resource allocation */
-
for (idx = 0; idx < p_iov->total_vfs; idx++) {
struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
u32 concrete;
/* TODO - need to devise a better way of getting opaque */
vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
(vf->abs_vf_id << 8);
- /* @@TBD MichalK - add base vport_id of VFs to equation */
- vf->vport_id = p_iov_info->base_vport_id + idx;
vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
if (!p_sriov) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate `struct ecore_sriov'\n");
+ DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_sriov'\n");
return ECORE_NOMEM;
}
p_hwfn->pf_iov_info = p_sriov;
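+ /* Register for COMMON-protocol EQ completions so SRIOV events
+ * (handled by ecore_sriov_eqe_event() above) arrive through the
+ * generic async-callback mechanism.
+ */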
+ ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+ ecore_sriov_eqe_event);
+
return ecore_iov_allocate_vfdb(p_hwfn);
}
-void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
{
if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
return;
ecore_iov_setup_vfdb(p_hwfn);
- ecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}
void ecore_iov_free(struct ecore_hwfn *p_hwfn)
{
+ ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
+
if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
ecore_iov_free_vfdb(p_hwfn);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
sizeof(*p_dev->p_iov_info));
if (!p_dev->p_iov_info) {
- DP_NOTICE(p_hwfn, true,
+ DP_NOTICE(p_hwfn, false,
"Can't support IOV due to lack of memory\n");
return ECORE_NOMEM;
}
return ECORE_SUCCESS;
}
-bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
- bool b_fail_malicious)
+static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
+ bool b_fail_malicious)
{
/* Check PF supports sriov */
if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
}
+static enum _ecore_status_t
+ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u8 abs_vf_id,
+ u8 num_sbs)
+{
+ u8 current_max = 0;
+ int i;
+
+ /* If client overrides this, don't do anything */
+ if (p_hwfn->p_dev->b_dont_override_vf_msix)
+ return ECORE_SUCCESS;
+
+ /* For AH onward, the configuration is per-PF. Find the maximum
+ * across all currently enabled child VFs and program that value.
+ */
+ if (!ECORE_IS_BB(p_hwfn->p_dev)) {
+ ecore_for_each_vf(p_hwfn, i) {
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)i, true);
+ if (!p_vf)
+ continue;
+
+ current_max = OSAL_MAX_T(u8, current_max,
+ p_vf->num_sbs);
+ }
+ }
+
+ if (num_sbs > current_max)
+ return ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
+ abs_vf_id, num_sbs);
+
+ return ECORE_SUCCESS;
+}
+
static enum _ecore_status_t
ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
{
u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
- enum _ecore_status_t rc;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ /* It's possible the VF was previously considered malicious -
+ * clear the indication even if we're only going to disable the VF.
+ */
+ vf->b_malicious = false;
if (vf->to_disable)
return ECORE_SUCCESS;
ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
- /* It's possible VF was previously considered malicious */
- vf->b_malicious = false;
-
- rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
- vf->abs_vf_id, vf->num_sbs);
+ rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
+ vf->abs_vf_id, vf->num_sbs);
if (rc != ECORE_SUCCESS)
return rc;
*
* @brief ecore_iov_config_perm_table - configure the permission
* zone table.
- * In E4, queue zone permission table size is 320x9. There
+ * The queue zone permission table size is 320x9. There
* are 320 VF queues for single engine device (256 for dual
* engine device), and each entry has the following format:
* {Valid, VF[7:0]}
struct ecore_vf_info *vf,
u16 num_rx_queues)
{
- struct ecore_igu_block *igu_blocks;
- int qid = 0, igu_id = 0;
+ struct ecore_igu_block *p_block;
+ struct cau_sb_entry sb_entry;
+ int qid = 0;
u32 val = 0;
- igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
-
- if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
- num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
-
- p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
+ if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
+ num_rx_queues =
+ (u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
+ p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
- while ((qid < num_rx_queues) &&
- (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) {
- if (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) {
- struct cau_sb_entry sb_entry;
-
- vf->igu_sbs[qid] = (u16)igu_id;
- igu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE;
-
- SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
-
- ecore_wr(p_hwfn, p_ptt,
- IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
- val);
-
- /* Configure igu sb in CAU which were marked valid */
- ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
- p_hwfn->rel_pf_id,
- vf->abs_vf_id, 1);
- ecore_dmae_host2grc(p_hwfn, p_ptt,
- (u64)(osal_uintptr_t)&sb_entry,
- CAU_REG_SB_VAR_MEMORY +
- igu_id * sizeof(u64), 2, 0);
- qid++;
- }
- igu_id++;
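+ /* Grab free IGU blocks via ecore_get_igu_free_sb() instead of
+ * scanning the whole mapping memory; each block taken is programmed
+ * into both the IGU mapping memory and the CAU.
+ */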
+ for (qid = 0; qid < num_rx_queues; qid++) {
+ p_block = ecore_get_igu_free_sb(p_hwfn, false);
+ if (!p_block)
+ continue;
+
+ vf->igu_sbs[qid] = p_block->igu_sb_id;
+ p_block->status &= ~ECORE_IGU_STATUS_FREE;
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
+
+ ecore_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * p_block->igu_sb_id, val);
+
+ /* Configure the IGU SBs that were marked valid in the CAU */
+ ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_hwfn->rel_pf_id,
+ vf->abs_vf_id, 1);
+
+ ecore_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(osal_uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ p_block->igu_sb_id * sizeof(u64), 2,
+ OSAL_NULL /* default parameters */);
}
vf->num_sbs = (u8)num_rx_queues;
SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
ecore_wr(p_hwfn, p_ptt, addr, val);
- p_info->igu_map.igu_blocks[igu_id].status |=
- ECORE_IGU_STATUS_FREE;
-
- p_hwfn->hw_info.p_igu_info->free_blks++;
+ p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE;
+ p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
}
vf->num_sbs = 0;
}
-enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 rel_vf_id, u16 num_rx_queues)
+void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
+ u16 vfid,
+ struct ecore_mcp_link_params *params,
+ struct ecore_mcp_link_state *link,
+ struct ecore_mcp_link_capabilities *p_caps)
+{
+ struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
+ struct ecore_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+ p_bulletin->req_autoneg = params->speed.autoneg;
+ p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+ p_bulletin->req_forced_speed = params->speed.forced_speed;
+ p_bulletin->req_autoneg_pause = params->pause.autoneg;
+ p_bulletin->req_forced_rx = params->pause.forced_rx;
+ p_bulletin->req_forced_tx = params->pause.forced_tx;
+ p_bulletin->req_loopback = params->loopback_mode;
+
+ p_bulletin->link_up = link->link_up;
+ p_bulletin->speed = link->speed;
+ p_bulletin->full_duplex = link->full_duplex;
+ p_bulletin->autoneg = link->an;
+ p_bulletin->autoneg_complete = link->an_complete;
+ p_bulletin->parallel_detection = link->parallel_detection;
+ p_bulletin->pfc_enabled = link->pfc_enabled;
+ p_bulletin->partner_adv_speed = link->partner_adv_speed;
+ p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+ p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+ p_bulletin->partner_adv_pause = link->partner_adv_pause;
+ p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+ p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
+#ifndef ASIC_ONLY
+static void ecore_emul_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ /* Increase the maximum number of DORQ FIFO entries used by child VFs */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT_LIM, 0x3ec);
+}
+#endif
+
+enum _ecore_status_t
+ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_iov_vf_init_params *p_params)
{
+ struct ecore_mcp_link_capabilities link_caps;
+ struct ecore_mcp_link_params link_params;
+ struct ecore_mcp_link_state link_state;
u8 num_of_vf_available_chains = 0;
struct ecore_vf_info *vf = OSAL_NULL;
+ u16 qid, num_irqs;
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 cids;
u8 i;
- vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
if (!vf) {
DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
return ECORE_UNKNOWN_ERROR;
if (vf->b_init) {
DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
- rel_vf_id);
+ p_params->rel_vf_id);
+ return ECORE_INVAL;
+ }
+
+ /* Perform sanity checking on the requested vport/rss */
+ if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
+ DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
+ p_params->rel_vf_id, p_params->vport_id);
return ECORE_INVAL;
}
+ if ((p_params->num_queues > 1) &&
+ (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
+ DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
+ p_params->rel_vf_id, p_params->rss_eng_id);
+ return ECORE_INVAL;
+ }
+
+ /* TODO - remove this once we gain confidence in the change */
+ if (!p_params->vport_id) {
+ DP_NOTICE(p_hwfn, false,
+ "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
+ p_params->rel_vf_id);
+ }
+ if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
+ DP_NOTICE(p_hwfn, false,
+ "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
+ p_params->rel_vf_id);
+ }
+ vf->vport_id = p_params->vport_id;
+ vf->rss_eng_id = p_params->rss_eng_id;
+
+ /* Since it's possible to relocate SBs, it's a bit difficult to check
+ * things here. Simply check whether the index falls in the range
+ * belonging to the PF.
+ */
+ for (i = 0; i < p_params->num_queues; i++) {
+ qid = p_params->req_rx_queue[i];
+ if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't enable Rx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n",
+ qid, p_params->rel_vf_id,
+ (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
+ return ECORE_INVAL;
+ }
+
+ qid = p_params->req_tx_queue[i];
+ if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't enable Tx qid [%04x] for VF[%d]: qids [0,,...,0x%04x] available\n",
+ qid, p_params->rel_vf_id,
+ (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
+ return ECORE_INVAL;
+ }
+ }
+
/* Limit number of queues according to number of CIDs */
ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[%d] - requesting to initialize for 0x%04x queues"
" [0x%04x CIDs available]\n",
- vf->relative_vf_id, num_rx_queues, (u16)cids);
- num_rx_queues = OSAL_MIN_T(u16, num_rx_queues, ((u16)cids));
+ vf->relative_vf_id, p_params->num_queues, (u16)cids);
+ num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));
num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
p_ptt,
vf,
- num_rx_queues);
+ num_irqs);
if (num_of_vf_available_chains == 0) {
DP_ERR(p_hwfn, "no available igu sbs\n");
return ECORE_NOMEM;
vf->num_txqs = num_of_vf_available_chains;
for (i = 0; i < vf->num_rxqs; i++) {
- u16 queue_id = ecore_int_queue_id_from_sb_id(p_hwfn,
- vf->igu_sbs[i]);
-
- if (queue_id > RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
- DP_NOTICE(p_hwfn, true,
- "VF[%d] will require utilizing of"
- " out-of-bounds queues - %04x\n",
- vf->relative_vf_id, queue_id);
- /* TODO - cleanup the already allocate SBs */
- return ECORE_INVAL;
- }
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[i];
- /* CIDs are per-VF, so no problem having them 0-based. */
- vf->vf_queues[i].fw_rx_qid = queue_id;
- vf->vf_queues[i].fw_tx_qid = queue_id;
- vf->vf_queues[i].fw_cid = i;
+ p_queue->fw_rx_qid = p_params->req_rx_queue[i];
+ p_queue->fw_tx_qid = p_params->req_tx_queue[i];
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
- vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
+ "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
+ vf->relative_vf_id, i, vf->igu_sbs[i],
+ p_queue->fw_rx_qid, p_queue->fw_tx_qid);
}
+ /* Update the link configuration in the bulletin */
+ OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
+ sizeof(link_params));
+ OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
+ sizeof(link_state));
+ OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
+ sizeof(link_caps));
+ ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
+ &link_params, &link_state, &link_caps);
+
rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
+ if (rc != ECORE_SUCCESS)
+ return rc;
- if (rc == ECORE_SUCCESS) {
- vf->b_init = true;
- p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
+ vf->b_init = true;
+#ifndef REMOVE_DBG
+ p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
(1ULL << (vf->relative_vf_id % 64));
+#endif
- if (IS_LEAD_HWFN(p_hwfn))
- p_hwfn->p_dev->p_iov_info->num_vfs++;
- }
-
- return rc;
-}
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->p_dev->p_iov_info->num_vfs++;
-void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
- u16 vfid,
- struct ecore_mcp_link_params *params,
- struct ecore_mcp_link_state *link,
- struct ecore_mcp_link_capabilities *p_caps)
-{
- struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
- struct ecore_bulletin_content *p_bulletin;
-
- if (!p_vf)
- return;
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ ecore_emul_iov_init_hw_for_vf(p_hwfn, p_ptt);
+#endif
- p_bulletin = p_vf->bulletin.p_virt;
- p_bulletin->req_autoneg = params->speed.autoneg;
- p_bulletin->req_adv_speed = params->speed.advertised_speeds;
- p_bulletin->req_forced_speed = params->speed.forced_speed;
- p_bulletin->req_autoneg_pause = params->pause.autoneg;
- p_bulletin->req_forced_rx = params->pause.forced_rx;
- p_bulletin->req_forced_tx = params->pause.forced_tx;
- p_bulletin->req_loopback = params->loopback_mode;
+ return ECORE_SUCCESS;
+ }
- p_bulletin->link_up = link->link_up;
- p_bulletin->speed = link->speed;
- p_bulletin->full_duplex = link->full_duplex;
- p_bulletin->autoneg = link->an;
- p_bulletin->autoneg_complete = link->an_complete;
- p_bulletin->parallel_detection = link->parallel_detection;
- p_bulletin->pfc_enabled = link->pfc_enabled;
- p_bulletin->partner_adv_speed = link->partner_adv_speed;
- p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
- p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
- p_bulletin->partner_adv_pause = link->partner_adv_pause;
- p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+#ifndef ASIC_ONLY
+static void ecore_emul_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ if (!ecore_mcp_is_init(p_hwfn)) {
+ u32 sriov_dis = ecore_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_SR_IOV_DISABLED_REQUEST);
- p_bulletin->capability_speed = p_caps->speed_capabilities;
+ ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_SR_IOV_DISABLED_REQUEST_CLR,
+ sriov_dis);
}
+}
+#endif
enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
p_hwfn->p_dev->p_iov_info->num_vfs--;
}
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
+ ecore_emul_iov_release_hw_for_vf(p_hwfn, p_ptt);
+#endif
+
return ECORE_SUCCESS;
}
ECORE_MSG_IOV,
"VF[%d]: vf pf channel locked by %s\n",
vf->abs_vf_id,
- ecore_channel_tlvs_string[tlv]);
+ qede_ecore_channel_tlvs_string[tlv]);
else
DP_VERBOSE(p_hwfn,
ECORE_MSG_IOV,
ECORE_MSG_IOV,
"VF[%d]: vf pf channel unlocked by %s\n",
vf->abs_vf_id,
- ecore_channel_tlvs_string[expected_tlv]);
+ qede_ecore_channel_tlvs_string[expected_tlv]);
else
DP_VERBOSE(p_hwfn,
ECORE_MSG_IOV,
}
/* place a given tlv on the tlv buffer, continuing current tlv list */
-void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
- u8 **offset, u16 type, u16 length)
+void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
{
struct channel_tlv *tl = (struct channel_tlv *)*offset;
if (ecore_iov_tlv_supported(tlv->type))
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"TLV number %d: type %s, length %d\n",
- i, ecore_channel_tlvs_string[tlv->type],
+ i, qede_ecore_channel_tlvs_string[tlv->type],
tlv->length);
else
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *p_vf,
- u16 length, u8 status)
+#ifdef CONFIG_ECORE_SW_CHANNEL
+ u16 length,
+#else
+ u16 OSAL_UNUSED length,
+#endif
+ u8 status)
{
struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
- struct ecore_dmae_params params;
+ struct dmae_params params;
u8 eng_vf_id;
mbx->reply_virt->default_resp.hdr.status = status;
mbx->sw_mbx.response_size =
length + sizeof(struct channel_list_end_tlv);
- if (!p_hwfn->p_dev->b_hw_channel)
+ if (!p_vf->b_hw_channel)
return;
#endif
eng_vf_id = p_vf->abs_vf_id;
- OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_dmae_params));
- params.flags = ECORE_DMAE_FLAG_VF_DST;
- params.dst_vfid = eng_vf_id;
+ OSAL_MEMSET(¶ms, 0, sizeof(params));
+ SET_FIELD(params.flags, DMAE_PARAMS_DST_VF_VALID, 0x1);
+ params.dst_vf_id = eng_vf_id;
ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
mbx->req_virt->first_tlv.reply_address +
(sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
¶ms);
+ /* Once the PF copies the rc to the VF, the latter can continue
+ * and send an additional message. So we have to make sure the
+ * channel is re-set to ready prior to that.
+ */
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+
ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
mbx->req_virt->first_tlv.reply_address,
sizeof(u64) / 4, ¶ms);
- REG_WR(p_hwfn,
- GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+ OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
}
-static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
- enum ecore_iov_vport_update_flag flag)
+static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
{
switch (flag) {
case ECORE_IOV_VP_UPDATE_ACTIVATE:
size = sizeof(struct pfvf_def_resp_tlv);
total_len = size;
- ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
+ ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
/* Prepare response for all extended tlvs if they are found by PF */
for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
if (!(tlvs_mask & (1 << i)))
continue;
- resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
- ecore_iov_vport_to_tlv(p_hwfn, i), size);
+ resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),
+ size);
if (tlvs_accepted & (1 << i))
resp->hdr.status = status;
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[%d] - vport_update resp: TLV %d, status %02x\n",
p_vf->relative_vf_id,
- ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
+ ecore_iov_vport_to_tlv(i),
+ resp->hdr.status);
total_len += size;
}
- ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
return total_len;
mbx->offset = (u8 *)mbx->reply_virt;
- ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ ecore_add_tlv(&mbx->offset, type, length);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
-
- OSAL_IOV_PF_RESP_TYPE(p_hwfn, vf_info->relative_vf_id, status);
}
struct ecore_public_vf_info
static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf)
{
- u32 i;
+ u32 i, j;
p_vf->vf_bulletin = 0;
p_vf->vport_instance = 0;
p_vf->configured_features = 0;
p_vf->num_active_rxqs = 0;
- for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++)
- p_vf->vf_queues[i].rxq_active = 0;
+ for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
+ struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
+
+ for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
+ if (!p_queue->cids[j].p_cid)
+ continue;
+
+ ecore_eth_queue_cid_release(p_hwfn,
+ p_queue->cids[j].p_cid);
+ p_queue->cids[j].p_cid = OSAL_NULL;
+ }
+ }
OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
}
+/* Returns either 0, or log(size) */
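+/* The register value is assumed to encode the BAR size as a power of
+ * two, with the +11 converting it to log2(bytes) - e.g. a value of 1
+ * would denote a 4KB BAR. Callers expand the result with (1 << val).
+ */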
+static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
+
+ if (val)
+ return val + 11;
+ return 0;
+}
+
+static void
+ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
+ u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
+ DB_ADDR_VF(0, DQ_DEMS_LEGACY);
+ u32 bar_size;
+
+ p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);
+
+ /* If the VF didn't bother asking for QIDs then don't bother limiting
+ * the number of CIDs. The VF doesn't care about the number, and this
+ * has the likely result of causing an additional acquisition.
+ */
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+ return;
+
+ /* If the doorbell bar was mapped by the VF, limit the VF CIDs to an
+ * amount that ensures doorbells for all CIDs fall within the bar.
+ * Otherwise, make sure the regview window is sufficient.
+ */
+ if (p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
+ bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
+ if (bar_size)
+ bar_size = 1 << bar_size;
+
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
+ bar_size /= 2;
+ } else {
+ bar_size = PXP_VF_BAR0_DQ_LENGTH;
+ }
+
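+ /* num_cids is a u8, so no more than 255 CIDs can be granted anyway;
+ * clamping only matters when the bar holds fewer than 256 doorbells.
+ */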
+ if (bar_size / db_size < 256)
+ p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
+ (u8)(bar_size / db_size));
+}
+
static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *p_vf,
struct vf_pf_resc_request *p_req,
struct pf_vf_resc *p_resp)
{
- int i;
+ u8 i;
/* Queue related information */
p_resp->num_rxqs = p_vf->num_rxqs;
for (i = 0; i < p_resp->num_rxqs; i++) {
ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
(u16 *)&p_resp->hw_qid[i]);
- p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
+ p_resp->cid[i] = i;
}
/* Filter related information */
p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
p_req->num_vlan_filters);
+ ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
+
/* This isn't really needed/enforced, but some legacy VFs might depend
* on the correct filling of this field.
*/
p_resp->num_sbs < p_req->num_sbs ||
p_resp->num_mac_filters < p_req->num_mac_filters ||
p_resp->num_vlan_filters < p_req->num_vlan_filters ||
- p_resp->num_mc_filters < p_req->num_mc_filters) {
+ p_resp->num_mc_filters < p_req->num_mc_filters ||
+ p_resp->num_cids < p_req->num_cids) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF[%d] - Insufficient resources: rxq [%02x/%02x]"
- " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
- " vlan [%02x/%02x] mc [%02x/%02x]\n",
+ "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
p_vf->abs_vf_id,
p_req->num_rxqs, p_resp->num_rxqs,
p_req->num_rxqs, p_resp->num_txqs,
p_req->num_sbs, p_resp->num_sbs,
p_req->num_mac_filters, p_resp->num_mac_filters,
p_req->num_vlan_filters, p_resp->num_vlan_filters,
- p_req->num_mc_filters, p_resp->num_mc_filters);
+ p_req->num_mc_filters, p_resp->num_mc_filters,
+ p_req->num_cids, p_resp->num_cids);
/* Some legacy OSes are incapable of correctly handling this
* failure.
return PFVF_STATUS_SUCCESS;
}
-static void ecore_iov_vf_mbx_acquire_stats(struct ecore_hwfn *p_hwfn,
- struct pfvf_stats_info *p_stats)
+static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats)
{
p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
OFFSETOF(struct mstorm_vf_zone,
}
/* On 100g PFs, prevent old VFs from loading */
- if ((p_hwfn->p_dev->num_hwfns > 1) &&
+ if (ECORE_IS_CMT(p_hwfn->p_dev) &&
!(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
DP_INFO(p_hwfn,
"VF[%d] is running an old driver that doesn't support"
pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
- if (p_hwfn->p_dev->num_hwfns > 1)
+ if (ECORE_IS_CMT(p_hwfn->p_dev))
pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
- ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
+ /* Share our ability to use multiple queue-ids only with VFs
+ * that request it.
+ */
+ if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
+ pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
+
+ /* Share the bar sizes with the VF */
+ resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn,
+ p_ptt);
+
+ ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);
OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
ETH_ALEN);
if (!p_vf->vport_instance)
return ECORE_INVAL;
- if (events & (1 << MAC_ADDR_FORCED)) {
+ if ((events & (1 << MAC_ADDR_FORCED)) ||
+ p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ p_vf->p_vf_info.is_trusted_configured) {
/* Since there's no way [currently] of removing the MAC,
* we can always assume this means we need to force it.
*/
return rc;
}
- p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ p_vf->p_vf_info.is_trusted_configured)
+ p_vf->configured_features |=
+ 1 << VFPF_BULLETIN_MAC_ADDR;
+ else
+ p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
}
if (events & (1 << VLAN_ADDR_FORCED)) {
/* Update all the Rx queues */
for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
- u16 qid;
+ struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
+ struct ecore_queue_cid *p_cid = OSAL_NULL;
- if (!p_vf->vf_queues[i].rxq_active)
+ /* There can be at most 1 Rx queue on qzone. Find it */
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
+ if (p_cid == OSAL_NULL)
continue;
- qid = p_vf->vf_queues[i].fw_rx_qid;
-
- rc = ecore_sp_eth_rx_queues_update(p_hwfn, qid,
+ rc = ecore_sp_eth_rx_queues_update(p_hwfn,
+ (void **)&p_cid,
1, 0, 1,
ECORE_SPQ_MODE_EBLOCK,
OSAL_NULL);
DP_NOTICE(p_hwfn, true,
"Failed to send Rx update"
" fo queue[0x%04x]\n",
- qid);
+ p_cid->rel.queue_id);
return rc;
}
}
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
- struct ecore_sp_vport_start_params params = { 0 };
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct ecore_sp_vport_start_params params;
struct vfpf_vport_start_tlv *start;
u8 status = PFVF_STATUS_SUCCESS;
struct ecore_vf_info *vf_info;
vf->state = VF_ENABLED;
start = &mbx->req_virt->start_vport;
+ ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
/* Initialize Status block in CAU */
for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
if (!start->sb_addr[sb_id]) {
vf->igu_sbs[sb_id],
vf->abs_vf_id, 1);
}
- ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
vf->mtu = start->mtu;
vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
}
+ OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_sp_vport_start_params));
params.tpa_mode = start->tpa_mode;
params.remove_inner_vlan = start->inner_vlan_removal;
params.tx_switching = true;
params.vport_id = vf->vport_id;
params.max_buffers_per_cqe = start->max_buffers_per_cqe;
params.mtu = vf->mtu;
- params.check_mac = true;
+
+ /* Non-trusted VFs should enable control frame filtering */
+ params.check_mac = !vf->p_vf_info.is_trusted_configured;
rc = ecore_sp_eth_vport_start(p_hwfn, ¶ms);
if (rc != ECORE_SUCCESS) {
u8 status = PFVF_STATUS_SUCCESS;
enum _ecore_status_t rc;
+ OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf);
vf->vport_instance--;
vf->spoof_chk = false;
+ if ((ecore_iov_validate_active_rxq(vf)) ||
+ (ecore_iov_validate_active_txq(vf))) {
+ vf->b_malicious = true;
+ DP_NOTICE(p_hwfn, false,
+ "VF [%02x] - considered malicious;"
+ " Unable to stop RX/TX queuess\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_MALICIOUS;
+ goto out;
+ }
+
rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn,
vf->configured_features = 0;
OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+out:
ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
sizeof(struct pfvf_def_resp_tlv), status);
}
else
length = sizeof(struct pfvf_def_resp_tlv);
- p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
- length);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
- /* Update the TLV with the response */
+ /* Update the TLV with the response.
+ * The VF Rx producers are located in the vf zone.
+ */
if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
req = &mbx->req_virt->start_rxq;
- p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
+
+ p_tlv->offset =
+ PXP_VF_BAR0_START_MSDM_ZONE_B +
OFFSETOF(struct mstorm_vf_zone,
non_trigger.eth_rx_queue_producers) +
sizeof(struct eth_rx_prod_data) * req->rx_qid;
ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
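+/* Resolve the qid-usage index for a VF request: legacy VFs (those that
+ * did not advertise the QUEUE_QIDS capability) get fixed Rx/Tx indices,
+ * while newer VFs must supply an explicit CHANNEL_TLV_QID TLV.
+ */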
+static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf, bool b_is_tx)
+{
+ struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
+ struct vfpf_qid_tlv *p_qid_tlv;
+
+ /* Search for the qid TLV if the VF indicated it's going to provide it */
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
+ if (b_is_tx)
+ return ECORE_IOV_LEGACY_QID_TX;
+ else
+ return ECORE_IOV_LEGACY_QID_RX;
+ }
+
+ p_qid_tlv = (struct vfpf_qid_tlv *)
+ ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+ CHANNEL_TLV_QID);
+ if (p_qid_tlv == OSAL_NULL) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%2x]: Failed to provide qid\n",
+ p_vf->relative_vf_id);
+
+ return ECORE_IOV_QID_INVALID;
+ }
+
+ if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%02x]: Provided qid out-of-bounds %02x\n",
+ p_vf->relative_vf_id, p_qid_tlv->qid);
+ return ECORE_IOV_QID_INVALID;
+ }
+
+ return p_qid_tlv->qid;
+}
+
static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
struct ecore_queue_start_common_params params;
+ struct ecore_queue_cid_vf_params vf_params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
+ u8 qid_usage_idx, vf_legacy = 0;
+ struct ecore_vf_queue *p_queue;
struct vfpf_start_rxq_tlv *req;
- bool b_legacy_vf = false;
+ struct ecore_queue_cid *p_cid;
+ struct ecore_sb_info sb_dummy;
enum _ecore_status_t rc;
req = &mbx->req_virt->start_rxq;
- if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
+ ECORE_IOV_VALIDATE_Q_DISABLE) ||
!ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
+
+ p_queue = &vf->vf_queues[req->rx_qid];
+ if (p_queue->cids[qid_usage_idx].p_cid)
+ goto out;
+
+ vf_legacy = ecore_vf_calculate_legacy(vf);
+
+ /* Acquire a new queue-cid */
OSAL_MEMSET(¶ms, 0, sizeof(params));
- params.queue_id = (u8)vf->vf_queues[req->rx_qid].fw_rx_qid;
- params.vf_qid = req->rx_qid;
+ params.queue_id = (u8)p_queue->fw_rx_qid;
params.vport_id = vf->vport_id;
params.stats_id = vf->abs_vf_id + 0x10;
- params.sb = req->hw_sb;
+
+ /* Since IGU index is passed via sb_info, construct a dummy one */
+ OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
+ sb_dummy.igu_sb_id = req->hw_sb;
+ params.p_sb = &sb_dummy;
params.sb_idx = req->sb_index;
- /* Legacy VFs have their Producers in a different location, which they
- * calculate on their own and clean the producer prior to this.
+ OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
+ vf_params.vfid = vf->relative_vf_id;
+ vf_params.vf_qid = (u8)req->rx_qid;
+ vf_params.vf_legacy = vf_legacy;
+ vf_params.qid_usage_idx = qid_usage_idx;
+
+ p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+ ¶ms, true, &vf_params);
+ if (p_cid == OSAL_NULL)
+ goto out;
+
+ /* The VF Rx producers are located in the vf zone.
+ * Legacy VFs have their producers in the queue zone, but they
+ * calculate the location on their own and clean them prior to this.
*/
- if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
- ETH_HSI_VER_NO_PKT_LEN_TUNN)
- b_legacy_vf = true;
- else
+ if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
REG_WR(p_hwfn,
GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
+ MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id,
+ req->rx_qid),
0);
- rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
- vf->vf_queues[req->rx_qid].fw_cid,
- ¶ms,
- req->bd_max_bytes,
- req->rxq_addr,
- req->cqe_pbl_addr,
- req->cqe_pbl_size,
- b_legacy_vf);
-
- if (rc) {
+ rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
+ req->bd_max_bytes,
+ req->rxq_addr,
+ req->cqe_pbl_addr,
+ req->cqe_pbl_size);
+ if (rc != ECORE_SUCCESS) {
status = PFVF_STATUS_FAILURE;
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
} else {
+ p_queue->cids[qid_usage_idx].p_cid = p_cid;
+ p_queue->cids[qid_usage_idx].b_is_tx = false;
status = PFVF_STATUS_SUCCESS;
- vf->vf_queues[req->rx_qid].rxq_active = true;
vf->num_active_rxqs++;
}
out:
- ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf,
- status, b_legacy_vf);
+ ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
+ !!(vf_legacy &
+ ECORE_QCID_LEGACY_VF_RX_PROD));
+}
+
+static void
+ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
+ struct ecore_tunnel_info *p_tun,
+ u16 tunn_feature_mask)
+{
+ p_resp->tunn_feature_mask = tunn_feature_mask;
+ p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
+ p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
+ p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
+ p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
+ p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
+ p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
+ p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
+ p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
+ p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
+ p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
+ p_resp->geneve_udp_port = p_tun->geneve_port.port;
+ p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
+}
+
+static void
+__ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_tun,
+ enum ecore_tunn_mode mask, u8 tun_cls)
+{
+ if (p_req->tun_mode_update_mask & (1 << mask)) {
+ p_tun->b_update_mode = true;
+
+ if (p_req->tunn_mode & (1 << mask))
+ p_tun->b_mode_enabled = true;
+ }
+
+ p_tun->tun_cls = tun_cls;
+}
+
+static void
+ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_tun,
+ struct ecore_tunn_update_udp_port *p_port,
+ enum ecore_tunn_mode mask,
+ u8 tun_cls, u8 update_port, u16 port)
+{
+ if (update_port) {
+ p_port->b_update_port = true;
+ p_port->port = port;
+ }
+
+ __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
+}
+
+static bool
+ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
+{
+ bool b_update_requested = false;
+
+ if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
+ p_req->update_geneve_port || p_req->update_vxlan_port)
+ b_update_requested = true;
+
+ return b_update_requested;
+}
+
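+/* PF-side handler for CHANNEL_TLV_UPDATE_TUNN_PARAM: translate the VF
+ * request into an ecore_tunnel_info, let the OSAL client validate or
+ * modify it, apply it via a PF-update ramrod when required, and echo
+ * the resulting configuration back to the VF.
+ */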
+static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_update_tunn_param_tlv *p_resp;
+ struct vfpf_update_tunn_param_tlv *p_req;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 status = PFVF_STATUS_SUCCESS;
+ bool b_update_required = false;
+ struct ecore_tunnel_info tunn;
+ u16 tunn_feature_mask = 0;
+ int i;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ OSAL_MEM_ZERO(&tunn, sizeof(tunn));
+ p_req = &mbx->req_virt->tunn_param_update;
+
+ if (!ecore_iov_pf_validate_tunn_param(p_req)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No tunnel update requested by VF\n");
+ status = PFVF_STATUS_FAILURE;
+ goto send_resp;
+ }
+
+ tunn.b_update_rx_cls = p_req->update_tun_cls;
+ tunn.b_update_tx_cls = p_req->update_tun_cls;
+
+ ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
+ ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
+ p_req->update_vxlan_port,
+ p_req->vxlan_port);
+ ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
+ ECORE_MODE_L2GENEVE_TUNN,
+ p_req->l2geneve_clss,
+ p_req->update_geneve_port,
+ p_req->geneve_port);
+ __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
+ ECORE_MODE_IPGENEVE_TUNN,
+ p_req->ipgeneve_clss);
+ __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
+ ECORE_MODE_L2GRE_TUNN,
+ p_req->l2gre_clss);
+ __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
+ ECORE_MODE_IPGRE_TUNN,
+ p_req->ipgre_clss);
+
+ /* If the PF modifies the VF's request, it should still return an
+ * error for a partial or modified configuration, as opposed to the
+ * requested one.
+ */
+ rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
+ &b_update_required, &tunn);
+
+ if (rc != ECORE_SUCCESS)
+ status = PFVF_STATUS_FAILURE;
+
+ /* Check whether the ECORE client wants to update anything */
+ if (b_update_required) {
+ u16 geneve_port;
+
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
+ ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+ if (rc != ECORE_SUCCESS)
+ status = PFVF_STATUS_FAILURE;
+
+ geneve_port = p_tun->geneve_port.port;
+ ecore_for_each_vf(p_hwfn, i) {
+ ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
+ p_tun->vxlan_port.port,
+ geneve_port);
+ }
+ }
+
+send_resp:
+ p_resp = ecore_add_tlv(&mbx->offset,
+ CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
+
+ ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
}
static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *p_vf,
+ u32 cid,
u8 status)
{
struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
else
length = sizeof(struct pfvf_def_resp_tlv);
- p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
- length);
- ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
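+ /* The Tx doorbell offset is now derived from the global CID returned
+ * by queue-cid allocation rather than from the per-queue fw_cid.
+ */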
- if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
- u16 qid = mbx->req_virt->start_txq.tx_qid;
-
- p_tlv->offset = DB_ADDR_VF(p_vf->vf_queues[qid].fw_cid,
- DQ_DEMS_LEGACY);
- }
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
+ p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
struct ecore_vf_info *vf)
{
struct ecore_queue_start_common_params params;
+ struct ecore_queue_cid_vf_params vf_params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
- union ecore_qm_pq_params pq_params;
+ struct ecore_vf_queue *p_queue;
struct vfpf_start_txq_tlv *req;
+ struct ecore_queue_cid *p_cid;
+ struct ecore_sb_info sb_dummy;
+ u8 qid_usage_idx, vf_legacy;
+ u32 cid = 0;
enum _ecore_status_t rc;
-
- /* Prepare the parameters which would choose the right PQ */
- OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
- pq_params.eth.is_vf = 1;
- pq_params.eth.vf_id = vf->relative_vf_id;
+ u16 pq;
OSAL_MEMSET(¶ms, 0, sizeof(params));
req = &mbx->req_virt->start_txq;
- if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
+ if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
+ ECORE_IOV_VALIDATE_Q_NA) ||
!ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
- params.queue_id = (u8)vf->vf_queues[req->tx_qid].fw_tx_qid;
- params.qzone_id = (u8)vf->vf_queues[req->tx_qid].fw_tx_qid;
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
+
+ p_queue = &vf->vf_queues[req->tx_qid];
+ if (p_queue->cids[qid_usage_idx].p_cid)
+ goto out;
+
+ vf_legacy = ecore_vf_calculate_legacy(vf);
+
+ /* Acquire a new queue-cid */
+ params.queue_id = p_queue->fw_tx_qid;
params.vport_id = vf->vport_id;
params.stats_id = vf->abs_vf_id + 0x10;
- params.sb = req->hw_sb;
+
+ /* Since IGU index is passed via sb_info, construct a dummy one */
+ OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
+ sb_dummy.igu_sb_id = req->hw_sb;
+ params.p_sb = &sb_dummy;
params.sb_idx = req->sb_index;
- rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
- vf->opaque_fid,
- vf->vf_queues[req->tx_qid].fw_cid,
- ¶ms,
- req->pbl_addr,
- req->pbl_size,
- &pq_params);
+ OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
+ vf_params.vfid = vf->relative_vf_id;
+ vf_params.vf_qid = (u8)req->tx_qid;
+ vf_params.vf_legacy = vf_legacy;
+ vf_params.qid_usage_idx = qid_usage_idx;
- if (rc)
+ p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+ ¶ms, false, &vf_params);
+ if (p_cid == OSAL_NULL)
+ goto out;
+
+ pq = ecore_get_cm_pq_idx_vf(p_hwfn,
+ vf->relative_vf_id);
+ rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
+ req->pbl_addr, req->pbl_size, pq);
+ if (rc != ECORE_SUCCESS) {
status = PFVF_STATUS_FAILURE;
- else {
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ } else {
status = PFVF_STATUS_SUCCESS;
- vf->vf_queues[req->tx_qid].txq_active = true;
+ p_queue->cids[qid_usage_idx].p_cid = p_cid;
+ p_queue->cids[qid_usage_idx].b_is_tx = true;
+ cid = p_cid->cid;
}
out:
- ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
+ ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
+ cid, status);
}
static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *vf,
u16 rxq_id,
- u8 num_rxqs,
+ u8 qid_usage_idx,
bool cqe_completion)
{
+ struct ecore_vf_queue *p_queue;
enum _ecore_status_t rc = ECORE_SUCCESS;
- int qid;
- if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id,
+ ECORE_IOV_VALIDATE_Q_NA)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
+ vf->relative_vf_id, rxq_id, qid_usage_idx);
return ECORE_INVAL;
+ }
- for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
- if (vf->vf_queues[qid].rxq_active) {
- rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
- vf->vf_queues[qid].
- fw_rx_qid, false,
- cqe_completion);
+ p_queue = &vf->vf_queues[rxq_id];
- if (rc)
- return rc;
- }
- vf->vf_queues[qid].rxq_active = false;
- vf->num_active_rxqs--;
+ /* We've validated the index and the existence of the active RXQ -
+ * now we need to make sure that it's using the correct qid.
+ */
+ if (!p_queue->cids[qid_usage_idx].p_cid ||
+ p_queue->cids[qid_usage_idx].b_is_tx) {
+ struct ecore_queue_cid *p_cid;
+
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
+ vf->relative_vf_id, rxq_id, qid_usage_idx,
+ rxq_id, p_cid->qid_usage_idx);
+ return ECORE_INVAL;
}
- return rc;
+ /* Now that we know we have a valid Rx-queue - close it */
+ rc = ecore_eth_rx_queue_stop(p_hwfn,
+ p_queue->cids[qid_usage_idx].p_cid,
+ false, cqe_completion);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
+ vf->num_active_rxqs--;
+
+ return ECORE_SUCCESS;
}
static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *vf,
- u16 txq_id, u8 num_txqs)
+ u16 txq_id,
+ u8 qid_usage_idx)
{
+ struct ecore_vf_queue *p_queue;
enum _ecore_status_t rc = ECORE_SUCCESS;
- int qid;
- if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))
+ if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
+ ECORE_IOV_VALIDATE_Q_NA))
return ECORE_INVAL;
- for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
- if (vf->vf_queues[qid].txq_active) {
- rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
- vf->vf_queues[qid].
- fw_tx_qid);
+ p_queue = &vf->vf_queues[txq_id];
+ if (!p_queue->cids[qid_usage_idx].p_cid ||
+ !p_queue->cids[qid_usage_idx].b_is_tx)
+ return ECORE_INVAL;
- if (rc)
- return rc;
- }
- vf->vf_queues[qid].txq_active = false;
- }
- return rc;
+ rc = ecore_eth_tx_queue_stop(p_hwfn,
+ p_queue->cids[qid_usage_idx].p_cid);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
+ return ECORE_SUCCESS;
}
static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_FAILURE;
struct vfpf_stop_rxqs_tlv *req;
+ u8 qid_usage_idx;
enum _ecore_status_t rc;
- /* We give the option of starting from qid != 0, in this case we
- * need to make sure that qid + num_qs doesn't exceed the actual
- * amount of queues that exist.
+ /* Starting with CHANNEL_TLV_QID, 'num_rxqs' is assumed to be one.
+ * Since no older ecore passed multiple queues using this API,
+ * sanitize the value.
*/
req = &mbx->req_virt->stop_rxqs;
- rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
- req->num_rxqs, req->cqe_completion);
- if (rc)
- status = PFVF_STATUS_FAILURE;
+ if (req->num_rxqs != 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Odd; VF[%d] tried stopping multiple Rx queues\n",
+ vf->relative_vf_id);
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+ /* Find which qid-index is associated with the queue */
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
+
+ rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+ qid_usage_idx, req->cqe_completion);
+ if (rc == ECORE_SUCCESS)
+ status = PFVF_STATUS_SUCCESS;
+out:
ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
length, status);
}
{
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_FAILURE;
struct vfpf_stop_txqs_tlv *req;
+ u8 qid_usage_idx;
enum _ecore_status_t rc;
- /* We give the option of starting from qid != 0, in this case we
- * need to make sure that qid + num_qs doesn't exceed the actual
- * amount of queues that exist.
+ /* Starting with CHANNEL_TLV_QID, 'num_txqs' is assumed to be one.
+ * Since no older ecore passed multiple queues using this API,
+ * sanitize the value.
*/
req = &mbx->req_virt->stop_txqs;
- rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
- if (rc)
- status = PFVF_STATUS_FAILURE;
+ if (req->num_txqs != 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Odd; VF[%d] tried stopping multiple Tx queues\n",
+ vf->relative_vf_id);
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* Find which qid-index is associated with the queue */
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
+
+ rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
+ qid_usage_idx);
+ if (rc == ECORE_SUCCESS)
+ status = PFVF_STATUS_SUCCESS;
+out:
ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
length, status);
}
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
+ struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
struct vfpf_update_rxq_tlv *req;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_FAILURE;
u8 complete_event_flg;
u8 complete_cqe_flg;
- u16 qid;
+ u8 qid_usage_idx;
enum _ecore_status_t rc;
- u8 i;
+ u16 i;
req = &mbx->req_virt->update_rxq;
complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
+ qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+ goto out;
+
+ /* Starting with the addition of CHANNEL_TLV_QID, this API started
+ * expecting a single queue at a time. Validate this.
+ */
+ if ((vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS) &&
+ req->num_rxqs != 1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] supports QIDs but sends multiple queues\n",
+ vf->relative_vf_id);
+ goto out;
+ }
+
+ /* Validate inputs - for the legacy case this is still true since
+ * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
+ */
+ for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
+ ECORE_IOV_VALIDATE_Q_NA) ||
+ !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
+ vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+ vf->relative_vf_id, req->rx_qid,
+ req->num_rxqs);
+ goto out;
+ }
+ }
+
for (i = 0; i < req->num_rxqs; i++) {
- qid = req->rx_qid + i;
+ u16 qid = req->rx_qid + i;
+
+ handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
+ }
+
+ rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
+ req->num_rxqs,
+ complete_cqe_flg,
+ complete_event_flg,
+ ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+ if (rc != ECORE_SUCCESS)
+ goto out;
+
+ status = PFVF_STATUS_SUCCESS;
+out:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
+ length, status);
+}
+
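+/* PF-side handler for CHANNEL_TLV_UPDATE_MTU: apply the VF-requested
+ * MTU through a vport-update ramrod on the VF's vport.
+ */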
+static enum _ecore_status_t
+ecore_iov_vf_pf_update_mtu(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct ecore_sp_vport_update_params params;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct vfpf_update_mtu_tlv *p_req;
+ u8 status = PFVF_STATUS_SUCCESS;
- if (!vf->vf_queues[qid].rxq_active) {
- DP_NOTICE(p_hwfn, true,
- "VF rx_qid = %d isn`t active!\n", qid);
- status = PFVF_STATUS_FAILURE;
- break;
- }
+ /* Validate the VF has an active vport before applying the MTU update */
+ if (!p_vf->vport_instance) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No VPORT instance available for VF[%d], failing MTU update\n",
+ p_vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto send_status;
+ }
- rc = ecore_sp_eth_rx_queues_update(p_hwfn,
- vf->vf_queues[qid].fw_rx_qid,
- 1,
- complete_cqe_flg,
- complete_event_flg,
- ECORE_SPQ_MODE_EBLOCK,
- OSAL_NULL);
+ p_req = &mbx->req_virt->update_mtu;
- if (rc) {
- status = PFVF_STATUS_FAILURE;
- break;
- }
- }
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.opaque_fid = p_vf->opaque_fid;
+ params.vport_id = p_vf->vport_id;
+ params.mtu = p_req->mtu;
+ rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
- ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
- length, status);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+send_status:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ CHANNEL_TLV_UPDATE_MTU,
+ sizeof(struct pfvf_def_resp_tlv),
+ status);
+ return rc;
}
void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
if (p_tlv->type == req_type) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"Extended tlv type %s, length %d found\n",
- ecore_channel_tlvs_string[p_tlv->type],
+ qede_ecore_channel_tlvs_string[p_tlv->type],
p_tlv->length);
return p_tlv;
}
p_data->update_approx_mcast_flg = 1;
OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
- sizeof(unsigned long) *
- ETH_MULTICAST_MAC_BINS_IN_REGS);
+ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
}
struct ecore_vf_info *vf,
struct ecore_sp_vport_update_params *p_data,
struct ecore_rss_params *p_rss,
- struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+ struct ecore_iov_vf_mbx *p_mbx,
+ u16 *tlvs_mask, u16 *tlvs_accepted)
{
struct vfpf_vport_update_rss_tlv *p_rss_tlv;
u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
- u16 i, q_idx, max_q_idx;
+ bool b_reject = false;
u16 table_size;
+ u16 i, q_idx;
p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
VFPF_UPDATE_RSS_KEY_FLAG);
p_rss->rss_enable = p_rss_tlv->rss_enable;
- p_rss->rss_eng_id = vf->relative_vf_id + 1;
+ p_rss->rss_eng_id = vf->rss_eng_id;
p_rss->rss_caps = p_rss_tlv->rss_caps;
p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
- OSAL_MEMCPY(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
- sizeof(p_rss->rss_ind_table));
OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
sizeof(p_rss->rss_key));
table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
(1 << p_rss_tlv->rss_table_size_log));
- max_q_idx = OSAL_ARRAY_SIZE(vf->vf_queues);
-
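+ /* Translate each indirection-table entry from a VF queue index
+ * into the matching Rx queue cid, rejecting invalid queues.
+ */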
for (i = 0; i < table_size; i++) {
- u16 index = vf->vf_queues[0].fw_rx_qid;
+ struct ecore_queue_cid *p_cid;
- q_idx = p_rss->rss_ind_table[i];
- if (q_idx >= max_q_idx)
- DP_NOTICE(p_hwfn, true,
- "rss_ind_table[%d] = %d,"
- " rxq is out of range\n",
- i, q_idx);
- else if (!vf->vf_queues[q_idx].rxq_active)
- DP_NOTICE(p_hwfn, true,
- "rss_ind_table[%d] = %d, rxq is not active\n",
- i, q_idx);
- else
- index = vf->vf_queues[q_idx].fw_rx_qid;
- p_rss->rss_ind_table[i] = index;
+ q_idx = p_rss_tlv->rss_ind_table[i];
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Omitting RSS due to wrong queue %04x\n",
+ vf->relative_vf_id, q_idx);
+ b_reject = true;
+ goto out;
+ }
+
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
+ p_rss->rss_ind_table[i] = p_cid;
}
p_data->rss_params = p_rss;
+out:
*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
+ if (!b_reject)
+ *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
}
static void
ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *vf,
struct ecore_sp_vport_update_params *p_data,
struct ecore_sge_tpa_params *p_sge_tpa,
struct ecore_iov_vf_mbx *p_mbx,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
+ struct ecore_rss_params *p_rss_params = OSAL_NULL;
struct ecore_sp_vport_update_params params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
struct ecore_sge_tpa_params sge_tpa_params;
u16 tlvs_mask = 0, tlvs_accepted = 0;
- struct ecore_rss_params rss_params;
u8 status = PFVF_STATUS_SUCCESS;
u16 length;
enum _ecore_status_t rc;
goto out;
}
+ p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
+ if (p_rss_params == OSAL_NULL) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
OSAL_MEMSET(&params, 0, sizeof(params));
params.opaque_fid = vf->opaque_fid;
params.vport_id = vf->vport_id;
ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
- ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
- mbx, &tlvs_mask);
ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
- ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
+ ecore_iov_vp_update_sge_tpa_param(p_hwfn, &params,
&sge_tpa_params, mbx, &tlvs_mask);
+ tlvs_accepted = tlvs_mask;
+
+ /* Some of the extended TLVs need to be validated first; in that
+ * case they update the mask without updating 'accepted', so the PF
+ * can communicate to the VF that its request was rejected.
+ */
+ ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
+ mbx, &tlvs_mask, &tlvs_accepted);
+
/* Just log a message if there is no single extended tlv in buffer.
* When all features of vport update ramrod would be requested by VF
* as extended TLVs in buffer then an error can be returned in response
* if there is no extended TLV present in buffer.
*/
- tlvs_accepted = tlvs_mask;
-
-#ifndef LINUX_REMOVE
if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
&params, &tlvs_accepted) !=
ECORE_SUCCESS) {
status = PFVF_STATUS_NOT_SUPPORTED;
goto out;
}
-#endif
if (!tlvs_accepted) {
if (tlvs_mask)
"Upper-layer prevents said VF"
" configuration\n");
else
- DP_NOTICE(p_hwfn, true,
- "No feature tlvs found for vport update\n");
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No feature tlvs found for vport update\n");
status = PFVF_STATUS_NOT_SUPPORTED;
goto out;
}
status = PFVF_STATUS_FAILURE;
out:
+ OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
tlvs_mask, tlvs_accepted);
ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
return ECORE_SUCCESS;
+ /* The logic for removing a forced MAC and restoring the shadow MAC
+ * is not implemented, so to keep things simple don't process shadow
+ * MAC copies as long as VF trust mode is ON.
+ */
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ p_vf->p_vf_info.is_trusted_configured)
+ return ECORE_SUCCESS;
+
/* First remove entries and then add new ones */
if (p_params->opcode == ECORE_FILTER_REMOVE) {
for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
goto out;
}
- /* Update shadow copy of the VF configuration */
+ /* Update the shadow copy of the VF configuration. If the shadow
+ * indicates the action should be blocked, return success to the VF
+ * to imitate the firmware behaviour in such a case.
+ */
if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, ¶ms) !=
- ECORE_SUCCESS) {
- status = PFVF_STATUS_FAILURE;
+ ECORE_SUCCESS)
goto out;
- }
/* Determine if the unicast filtering is acceptable by PF */
if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
length, status);
}
+static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_read_coal_resp_tlv *p_resp;
+ struct vfpf_read_coal_req_tlv *req;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct ecore_vf_queue *p_queue;
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u16 coal = 0, qid, i;
+ bool b_is_rx;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+ req = &mbx->req_virt->read_coal_req;
+
+ qid = req->qid;
+ b_is_rx = req->is_rx ? true : false;
+
+ if (b_is_rx) {
+ if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Invalid Rx queue_id = %d\n",
+ p_vf->abs_vf_id, qid);
+ goto send_resp;
+ }
+
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
+ rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
+ if (rc != ECORE_SUCCESS)
+ goto send_resp;
+ } else {
+ if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Invalid Tx queue_id = %d\n",
+ p_vf->abs_vf_id, qid);
+ goto send_resp;
+ }
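+ /* All Tx cids of a queue-zone carry the same coalescing value
+ * (see the set path below), so reading it from the first Tx cid
+ * found is sufficient.
+ */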
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ p_queue = &p_vf->vf_queues[qid];
+ if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
+ (!p_queue->cids[i].b_is_tx))
+ continue;
+
+ p_cid = p_queue->cids[i].p_cid;
+
+ rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
+ p_cid, &coal);
+ if (rc != ECORE_SUCCESS)
+ goto send_resp;
+ break;
+ }
+ }
+
+ status = PFVF_STATUS_SUCCESS;
+
+send_resp:
+ p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
+ sizeof(*p_resp));
+ p_resp->coal = coal;
+
+ ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
+}
+
+static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct vfpf_update_coalesce *req;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct ecore_queue_cid *p_cid;
+ u16 rx_coal, tx_coal;
+ u16 qid;
+ u32 i;
+
+ req = &mbx->req_virt->update_coalesce;
+
+ rx_coal = req->rx_coal;
+ tx_coal = req->tx_coal;
+ qid = req->qid;
+
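+ /* A zero value means "leave this direction unchanged", so only
+ * the directions with non-zero values are validated and applied.
+ */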
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE) &&
+ rx_coal) {
+ DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE) &&
+ tx_coal) {
+ DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
+ vf->abs_vf_id, rx_coal, tx_coal, qid);
+
+ if (rx_coal) {
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
+
+ rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set rx queue = %d coalesce\n",
+ vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
+ goto out;
+ }
+ vf->rx_coal = rx_coal;
+ }
+
+ /* TODO - in the future it might be possible to pass this with
+ * per-cid granularity. For now, apply it to all Tx queues.
+ */
+ if (tx_coal) {
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid == OSAL_NULL)
+ continue;
+
+ if (!p_queue->cids[i].b_is_tx)
+ continue;
+
+ rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
+ p_queue->cids[i].p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set tx queue coalesce\n",
+ vf->abs_vf_id);
+ goto out;
+ }
+ }
+ vf->tx_coal = tx_coal;
+ }
+
+ status = PFVF_STATUS_SUCCESS;
+out:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+enum _ecore_status_t
+ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ u16 vf_id, u16 qid)
+{
+ struct ecore_queue_cid *p_cid;
+ struct ecore_vf_info *vf;
+ struct ecore_ptt *p_ptt;
+ int rc = 0;
+ u32 i;
+
+ if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
+ DP_NOTICE(p_hwfn, true,
+ "VF[%d] - Can not set coalescing: VF is not active\n",
+ vf_id);
+ return ECORE_INVAL;
+ }
+
+ vf = &p_hwfn->pf_iov_info->vfs_array[vf_id];
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
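+ /* Same flow as the VF-channel handler: a zero value leaves that
+ * direction's coalescing unchanged.
+ */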
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE) &&
+ rx_coal) {
+ DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE) &&
+ tx_coal) {
+ DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
+ vf->abs_vf_id, rx_coal, tx_coal, qid);
+
+ if (rx_coal) {
+ p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
+
+ rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set rx queue = %d coalesce\n",
+ vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
+ goto out;
+ }
+ vf->rx_coal = rx_coal;
+ }
+
+ /* TODO - in the future it might be possible to pass this with
+ * per-cid granularity. For now, apply it to all Tx queues.
+ */
+ if (tx_coal) {
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid == OSAL_NULL)
+ continue;
+
+ if (!p_queue->cids[i].b_is_tx)
+ continue;
+
+ rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
+ p_queue->cids[i].p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set tx queue coalesce\n",
+ vf->abs_vf_id);
+ goto out;
+ }
+ }
+ vf->tx_coal = tx_coal;
+ }
+
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
static enum _ecore_status_t
ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
return ECORE_SUCCESS;
}
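+/* One VOQ exists per (port, TC) pair; NUM_OF_TCS already includes the
+ * pure LB TC, so this covers every port/TC combination.
+ */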
+#define MAX_NUM_EXT_VOQS (MAX_NUM_PORTS * NUM_OF_TCS)
+
static enum _ecore_status_t
ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
{
- u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
- int i, cnt;
+ u32 prod, cons[MAX_NUM_EXT_VOQS], distance[MAX_NUM_EXT_VOQS], tmp;
+ u8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port;
+ u8 max_ports_per_engine = p_hwfn->p_dev->num_ports_in_engine;
+ u32 prod_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0;
+ u32 cons_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0;
+ u8 port_id, tc, tc_id = 0, voq = 0;
+ int cnt;
/* Read initial consumers & producers */
- for (i = 0; i < MAX_NUM_VOQS; i++) {
- u32 prod;
-
- cons[i] = ecore_rd(p_hwfn, p_ptt,
- PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
- i * 0x40);
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
+ for (tc = 0; tc < max_phys_tcs_per_port + 1; tc++) {
+ tc_id = (tc < max_phys_tcs_per_port) ?
+ tc :
+ PURE_LB_TC;
+ voq = VOQ(port_id, tc_id, max_phys_tcs_per_port);
+ cons[voq] = ecore_rd(p_hwfn, p_ptt,
+ cons_voq0_addr + voq * 0x40);
prod = ecore_rd(p_hwfn, p_ptt,
- PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
- i * 0x40);
- distance[i] = prod - cons[i];
+ prod_voq0_addr + voq * 0x40);
+ distance[voq] = prod - cons[voq];
+ }
}
/* Wait for consumers to pass the producers */
- i = 0;
+ port_id = 0;
+ tc = 0;
for (cnt = 0; cnt < 50; cnt++) {
- for (; i < MAX_NUM_VOQS; i++) {
- u32 tmp;
-
+ for (; port_id < max_ports_per_engine; port_id++) {
+ /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
+ for (; tc < max_phys_tcs_per_port + 1; tc++) {
+ tc_id = (tc < max_phys_tcs_per_port) ?
+ tc :
+ PURE_LB_TC;
+ voq = VOQ(port_id, tc_id,
+ max_phys_tcs_per_port);
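+ /* A VOQ is considered drained once its consumer has
+ * advanced by at least the initial prod-cons distance.
+ */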
tmp = ecore_rd(p_hwfn, p_ptt,
- PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
- i * 0x40);
- if (distance[i] > tmp - cons[i])
+ cons_voq0_addr + voq * 0x40);
+ if (distance[voq] > tmp - cons[voq])
+ break;
+ }
+
+ if (tc == max_phys_tcs_per_port + 1)
+ tc = 0;
+ else
break;
}
- if (i == MAX_NUM_VOQS)
+ if (port_id == max_ports_per_engine)
break;
OSAL_MSLEEP(20);
}
if (cnt == 50) {
- DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
- p_vf->abs_vf_id, i);
+ DP_ERR(p_hwfn,
+ "VF[%d] - pbf polling failed on VOQ %d [port_id %d, tc_id %d]\n",
+ p_vf->abs_vf_id, voq, port_id, tc_id);
return ECORE_TIMEOUT;
}
ack_vfs[vfid / 32] |= (1 << (vfid % 32));
p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
~(1ULL << (rel_vf_id % 64));
- p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
- ~(1ULL << (rel_vf_id % 64));
+ p_vf->vf_mbx.b_pending_msg = false;
}
return rc;
enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 ack_vfs[VF_MAX_STATIC / 32];
+ u32 ack_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
enum _ecore_status_t rc = ECORE_SUCCESS;
u16 i;
- OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+ OSAL_MEM_ZERO(ack_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);
/* Since BRB <-> PRS interface can't be tested as part of the flr
* polling due to HW limitations, simply sleep a bit. And since
ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 rel_vf_id)
{
- u32 ack_vfs[VF_MAX_STATIC / 32];
+ u32 ack_vfs[EXT_VF_BITMAP_SIZE_IN_DWORDS];
enum _ecore_status_t rc = ECORE_SUCCESS;
- OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+ OSAL_MEM_ZERO(ack_vfs, EXT_VF_BITMAP_SIZE_IN_BYTES);
/* Wait instead of polling the BRB <-> PRS interface */
OSAL_MSLEEP(100);
u16 i;
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
- for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+
+ for (i = 0; i < VF_BITMAP_SIZE_IN_DWORDS; i++)
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"[%08x,...,%08x]: %08x\n",
i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
p_bulletin = p_vf->bulletin.p_virt;
if (p_params)
- __ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
+ __ecore_vf_get_link_params(p_params, p_bulletin);
if (p_link)
- __ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
+ __ecore_vf_get_link_state(p_link, p_bulletin);
if (p_caps)
- __ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+ __ecore_vf_get_link_caps(p_caps, p_bulletin);
}
void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
mbx = &p_vf->vf_mbx;
/* ecore_iov_process_mbx_request */
- DP_VERBOSE(p_hwfn,
- ECORE_MSG_IOV,
- "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
+#ifndef CONFIG_ECORE_SW_CHANNEL
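+ /* When a SW channel is in use, requests can be processed without
+ * the pending flag being set, so the check is compiled out.
+ */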
+ if (!mbx->b_pending_msg) {
+ DP_NOTICE(p_hwfn, true,
+ "VF[%02x]: Trying to process mailbox message when none is pending\n",
+ p_vf->abs_vf_id);
+ return;
+ }
+ mbx->b_pending_msg = false;
+#endif
mbx->first_tlv = mbx->req_virt->first_tlv;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%02x]: Processing mailbox message [type %04x]\n",
+ p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+
OSAL_IOV_VF_MSG_TYPE(p_hwfn,
p_vf->relative_vf_id,
mbx->first_tlv.tl.type);
case CHANNEL_TLV_RELEASE:
ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
break;
+ case CHANNEL_TLV_UPDATE_TUNN_PARAM:
+ ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_COALESCE_UPDATE:
+ ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_COALESCE_READ:
+ ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UPDATE_MTU:
+ ecore_iov_vf_pf_update_mtu(p_hwfn, p_ptt, p_vf);
+ break;
}
} else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
/* If we've received a message from a VF we consider malicious
#endif
}
-void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
+void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
+ u64 *events)
{
- u64 add_bit = 1ULL << (vfid % 64);
+ int i;
- /* TODO - add locking mechanisms [no atomics in ecore, so we can't
- * add the lock inside the ecore_pf_iov struct].
- */
- p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
-}
+ OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
-void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
- u64 *events)
-{
- u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
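+ /* The pending-message indication now lives per-VF in its mailbox,
+ * so build the events bitmap by scanning all active VFs.
+ */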
+ ecore_for_each_vf(p_hwfn, i) {
+ struct ecore_vf_info *p_vf;
- /* TODO - Take a lock */
- OSAL_MEMCPY(events, p_pending_events,
- sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
- OSAL_MEMSET(p_pending_events, 0,
- sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
+ p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
+ if (p_vf->vf_mbx.b_pending_msg)
+ events[i / 64] |= 1ULL << (i % 64);
+ }
}
static struct ecore_vf_info *
*/
p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
+ p_vf->vf_mbx.b_pending_msg = true;
+
return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
}
{
struct ecore_vf_info *p_vf;
- p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vfId);
+ p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
if (!p_vf)
return;
- DP_INFO(p_hwfn,
- "VF [%d] - Malicious behavior [%02x]\n",
- p_vf->abs_vf_id, p_data->errId);
+ if (!p_vf->b_malicious) {
+ DP_NOTICE(p_hwfn, false,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->err_id);
- p_vf->b_malicious = true;
+ p_vf->b_malicious = true;
+ } else {
+ DP_INFO(p_hwfn,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->err_id);
+ }
OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
}
-enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
- u8 opcode,
- __le16 echo,
- union event_ring_data *data)
+static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data,
+ u8 OSAL_UNUSED fw_return_code)
{
switch (opcode) {
case COMMON_EVENT_VF_PF_CHANNEL:
return i;
out:
- return MAX_NUM_VFS;
+ return MAX_NUM_VFS_K2;
}
enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *ptt, int vfid)
{
- struct ecore_dmae_params params;
+ struct dmae_params params;
struct ecore_vf_info *vf_info;
vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info)
return ECORE_INVAL;
- OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
- params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
- params.src_vfid = vf_info->abs_vf_id;
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ SET_FIELD(params.flags, DMAE_PARAMS_SRC_VF_VALID, 0x1);
+ SET_FIELD(params.flags, DMAE_PARAMS_COMPLETION_DST, 0x1);
+ params.src_vf_id = vf_info->abs_vf_id;
if (ecore_dmae_host2host(p_hwfn, ptt,
vf_info->vf_mbx.pending_req,
return;
}
- feature = 1 << MAC_ADDR_FORCED;
- OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ vf_info->p_vf_info.is_trusted_configured) {
+ feature = 1 << VFPF_BULLETIN_MAC_ADDR;
+ /* Trust mode will disable Forced MAC */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~(1 << MAC_ADDR_FORCED);
+ } else {
+ feature = 1 << MAC_ADDR_FORCED;
+ /* Forced MAC will disable MAC_ADDR */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~(1 << VFPF_BULLETIN_MAC_ADDR);
+ }
+
+ OSAL_MEMCPY(vf_info->bulletin.p_virt->mac,
+ mac, ETH_ALEN);
vf_info->bulletin.p_virt->valid_bitmap |= feature;
- /* Forced MAC will disable MAC_ADDR */
- vf_info->bulletin.p_virt->valid_bitmap &=
- ~(1 << VFPF_BULLETIN_MAC_ADDR);
ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ if (p_hwfn->pf_params.eth_pf_params.allow_vf_mac_change ||
+ vf_info->p_vf_info.is_trusted_configured)
+ ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+
return ECORE_SUCCESS;
}
+#ifndef LINUX_REMOVE
enum _ecore_status_t
ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
bool b_untagged_only, int vfid)
*opaque_fid = vf_info->opaque_fid;
}
-
-void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
- u8 *p_vort_id)
-{
- struct ecore_vf_info *vf_info;
-
- vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
- if (!vf_info)
- return;
-
- *p_vort_id = vf_info->vport_id;
-}
+#endif
void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
u16 pvid, int vfid)
ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
+void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
+ int vfid, u16 vxlan_port, u16 geneve_port)
+{
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set udp ports, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ if (vf_info->b_malicious) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Can not set udp ports to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
+
+ vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
+ vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
+}
+
bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
{
struct ecore_vf_info *p_vf_info;
return sizeof(union pfvf_tlvs);
}
+u8 *ecore_iov_bulletin_get_mac(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return OSAL_NULL;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap &
+ (1 << VFPF_BULLETIN_MAC_ADDR)))
+ return OSAL_NULL;
+
+ return p_vf->bulletin.p_virt->mac;
+}
+
u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
{
struct ecore_vf_info *p_vf;
{
struct ecore_vf_info *vf;
u8 abs_vp_id = 0;
+ u16 rl_id;
enum _ecore_status_t rc;
vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (rc != ECORE_SUCCESS)
return rc;
- return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+ rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */
+ return ecore_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
}
enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
int vfid, u32 rate)
{
struct ecore_vf_info *vf;
- u8 vport_id;
int i;
for_each_hwfn(p_dev, i) {
if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
DP_NOTICE(p_hwfn, true,
- "SR-IOV sanity check failed,"
- " can't set min rate\n");
+ "SR-IOV sanity check failed, can't set min rate\n");
return ECORE_INVAL;
}
}
vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
- vport_id = vf->vport_id;
+ if (!vf) {
+ DP_NOTICE(p_dev, true,
+ "Getting vf info failed, can't set min rate\n");
+ return ECORE_INVAL;
+ }
- return ecore_configure_vport_wfq(p_dev, vport_id, rate);
+ return ecore_configure_vport_wfq(p_dev, vf->vport_id, rate);
}
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
}
-enum _ecore_status_t
+int
ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
{
struct ecore_wfq_data *vf_vp_wfq;
else
return 0;
}
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
+ bool b_is_hw)
+{
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return;
+
+ vf_info->b_hw_channel = b_is_hw;
+}
+#endif