u16 mtu;
};
-struct ecore_hw_cid_data {
- u32 cid;
- bool b_cid_allocated;
- u8 vfid; /* 1-based; 0 signals this is for a PF */
-
- /* Additional identifiers */
- u16 opaque_fid;
- u8 vport_id;
-};
-
/* maximum size of read/write commands (HW limit) */
#define DMAE_MAX_RW_SIZE 0x2000
struct ecore_mcp_info *mcp_info;
struct ecore_dcbx_info *p_dcbx_info;
- struct ecore_hw_cid_data *p_tx_cids;
- struct ecore_hw_cid_data *p_rx_cids;
-
struct ecore_dmae_info dmae_info;
/* QM init */
OSAL_FREE(p_dev, p_dev->reset_stats);
- for_each_hwfn(p_dev, i) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-
- OSAL_FREE(p_dev, p_hwfn->p_tx_cids);
- OSAL_FREE(p_dev, p_hwfn->p_rx_cids);
- }
-
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
if (!p_dev->fw_data)
return ECORE_NOMEM;
- /* Allocate Memory for the Queue->CID mapping */
- for_each_hwfn(p_dev, i) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
- u32 num_tx_conns = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
- int tx_size, rx_size;
-
- /* @@@TMP - resc management, change to actual required size */
- if (p_hwfn->pf_params.eth_pf_params.num_cons > num_tx_conns)
- num_tx_conns = p_hwfn->pf_params.eth_pf_params.num_cons;
- tx_size = sizeof(struct ecore_hw_cid_data) * num_tx_conns;
- rx_size = sizeof(struct ecore_hw_cid_data) *
- RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
-
- p_hwfn->p_tx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- tx_size);
- if (!p_hwfn->p_tx_cids) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate memory for Tx Cids\n");
- goto alloc_no_mem;
- }
-
- p_hwfn->p_rx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- rx_size);
- if (!p_hwfn->p_rx_cids) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate memory for Rx Cids\n");
- goto alloc_no_mem;
- }
- }
-
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
u32 n_eqes, num_cons;
p_sb_cnt_info->sb_free_blk = info->free_blks;
}
-u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
-{
- struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
-
- /* Determine origin of SB id */
- if ((sb_id >= p_info->igu_base_sb) &&
- (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
- return sb_id - p_info->igu_base_sb;
- } else if ((sb_id >= p_info->igu_base_sb_iov) &&
- (sb_id < p_info->igu_base_sb_iov +
- p_info->igu_sb_cnt_iov)) {
- /* We want the first VF queue to be adjacent to the
- * last PF queue. Since L2 queues can be partial to
- * SBs, we'll use the feature instead.
- */
- return sb_id - p_info->igu_base_sb_iov +
- FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE);
- } else {
- DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n",
- sb_id);
- return 0;
- }
-}
-
void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
{
int i;
*/
void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
-/**
- * @brief - Returns an Rx queue index appropriate for usage with given SB.
- *
- * @param p_hwfn
- * @param sb_id - absolute index of SB
- *
- * @return index of Rx queue
- */
-u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
-
/**
* @brief - Enable Interrupt & Attention for hw function
*
u16 forced_vlan;
};
+struct ecore_iov_vf_init_params {
+ u16 rel_vf_id;
+
+ /* Number of requested Queues; Currently we don't support different
+ * numbers of Rx and Tx queues.
+ */
+ /* TODO - remove this limitation */
+ u16 num_queues;
+
+ /* Allow the client to choose which qzones to use for Rx/Tx,
+ * and which queue_base to use for Tx queues on a per-queue basis.
+ * Notice values should be relative to the PF resources.
+ */
+ u16 req_rx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
+ u16 req_tx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
+};
+
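As an aside (not part of the patch itself), here is a minimal sketch of how a PF-side client might fill the new structure before calling ecore_iov_init_hw_for_vf(). The helper name example_init_vf and the linear qzone assignment are assumptions for illustration only; the valid qzone window mirrors the sanity check added to ecore_iov_init_hw_for_vf() further below.

	static enum _ecore_status_t
	example_init_vf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			u16 rel_vf_id, u16 num_queues)
	{
		struct ecore_iov_vf_init_params params;
		u16 base = (u16)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE);
		u16 i;

		if (num_queues > ECORE_MAX_VF_CHAINS_PER_PF)
			return ECORE_INVAL;

		OSAL_MEMSET(&params, 0, sizeof(params));
		params.rel_vf_id = rel_vf_id;
		params.num_queues = num_queues;

		/* Simple linear assignment inside the VF qzone range;
		 * a real client may use any policy it likes.
		 */
		for (i = 0; i < num_queues; i++) {
			params.req_rx_queue[i] = base + rel_vf_id * num_queues + i;
			params.req_tx_queue[i] = params.req_rx_queue[i];
		}

		return ecore_iov_init_hw_for_vf(p_hwfn, p_ptt, &params);
	}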
#ifdef CONFIG_ECORE_SW_CHANNEL
/* This is SW channel related only... */
enum mbx_state {
*
* @param p_hwfn
* @param p_ptt
- * @param rel_vf_id
- * @param num_rx_queues
+ * @param p_params
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u16 rel_vf_id,
- u16 num_rx_queues);
+ struct ecore_iov_vf_init_params
+ *p_params);
/**
* @brief ecore_iov_process_mbx_req - process a request received
#define ECORE_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
+void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
+ if (!p_cid->is_vf && IS_PF(p_hwfn->p_dev))
+ ecore_cxt_release_cid(p_hwfn, p_cid->cid);
+ OSAL_VFREE(p_hwfn->p_dev, p_cid);
+}
+
+/* This internal version is only meant to be called directly by PFs
+ * initializing CIDs for their VFs.
+ */
+struct ecore_queue_cid *
+_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid, u32 cid, u8 vf_qid,
+ struct ecore_queue_start_common_params *p_params)
+{
+ bool b_is_same = (p_hwfn->hw_info.opaque_fid == opaque_fid);
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc;
+
+ p_cid = OSAL_VALLOC(p_hwfn->p_dev, sizeof(*p_cid));
+ if (p_cid == OSAL_NULL)
+ return OSAL_NULL;
+ OSAL_MEM_ZERO(p_cid, sizeof(*p_cid));
+
+ p_cid->opaque_fid = opaque_fid;
+ p_cid->cid = cid;
+ p_cid->vf_qid = vf_qid;
+ p_cid->rel = *p_params;
+
+ /* Don't try calculating the absolute indices for VFs */
+ if (IS_VF(p_hwfn->p_dev)) {
+ p_cid->abs = p_cid->rel;
+ goto out;
+ }
+
+ /* Calculate the engine-absolute indices of the resources.
+ * This would guarantee they're valid later on.
+ * In some cases [SBs] we already have the right values.
+ */
+ rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
+ if (rc != ECORE_SUCCESS)
+ goto fail;
+
+ rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id,
+ &p_cid->abs.queue_id);
+ if (rc != ECORE_SUCCESS)
+ goto fail;
+
+ /* In case of a PF configuring its VF's queues, the stats-id is already
+ * absolute [since there's a single index that's suitable per-VF].
+ */
+ if (b_is_same) {
+ rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id,
+ &p_cid->abs.stats_id);
+ if (rc != ECORE_SUCCESS)
+ goto fail;
+ } else {
+ p_cid->abs.stats_id = p_cid->rel.stats_id;
+ }
+
+ /* SBs relevant information was already provided as absolute */
+ p_cid->abs.sb = p_cid->rel.sb;
+ p_cid->abs.sb_idx = p_cid->rel.sb_idx;
+
+ /* This is tricky - we're actually interested in whether this is a PF
+ * entry meant for the VF.
+ */
+ if (!b_is_same)
+ p_cid->is_vf = true;
+out:
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
+ p_cid->opaque_fid, p_cid->cid,
+ p_cid->rel.vport_id, p_cid->abs.vport_id,
+ p_cid->rel.queue_id, p_cid->abs.queue_id,
+ p_cid->rel.stats_id, p_cid->abs.stats_id,
+ p_cid->abs.sb, p_cid->abs.sb_idx);
+
+ return p_cid;
+
+fail:
+ OSAL_VFREE(p_hwfn->p_dev, p_cid);
+ return OSAL_NULL;
+}
+
+static struct ecore_queue_cid *
+ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params)
+{
+ struct ecore_queue_cid *p_cid;
+ u32 cid = 0;
+
+ /* Get a unique firmware CID for this queue, in case it's a PF.
+ * VFs don't need a CID as the queue configuration will be done
+ * by the PF.
+ */
+ if (IS_PF(p_hwfn->p_dev)) {
+ if (ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+ &cid) != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
+ return OSAL_NULL;
+ }
+ }
+
+ p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 0, p_params);
+ if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev))
+ ecore_cxt_release_cid(p_hwfn, cid);
+
+ return p_cid;
+}
+
enum _ecore_status_t
ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_start_params *p_params)
return 0;
}
-static void ecore_sp_release_queue_cid(struct ecore_hwfn *p_hwfn,
- struct ecore_hw_cid_data *p_cid_data)
-{
- if (!p_cid_data->b_cid_allocated)
- return;
-
- ecore_cxt_release_cid(p_hwfn, p_cid_data->cid);
- p_cid_data->b_cid_allocated = false;
-}
-
enum _ecore_status_t
-ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct ecore_queue_start_common_params *p_params,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size, bool b_use_zone_a_prod)
+ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size)
{
struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- struct ecore_hw_cid_data *p_rx_cid;
- u16 abs_rx_q_id = 0;
- u8 abs_vport_id = 0;
enum _ecore_status_t rc = ECORE_NOTIMPL;
- /* Store information for the stop */
- p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
- p_rx_cid->cid = cid;
- p_rx_cid->opaque_fid = opaque_fid;
- p_rx_cid->vport_id = p_params->vport_id;
-
- rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id);
- if (rc != ECORE_SUCCESS)
- return rc;
-
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
- opaque_fid, cid, p_params->queue_id,
- p_params->vport_id, p_params->sb);
+ "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+ p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
+ p_cid->abs.vport_id, p_cid->abs.sb);
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
- init_data.cid = cid;
- init_data.opaque_fid = opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
p_ramrod = &p_ent->ramrod.rx_queue_start;
- p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
- p_ramrod->sb_index = (u8)p_params->sb_idx;
- p_ramrod->vport_id = abs_vport_id;
- p_ramrod->stats_counter_id = p_params->stats_id;
- p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
+ p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+ p_ramrod->stats_counter_id = p_cid->abs.stats_id;
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
p_ramrod->complete_cqe_flg = 0;
p_ramrod->complete_event_flg = 1;
p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
- if (p_params->vf_qid || b_use_zone_a_prod) {
- p_ramrod->vf_rx_prod_index = (u8)p_params->vf_qid;
+ if (p_cid->is_vf) {
+ p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Queue%s is meant for VF rxq[%02x]\n",
- b_use_zone_a_prod ? " [legacy]" : "",
- p_params->vf_qid);
- p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod;
+ !!p_cid->b_legacy_vf ? " [legacy]" : "",
+ p_cid->vf_qid);
+ p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
}
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
-enum _ecore_status_t
-ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- struct ecore_queue_start_common_params *p_params,
+static enum _ecore_status_t
+ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size,
- void OSAL_IOMEM * *pp_prod)
+ void OSAL_IOMEM * *pp_producer)
{
- struct ecore_hw_cid_data *p_rx_cid;
u32 init_prod_val = 0;
- u16 abs_l2_queue = 0;
- u8 abs_stats_id = 0;
- enum _ecore_status_t rc;
-
- if (IS_VF(p_hwfn->p_dev)) {
- return ecore_vf_pf_rxq_start(p_hwfn,
- (u8)p_params->queue_id,
- p_params->sb,
- (u8)p_params->sb_idx,
- bd_max_bytes,
- bd_chain_phys_addr,
- cqe_pbl_addr,
- cqe_pbl_size, pp_prod);
- }
-
- rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue);
- if (rc != ECORE_SUCCESS)
- return rc;
- rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);
+ *pp_producer = (u8 OSAL_IOMEM *)
+ p_hwfn->regview +
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
- __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+ __internal_ram_wr(p_hwfn, *pp_producer, sizeof(u32),
(u32 *)(&init_prod_val));
+ return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size);
+}
+
+enum _ecore_status_t
+ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *p_ret_params)
+{
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc;
+
/* Allocate a CID for the queue */
- p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
- rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
- &p_rx_cid->cid);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
- return rc;
- }
- p_rx_cid->b_cid_allocated = true;
- p_params->stats_id = abs_stats_id;
- p_params->vf_qid = 0;
-
- rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn,
- opaque_fid,
- p_rx_cid->cid,
- p_params,
+ p_cid = ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
+ if (p_cid == OSAL_NULL)
+ return ECORE_NOMEM;
+
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size,
+ &p_ret_params->p_prod);
+ else
+ rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid,
bd_max_bytes,
bd_chain_phys_addr,
cqe_pbl_addr,
cqe_pbl_size,
- false);
+ &p_ret_params->p_prod);
+ /* Provide the caller with the queue-cid as an opaque handle */
if (rc != ECORE_SUCCESS)
- ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ else
+ p_ret_params->p_handle = (void *)p_cid;
return rc;
}
enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
+ void **pp_rxq_handles,
u8 num_rxqs,
u8 complete_cqe_flg,
u8 complete_event_flg,
struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- struct ecore_hw_cid_data *p_rx_cid;
- u16 qid, abs_rx_q_id = 0;
+ struct ecore_queue_cid *p_cid;
enum _ecore_status_t rc = ECORE_NOTIMPL;
u8 i;
if (IS_VF(p_hwfn->p_dev))
return ecore_vf_pf_rxqs_update(p_hwfn,
- rx_queue_id,
+ (struct ecore_queue_cid **)
+ pp_rxq_handles,
num_rxqs,
complete_cqe_flg,
complete_event_flg);
init_data.p_comp_data = p_comp_data;
for (i = 0; i < num_rxqs; i++) {
- qid = rx_queue_id + i;
- p_rx_cid = &p_hwfn->p_rx_cids[qid];
+ p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i];
/* Get SPQ entry */
- init_data.cid = p_rx_cid->cid;
- init_data.opaque_fid = p_rx_cid->opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
ETH_RAMROD_RX_QUEUE_UPDATE,
return rc;
p_ramrod = &p_ent->ramrod.rx_queue_update;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
- ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
- ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
- p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
p_ramrod->complete_cqe_flg = complete_cqe_flg;
p_ramrod->complete_event_flg = complete_event_flg;
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
- if (rc)
+ if (rc != ECORE_SUCCESS)
return rc;
}
return rc;
}
-enum _ecore_status_t
-ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
- bool eq_completion_only, bool cqe_completion)
+static enum _ecore_status_t
+ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ bool b_eq_completion_only,
+ bool b_cqe_completion)
{
- struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- u16 abs_rx_q_id = 0;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
-
- if (IS_VF(p_hwfn->p_dev))
- return ecore_vf_pf_rxq_stop(p_hwfn, rx_queue_id,
- cqe_completion);
+ enum _ecore_status_t rc;
- /* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
- init_data.cid = p_rx_cid->cid;
- init_data.opaque_fid = p_rx_cid->opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
return rc;
p_ramrod = &p_ent->ramrod.rx_queue_stop;
-
- ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
- ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
- p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
/* Cleaning the queue requires the completion to arrive there.
* In addition, VFs require the answer to come as eqe to PF.
*/
- p_ramrod->complete_cqe_flg = (!!(p_rx_cid->opaque_fid ==
- p_hwfn->hw_info.opaque_fid) &&
- !eq_completion_only) || cqe_completion;
- p_ramrod->complete_event_flg = !(p_rx_cid->opaque_fid ==
- p_hwfn->hw_info.opaque_fid) ||
- eq_completion_only;
+ p_ramrod->complete_cqe_flg = (!p_cid->is_vf && !b_eq_completion_only) ||
+ b_cqe_completion;
+ p_ramrod->complete_event_flg = p_cid->is_vf || b_eq_completion_only;
- rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
- if (rc != ECORE_SUCCESS)
- return rc;
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
- ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);
+enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_rxq,
+ bool eq_completion_only,
+ bool cqe_completion)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid,
+ eq_completion_only,
+ cqe_completion);
+ else
+ rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);
+ if (rc == ECORE_SUCCESS)
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
return rc;
}
enum _ecore_status_t
-ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct ecore_queue_start_common_params *p_params,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- u16 pq_id)
+ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ u16 pq_id)
{
struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- struct ecore_hw_cid_data *p_tx_cid;
- u16 abs_tx_qzone_id = 0;
enum _ecore_status_t rc = ECORE_NOTIMPL;
- u8 abs_vport_id;
-
- /* Store information for the stop */
- p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
- p_tx_cid->cid = cid;
- p_tx_cid->opaque_fid = opaque_fid;
-
- rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- rc = ecore_fw_l2_queue(p_hwfn, p_params->qzone_id, &abs_tx_qzone_id);
- if (rc != ECORE_SUCCESS)
- return rc;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
- init_data.cid = cid;
- init_data.opaque_fid = opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
return rc;
p_ramrod = &p_ent->ramrod.tx_queue_start;
- p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
- p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
- p_ramrod->sb_index = (u8)p_params->sb_idx;
- p_ramrod->stats_counter_id = p_params->stats_id;
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
+ p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->stats_counter_id = p_cid->abs.stats_id;
- p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(abs_tx_qzone_id);
- p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(abs_tx_qzone_id);
+ p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
+ p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
-enum _ecore_status_t
-ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- struct ecore_queue_start_common_params *p_params,
+static enum _ecore_status_t
+ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
u8 tc,
- dma_addr_t pbl_addr,
- u16 pbl_size,
+ dma_addr_t pbl_addr, u16 pbl_size,
void OSAL_IOMEM * *pp_doorbell)
{
- struct ecore_hw_cid_data *p_tx_cid;
- u8 abs_stats_id = 0;
enum _ecore_status_t rc;
- if (IS_VF(p_hwfn->p_dev)) {
- return ecore_vf_pf_txq_start(p_hwfn,
- p_params->queue_id,
- p_params->sb,
- (u8)p_params->sb_idx,
- pbl_addr,
- pbl_size,
- pp_doorbell);
- }
-
- rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
+ /* TODO - set tc in the pq_params for multi-cos */
+ rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
+ pbl_addr, pbl_size,
+ ecore_get_cm_pq_idx_mcos(p_hwfn, tc));
if (rc != ECORE_SUCCESS)
return rc;
- p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
- OSAL_MEMSET(p_tx_cid, 0, sizeof(*p_tx_cid));
+ /* Provide the caller with the necessary return values */
+ *pp_doorbell = (u8 OSAL_IOMEM *)
+ p_hwfn->doorbells +
+ DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY);
- /* Allocate a CID for the queue */
- rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
- return rc;
- }
- p_tx_cid->b_cid_allocated = true;
+ return ECORE_SUCCESS;
+}
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
- opaque_fid, p_tx_cid->cid, p_params->queue_id,
- p_params->vport_id, p_params->sb);
+enum _ecore_status_t
+ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u8 tc,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ struct ecore_txq_start_ret_params *p_ret_params)
+{
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc;
- p_params->stats_id = abs_stats_id;
+ p_cid = ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
+ if (p_cid == OSAL_NULL)
+ return ECORE_INVAL;
- /* TODO - set tc in the pq_params for multi-cos */
- rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
- opaque_fid,
- p_tx_cid->cid,
- p_params,
- pbl_addr,
- pbl_size,
- ecore_get_cm_pq_idx_mcos(p_hwfn,
- tc));
-
- *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
- DB_ADDR(p_tx_cid->cid, DQ_DEMS_LEGACY);
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
+ pbl_addr, pbl_size,
+ &p_ret_params->p_doorbell);
+ else
+ rc = ecore_vf_pf_txq_start(p_hwfn, p_cid,
+ pbl_addr, pbl_size,
+ &p_ret_params->p_doorbell);
if (rc != ECORE_SUCCESS)
- ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ else
+ p_ret_params->p_handle = (void *)p_cid;
return rc;
}
-enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn)
-{
- return ECORE_NOTIMPL;
-}
-
-enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
- u16 tx_queue_id)
+static enum _ecore_status_t
+ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
{
- struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
-
- if (IS_VF(p_hwfn->p_dev))
- return ecore_vf_pf_txq_stop(p_hwfn, tx_queue_id);
+ enum _ecore_status_t rc;
- /* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
- init_data.cid = p_tx_cid->cid;
- init_data.opaque_fid = p_tx_cid->opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
if (rc != ECORE_SUCCESS)
return rc;
- rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
- if (rc != ECORE_SUCCESS)
- return rc;
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_handle)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
+ enum _ecore_status_t rc;
+
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid);
+ else
+ rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid);
- ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);
+ if (rc == ECORE_SUCCESS)
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
return rc;
}
#include "ecore_spq.h"
#include "ecore_l2_api.h"
-/**
- * @brief ecore_sp_eth_tx_queue_update -
- *
- * This ramrod updates a TX queue. It is used for setting the active
- * state of the queue.
- *
- * @note Final phase API.
- *
- * @param p_hwfn
- *
- * @return enum _ecore_status_t
- */
-enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn);
+struct ecore_queue_cid {
+ /* 'Relative' is a relative term ;-). Usually the indices [not counting
+ * SBs] would be PF-relative, but there are some cases where that isn't
+ * the case - specifically for a PF configuring its VF indices it's
+ * possible some fields [E.g., stats-id] in 'rel' would already be abs.
+ */
+ struct ecore_queue_start_common_params rel;
+ struct ecore_queue_start_common_params abs;
+ u32 cid;
+ u16 opaque_fid;
+
+ /* VFs' queues are mapped differently, so we need to know the
+ * relative queue associated with them [0-based].
+ * Notice this is relevant on the *PF* queue-cid of its VF's queues,
+ * and not on the VF itself.
+ */
+ bool is_vf;
+ u8 vf_qid;
+
+ /* Legacy VFs might have Rx producer located elsewhere */
+ bool b_legacy_vf;
+};
+
+void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid);
+
+struct ecore_queue_cid *
+_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid, u32 cid, u8 vf_qid,
+ struct ecore_queue_start_common_params *p_params);
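To make the split between the public and the internal flavors concrete, below is a minimal sketch (an assumption, not code from this patch) of a PF preparing a queue-cid on behalf of one of its VFs via _ecore_eth_queue_to_cid(). The helper example_pf_vf_queue_cid and its parameter list are illustrative; the real users are the IOV mailbox handlers shown further below, and the returned handle is eventually freed with ecore_eth_queue_cid_release().

	static struct ecore_queue_cid *
	example_pf_vf_queue_cid(struct ecore_hwfn *p_hwfn, u16 vf_opaque_fid,
				u32 fw_cid, u8 vf_qid, u16 fw_rx_qid,
				u8 vport_id, u8 abs_vf_id,
				u16 hw_sb, u8 sb_index)
	{
		struct ecore_queue_start_common_params params;

		OSAL_MEMSET(&params, 0, sizeof(params));
		params.queue_id = fw_rx_qid;		/* PF-relative qzone */
		params.vport_id = vport_id;
		params.stats_id = abs_vf_id + 0x10;	/* already absolute for a VF */
		params.sb = hw_sb;			/* absolute SB */
		params.sb_idx = sb_index;

		/* cid is 0-based per-VF; vf_qid is the VF-relative queue */
		return _ecore_eth_queue_to_cid(p_hwfn, vf_opaque_fid, fw_cid,
					       vf_qid, &params);
	}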
enum _ecore_status_t
ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_start_params *p_params);
/**
- * @brief - Starts an Rx queue; Should be used where contexts are handled
- * outside of the ramrod area [specifically iov scenarios]
+ * @brief - Starts an Rx queue for an already-prepared queue-cid
*
* @param p_hwfn
- * @param opaque_fid
- * @param cid
- * @param p_params [queue_id, vport_id, stats_id, sb, sb_idx, vf_qid]
- stats_id is absolute packed in p_params.
+ * @param p_cid
* @param bd_max_bytes
* @param bd_chain_phys_addr
* @param cqe_pbl_addr
* @param cqe_pbl_size
- * @param b_use_zone_a_prod - support legacy VF producers
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct ecore_queue_start_common_params *p_params,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size, bool b_use_zone_a_prod);
+ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size);
/**
- * @brief - Starts a Tx queue; Should be used where contexts are handled
- * outside of the ramrod area [specifically iov scenarios]
+ * @brief - Starts a Tx queue for an already-prepared queue-cid
*
* @param p_hwfn
- * @param opaque_fid
- * @param cid
- * @param p_params [queue_id, vport_id,stats_id, sb, sb_idx, vf_qid]
+ * @param p_cid
* @param pbl_addr
* @param pbl_size
* @param p_pq_params - parameters for choosing the PQ for this Tx queue
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct ecore_queue_start_common_params *p_params,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- u16 pq_id);
+ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ u16 pq_id);
u8 ecore_mcast_bin_from_mac(u8 *mac);
#endif
struct ecore_queue_start_common_params {
- /* Rx/Tx queue relative id to keep obtained cid in corresponding array
- * RX - upper-bounded by number of FW-queues
- */
- u16 queue_id;
+ /* Should always be relative to the entity sending this. */
u8 vport_id;
+ u16 queue_id;
- /* q_zone_id is relative, may be different from queue id
- * currently used by Tx-only, upper-bounded by number of FW-queues
- */
- u16 qzone_id;
-
- /* stats_id is relative or absolute depends on function */
+ /* Relative, but relevant only for PFs */
u8 stats_id;
+
+ /* These are always absolute */
u16 sb;
- u16 sb_idx;
- u16 vf_qid;
+ u8 sb_idx;
+};
+
+struct ecore_rxq_start_ret_params {
+ void OSAL_IOMEM *p_prod;
+ void *p_handle;
+};
+
+struct ecore_txq_start_ret_params {
+ void OSAL_IOMEM *p_doorbell;
+ void *p_handle;
};
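A minimal usage sketch of the new handle-based start API follows (an illustration, not part of this patch; example_start_rxq and its parameters are assumptions). On success the caller keeps ret_params.p_prod as the Rx producer address and later passes ret_params.p_handle back to ecore_eth_rx_queue_stop(); the Tx flavor works the same way with p_doorbell.

	static enum _ecore_status_t
	example_start_rxq(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
			  struct ecore_queue_start_common_params *p_params,
			  u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size,
			  void OSAL_IOMEM **pp_prod, void **pp_handle)
	{
		struct ecore_rxq_start_ret_params ret_params;
		enum _ecore_status_t rc;

		OSAL_MEMSET(&ret_params, 0, sizeof(ret_params));

		rc = ecore_eth_rx_queue_start(p_hwfn, opaque_fid, p_params,
					      bd_max_bytes, bd_chain_phys_addr,
					      cqe_pbl_addr, cqe_pbl_size,
					      &ret_params);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Keep the producer address and the opaque queue handle;
		 * the handle is what gets fed to ecore_eth_rx_queue_stop().
		 */
		*pp_prod = ret_params.p_prod;
		*pp_handle = ret_params.p_handle;

		return ECORE_SUCCESS;
	}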
struct ecore_rss_params {
struct ecore_spq_comp_cb *p_comp_data);
/**
- * @brief ecore_sp_eth_rx_queue_start - RX Queue Start Ramrod
+ * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod
*
* This ramrod initializes an RX Queue for a VPort. An Assert is generated if
* the VPort ID is not currently initialized.
*
* @param p_hwfn
* @param opaque_fid
- * @p_params [stats_id is relative, packed in p_params]
+ * @p_params Inputs; Relative for PF [SB being an exception]
* @param bd_max_bytes Maximum bytes that can be placed on a BD
* @param bd_chain_phys_addr Physical address of BDs for receive.
* @param cqe_pbl_addr Physical address of the CQE PBL Table.
* @param cqe_pbl_size Size of the CQE PBL Table
- * @param pp_prod Pointer to place producer's
- * address for the Rx Q (May be
- * NULL).
+ * @param p_ret_params Pointer to a struct to be filled with outputs.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- struct ecore_queue_start_common_params *p_params,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size,
- void OSAL_IOMEM * *pp_prod);
+ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *p_ret_params);
/**
- * @brief ecore_sp_eth_rx_queue_stop -
- *
- * This ramrod closes an RX queue. It sends RX queue stop ramrod
- * + CFC delete ramrod
+ * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue
*
* @param p_hwfn
- * @param rx_queue_id RX Queue ID
+ * @param p_rxq Handle of the queue to close
* @param eq_completion_only If True completion will be on
* EQe, if False completion will be
* on EQe if p_hwfn opaque
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
- bool eq_completion_only,
- bool cqe_completion);
+ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_rxq,
+ bool eq_completion_only,
+ bool cqe_completion);
/**
- * @brief ecore_sp_eth_tx_queue_start - TX Queue Start Ramrod
+ * @brief - TX Queue Start Ramrod
*
* This ramrod initializes a TX Queue for a VPort. An Assert is generated if
* the VPort is not currently initialized.
* @param tc traffic class to use with this L2 txq
* @param pbl_addr address of the pbl array
* @param pbl_size number of entries in pbl
- * @param pp_doorbell Pointer to place doorbell pointer (May be NULL).
- * This address should be used with the
- * DIRECT_REG_WR macro.
+ * @param p_ret_params Pointer to a struct to be filled with the return parameters.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- struct ecore_queue_start_common_params *p_params,
- u8 tc,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- void OSAL_IOMEM * *pp_doorbell);
+ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u8 tc,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ struct ecore_txq_start_ret_params *p_ret_params);
/**
- * @brief ecore_sp_eth_tx_queue_stop -
- *
- * This ramrod closes a TX queue. It sends TX queue stop ramrod
- * + CFC delete ramrod
+ * @brief ecore_eth_tx_queue_stop - closes a Tx queue
*
* @param p_hwfn
- * @param tx_queue_id TX Queue ID
+ * @param p_txq - handle of the Tx queue to be closed
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
- u16 tx_queue_id);
+enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_txq);
enum ecore_tpa_mode {
ECORE_TPA_MODE_NONE,
* @note Final phase API.
*
* @param p_hwfn
- * @param rx_queue_id RX Queue ID
- * @param num_rxqs Allow to update multiple rx
- * queues, from rx_queue_id to
- * (rx_queue_id + num_rxqs)
+ * @param pp_rxq_handlers An array of queue handlers to be updated.
+ * @param num_rxqs number of queues to update.
* @param complete_cqe_flg Post completion to the CQE Ring if set
* @param complete_event_flg Post completion to the Event Ring if set
+ * @param comp_mode
+ * @param p_comp_data
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
+ void **pp_rxq_handlers,
u8 num_rxqs,
u8 complete_cqe_flg,
u8 complete_event_flg,
u8 i;
for (i = 0; i < p_vf->num_rxqs; i++)
- if (p_vf->vf_queues[i].rxq_active)
+ if (p_vf->vf_queues[i].p_rx_cid)
return true;
return false;
u8 i;
for (i = 0; i < p_vf->num_rxqs; i++)
- if (p_vf->vf_queues[i].txq_active)
+ if (p_vf->vf_queues[i].p_tx_cid)
return true;
return false;
vf->num_sbs = 0;
}
-enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 rel_vf_id, u16 num_rx_queues)
+enum _ecore_status_t
+ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_iov_vf_init_params *p_params)
{
u8 num_of_vf_available_chains = 0;
struct ecore_vf_info *vf = OSAL_NULL;
+ u16 qid, num_irqs;
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 cids;
u8 i;
- vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
if (!vf) {
DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
return ECORE_UNKNOWN_ERROR;
if (vf->b_init) {
DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
- rel_vf_id);
+ p_params->rel_vf_id);
return ECORE_INVAL;
}
+ /* Perform sanity checking on the requested queue_id */
+ for (i = 0; i < p_params->num_queues; i++) {
+ u16 min_vf_qzone = (u16)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE);
+ u16 max_vf_qzone = min_vf_qzone +
+ FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) - 1;
+
+ qid = p_params->req_rx_queue[i];
+ if (qid < min_vf_qzone || qid > max_vf_qzone) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
+ qid, p_params->rel_vf_id,
+ min_vf_qzone, max_vf_qzone);
+ return ECORE_INVAL;
+ }
+
+ qid = p_params->req_tx_queue[i];
+ if (qid > max_vf_qzone) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
+ qid, p_params->rel_vf_id, max_vf_qzone);
+ return ECORE_INVAL;
+ }
+
+ /* If client *really* wants, Tx qid can be shared with PF */
+ if (qid < min_vf_qzone)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
+ p_params->rel_vf_id, qid, i);
+ }
+
/* Limit number of queues according to number of CIDs */
ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[%d] - requesting to initialize for 0x%04x queues"
" [0x%04x CIDs available]\n",
- vf->relative_vf_id, num_rx_queues, (u16)cids);
- num_rx_queues = OSAL_MIN_T(u16, num_rx_queues, ((u16)cids));
+ vf->relative_vf_id, p_params->num_queues, (u16)cids);
+ num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));
num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
p_ptt,
vf,
- num_rx_queues);
+ num_irqs);
if (num_of_vf_available_chains == 0) {
DP_ERR(p_hwfn, "no available igu sbs\n");
return ECORE_NOMEM;
vf->num_txqs = num_of_vf_available_chains;
for (i = 0; i < vf->num_rxqs; i++) {
- u16 queue_id = ecore_int_queue_id_from_sb_id(p_hwfn,
- vf->igu_sbs[i]);
+ struct ecore_vf_q_info *p_queue = &vf->vf_queues[i];
- if (queue_id > RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
- DP_NOTICE(p_hwfn, true,
- "VF[%d] will require utilizing of"
- " out-of-bounds queues - %04x\n",
- vf->relative_vf_id, queue_id);
- /* TODO - cleanup the already allocate SBs */
- return ECORE_INVAL;
- }
+ p_queue->fw_rx_qid = p_params->req_rx_queue[i];
+ p_queue->fw_tx_qid = p_params->req_tx_queue[i];
/* CIDs are per-VF, so no problem having them 0-based. */
- vf->vf_queues[i].fw_rx_qid = queue_id;
- vf->vf_queues[i].fw_tx_qid = queue_id;
- vf->vf_queues[i].fw_cid = i;
+ p_queue->fw_cid = i;
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
- vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
+ "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x] CID %04x\n",
+ vf->relative_vf_id, i, vf->igu_sbs[i],
+ p_queue->fw_rx_qid, p_queue->fw_tx_qid,
+ p_queue->fw_cid);
}
rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
p_vf->num_active_rxqs = 0;
for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
- p_vf->vf_queues[i].rxq_active = 0;
- p_vf->vf_queues[i].txq_active = 0;
+ struct ecore_vf_q_info *p_queue = &p_vf->vf_queues[i];
+
+ if (p_queue->p_rx_cid) {
+ ecore_eth_queue_cid_release(p_hwfn,
+ p_queue->p_rx_cid);
+ p_queue->p_rx_cid = OSAL_NULL;
+ }
+
+ if (p_queue->p_tx_cid) {
+ ecore_eth_queue_cid_release(p_hwfn,
+ p_queue->p_tx_cid);
+ p_queue->p_tx_cid = OSAL_NULL;
+ }
}
OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
/* Update all the Rx queues */
for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
- u16 qid;
+ struct ecore_queue_cid *p_cid;
- if (!p_vf->vf_queues[i].rxq_active)
+ p_cid = p_vf->vf_queues[i].p_rx_cid;
+ if (p_cid == OSAL_NULL)
continue;
- qid = p_vf->vf_queues[i].fw_rx_qid;
-
- rc = ecore_sp_eth_rx_queues_update(p_hwfn, qid,
+ rc = ecore_sp_eth_rx_queues_update(p_hwfn,
+ (void **)&p_cid,
1, 0, 1,
ECORE_SPQ_MODE_EBLOCK,
OSAL_NULL);
DP_NOTICE(p_hwfn, true,
"Failed to send Rx update"
" fo queue[0x%04x]\n",
- qid);
+ p_cid->rel.queue_id);
return rc;
}
}
struct ecore_queue_start_common_params params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
+ struct ecore_vf_q_info *p_queue;
struct vfpf_start_rxq_tlv *req;
bool b_legacy_vf = false;
enum _ecore_status_t rc;
!ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
+ /* Acquire a new queue-cid */
+ p_queue = &vf->vf_queues[req->rx_qid];
+
OSAL_MEMSET(¶ms, 0, sizeof(params));
- params.queue_id = (u8)vf->vf_queues[req->rx_qid].fw_rx_qid;
- params.vf_qid = req->rx_qid;
+ params.queue_id = (u8)p_queue->fw_rx_qid;
params.vport_id = vf->vport_id;
params.stats_id = vf->abs_vf_id + 0x10;
params.sb = req->hw_sb;
params.sb_idx = req->sb_index;
+ p_queue->p_rx_cid = _ecore_eth_queue_to_cid(p_hwfn,
+ vf->opaque_fid,
+ p_queue->fw_cid,
+ (u8)req->rx_qid,
+ ¶ms);
+ if (p_queue->p_rx_cid == OSAL_NULL)
+ goto out;
+
/* Legacy VFs have their Producers in a different location, which they
* calculate on their own and clean the producer prior to this.
*/
GTT_BAR0_MAP_REG_MSDM_RAM +
MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
0);
+ p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;
- rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
- vf->vf_queues[req->rx_qid].fw_cid,
- ¶ms,
- req->bd_max_bytes,
- req->rxq_addr,
- req->cqe_pbl_addr,
- req->cqe_pbl_size,
- b_legacy_vf);
- if (rc) {
+ rc = ecore_eth_rxq_start_ramrod(p_hwfn,
+ p_queue->p_rx_cid,
+ req->bd_max_bytes,
+ req->rxq_addr,
+ req->cqe_pbl_addr,
+ req->cqe_pbl_size);
+ if (rc != ECORE_SUCCESS) {
status = PFVF_STATUS_FAILURE;
+ ecore_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
+ p_queue->p_rx_cid = OSAL_NULL;
} else {
status = PFVF_STATUS_SUCCESS;
- vf->vf_queues[req->rx_qid].rxq_active = true;
vf->num_active_rxqs++;
}
out:
- ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf,
- status, b_legacy_vf);
+ ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
+ b_legacy_vf);
}
static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
struct ecore_queue_start_common_params params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
+ struct ecore_vf_q_info *p_queue;
struct vfpf_start_txq_tlv *req;
enum _ecore_status_t rc;
+ u16 pq;
OSAL_MEMSET(¶ms, 0, sizeof(params));
req = &mbx->req_virt->start_txq;
!ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
- params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
- params.qzone_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
+ /* Acquire a new queue-cid */
+ p_queue = &vf->vf_queues[req->tx_qid];
+
+ params.queue_id = p_queue->fw_tx_qid;
params.vport_id = vf->vport_id;
params.stats_id = vf->abs_vf_id + 0x10;
params.sb = req->hw_sb;
params.sb_idx = req->sb_index;
- rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
- vf->opaque_fid,
- vf->vf_queues[req->tx_qid].fw_cid,
- ¶ms,
- req->pbl_addr,
- req->pbl_size,
- ecore_get_cm_pq_idx_vf(p_hwfn,
- vf->relative_vf_id));
+ p_queue->p_tx_cid = _ecore_eth_queue_to_cid(p_hwfn,
+ vf->opaque_fid,
+ p_queue->fw_cid,
+ (u8)req->tx_qid,
+ ¶ms);
+ if (p_queue->p_tx_cid == OSAL_NULL)
+ goto out;
- if (rc)
+ pq = ecore_get_cm_pq_idx_vf(p_hwfn,
+ vf->relative_vf_id);
+ rc = ecore_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
+ req->pbl_addr, req->pbl_size, pq);
+ if (rc != ECORE_SUCCESS) {
status = PFVF_STATUS_FAILURE;
- else {
+ ecore_eth_queue_cid_release(p_hwfn,
+ p_queue->p_tx_cid);
+ p_queue->p_tx_cid = OSAL_NULL;
+ } else {
status = PFVF_STATUS_SUCCESS;
- vf->vf_queues[req->tx_qid].txq_active = true;
}
out:
u8 num_rxqs,
bool cqe_completion)
{
+ struct ecore_vf_q_info *p_queue;
enum _ecore_status_t rc = ECORE_SUCCESS;
int qid;
return ECORE_INVAL;
for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
- if (vf->vf_queues[qid].rxq_active) {
- rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
- vf->vf_queues[qid].
- fw_rx_qid, false,
- cqe_completion);
+ p_queue = &vf->vf_queues[qid];
- if (rc)
- return rc;
- }
- vf->vf_queues[qid].rxq_active = false;
+ if (!p_queue->p_rx_cid)
+ continue;
+
+ rc = ecore_eth_rx_queue_stop(p_hwfn,
+ p_queue->p_rx_cid,
+ false, cqe_completion);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ vf->vf_queues[qid].p_rx_cid = OSAL_NULL;
vf->num_active_rxqs--;
}
u16 txq_id, u8 num_txqs)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_vf_q_info *p_queue;
int qid;
if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))
return ECORE_INVAL;
for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
- if (vf->vf_queues[qid].txq_active) {
- rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
- vf->vf_queues[qid].
- fw_tx_qid);
+ p_queue = &vf->vf_queues[qid];
+ if (!p_queue->p_tx_cid)
+ continue;
- if (rc)
- return rc;
- }
- vf->vf_queues[qid].txq_active = false;
+ rc = ecore_eth_tx_queue_stop(p_hwfn,
+ p_queue->p_tx_cid);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_queue->p_tx_cid = OSAL_NULL;
}
return rc;
}
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
+ struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
struct vfpf_update_rxq_tlv *req;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_FAILURE;
u8 complete_event_flg;
u8 complete_cqe_flg;
u16 qid;
complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
+ /* Validate inputs */
+ if (req->num_rxqs + req->rx_qid > ECORE_MAX_VF_CHAINS_PER_PF ||
+ !ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid)) {
+ DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+ vf->relative_vf_id, req->rx_qid, req->num_rxqs);
+ goto out;
+ }
+
for (i = 0; i < req->num_rxqs; i++) {
qid = req->rx_qid + i;
- if (!vf->vf_queues[qid].rxq_active) {
- DP_NOTICE(p_hwfn, true,
- "VF rx_qid = %d isn`t active!\n", qid);
- status = PFVF_STATUS_FAILURE;
- break;
+ if (!vf->vf_queues[qid].p_rx_cid) {
+ DP_INFO(p_hwfn,
+ "VF[%d] rx_qid = %d isn`t active!\n",
+ vf->relative_vf_id, qid);
+ goto out;
}
- rc = ecore_sp_eth_rx_queues_update(p_hwfn,
- vf->vf_queues[qid].fw_rx_qid,
- 1,
- complete_cqe_flg,
- complete_event_flg,
- ECORE_SPQ_MODE_EBLOCK,
- OSAL_NULL);
-
- if (rc) {
- status = PFVF_STATUS_FAILURE;
- break;
- }
+ handlers[i] = vf->vf_queues[qid].p_rx_cid;
}
+ rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
+ req->num_rxqs,
+ complete_cqe_flg,
+ complete_event_flg,
+ ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+ if (rc)
+ goto out;
+
+ status = PFVF_STATUS_SUCCESS;
+out:
ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
length, status);
}
"rss_ind_table[%d] = %d,"
" rxq is out of range\n",
i, q_idx);
- else if (!vf->vf_queues[q_idx].rxq_active)
+ else if (!vf->vf_queues[q_idx].p_rx_cid)
DP_NOTICE(p_hwfn, true,
"rss_ind_table[%d] = %d, rxq is not active\n",
i, q_idx);
struct ecore_vf_q_info {
u16 fw_rx_qid;
+ struct ecore_queue_cid *p_rx_cid;
u16 fw_tx_qid;
+ struct ecore_queue_cid *p_tx_cid;
u8 fw_cid;
- u8 rxq_active;
- u8 txq_active;
};
enum vf_state {
#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
(TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
-enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
- u8 rx_qid,
- u16 sb,
- u8 sb_index,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size,
- void OSAL_IOMEM **pp_prod)
+enum _ecore_status_t
+ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void OSAL_IOMEM **pp_prod)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_start_queue_resp_tlv *resp;
struct vfpf_start_rxq_tlv *req;
+ u16 rx_qid = p_cid->rel.queue_id;
enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req->cqe_pbl_addr = cqe_pbl_addr;
req->cqe_pbl_size = cqe_pbl_size;
req->rxq_addr = bd_chain_phys_addr;
- req->hw_sb = sb;
- req->sb_index = sb_index;
+ req->hw_sb = p_cid->rel.sb;
+ req->sb_index = p_cid->rel.sb_idx;
req->bd_max_bytes = bd_max_bytes;
req->stat_id = -1; /* Keep initialized, for future compatibility */
/* If PF is legacy, we'll need to calculate producers ourselves
* as well as clean them.
*/
- if (pp_prod && p_iov->b_pre_fp_hsi) {
+ if (p_iov->b_pre_fp_hsi) {
u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
u32 init_prod_val = 0;
- *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
+ *pp_prod = (u8 OSAL_IOMEM *)
+ p_hwfn->regview +
MSTORM_QZONE_START(p_hwfn->p_dev) +
(hw_qid) * MSTORM_QZONE_SIZE;
}
/* Learn the address of the producer from the response */
- if (pp_prod && !p_iov->b_pre_fp_hsi) {
+ if (!p_iov->b_pre_fp_hsi) {
u32 init_prod_val = 0;
*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + resp->offset;
}
enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
- u16 rx_qid, bool cqe_completion)
+ struct ecore_queue_cid *p_cid,
+ bool cqe_completion)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_stop_rxqs_tlv *req;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
- req->rx_qid = rx_qid;
+ req->rx_qid = p_cid->rel.queue_id;
req->num_rxqs = 1;
req->cqe_completion = cqe_completion;
return rc;
}
-enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
- u16 tx_queue_id,
- u16 sb,
- u8 sb_index,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- void OSAL_IOMEM **pp_doorbell)
+enum _ecore_status_t
+ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ void OSAL_IOMEM **pp_doorbell)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_start_queue_resp_tlv *resp;
struct vfpf_start_txq_tlv *req;
+ u16 qid = p_cid->rel.queue_id;
enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
- req->tx_qid = tx_queue_id;
+ req->tx_qid = qid;
/* Tx */
req->pbl_addr = pbl_addr;
req->pbl_size = pbl_size;
- req->hw_sb = sb;
- req->sb_index = sb_index;
+ req->hw_sb = p_cid->rel.sb;
+ req->sb_index = p_cid->rel.sb_idx;
/* add list termination tlv */
ecore_add_tlv(p_hwfn, &p_iov->offset,
goto exit;
}
- if (pp_doorbell) {
- /* Modern PFs provide the actual offsets, while legacy
- * provided only the queue id.
- */
- if (!p_iov->b_pre_fp_hsi) {
- *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
- resp->offset;
- } else {
- u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
-
+ /* Modern PFs provide the actual offsets, while legacy
+ * provided only the queue id.
+ */
+ if (!p_iov->b_pre_fp_hsi) {
*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
- DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
- }
+ resp->offset;
+ } else {
+ u8 cid = p_iov->acquire_resp.resc.cid[qid];
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
- tx_queue_id, *pp_doorbell, resp->offset);
+ *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
+ DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
}
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
+ qid, *pp_doorbell, resp->offset);
exit:
ecore_vf_pf_req_end(p_hwfn, rc);
return rc;
}
-enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, u16 tx_qid)
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_stop_txqs_tlv *req;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
- req->tx_qid = tx_qid;
+ req->tx_qid = p_cid->rel.queue_id;
req->num_txqs = 1;
/* add list termination tlv */
}
enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
+ struct ecore_queue_cid **pp_cid,
u8 num_rxqs,
- u8 comp_cqe_flg, u8 comp_event_flg)
+ u8 comp_cqe_flg,
+ u8 comp_event_flg)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
struct vfpf_update_rxq_tlv *req;
enum _ecore_status_t rc;
+ /* TODO - API is limited to assuming contiguous regions of queues,
+ * but VF queues might not fulfill this requirement.
+ * Need to consider whether we need new TLVs for this, or whether
+ * simply doing it iteratively is good enough.
+ */
+ if (!num_rxqs)
+ return ECORE_INVAL;
+
+again:
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));
- req->rx_qid = rx_queue_id;
- req->num_rxqs = num_rxqs;
+ /* Find the length of the current contiguous range of queues beginning
+ * at first queue's index.
+ */
+ req->rx_qid = (*pp_cid)->rel.queue_id;
+ for (req->num_rxqs = 1; req->num_rxqs < num_rxqs; req->num_rxqs++)
+ if (pp_cid[req->num_rxqs]->rel.queue_id !=
+ req->rx_qid + req->num_rxqs)
+ break;
if (comp_cqe_flg)
req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;
goto exit;
}
+ /* Make sure we're done with all the queues */
+ if (req->num_rxqs < num_rxqs) {
+ num_rxqs -= req->num_rxqs;
+ pp_cid += req->num_rxqs;
+ /* TODO - should we give a non-locked variant instead? */
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ goto again;
+ }
+
exit:
ecore_vf_pf_req_end(p_hwfn, rc);
-
return rc;
}
* @brief VF - start the RX Queue by sending a message to the PF
*
* @param p_hwfn
- * @param cid - zero based within the VF
- * @param rx_queue_id - zero based within the VF
- * @param sb - VF status block for this queue
- * @param sb_index - Index within the status block
+ * @param p_cid - Only relative fields are relevant
* @param bd_max_bytes - maximum number of bytes per bd
* @param bd_chain_phys_addr - physical address of bd chain
* @param cqe_pbl_addr - physical address of pbl
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
- u8 rx_queue_id,
- u16 sb,
- u8 sb_index,
+ struct ecore_queue_cid *p_cid,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
* PF.
*
* @param p_hwfn
- * @param tx_queue_id - zero based within the VF
- * @param sb - status block for this queue
- * @param sb_index - index within the status block
+ * @param p_cid
* @param bd_chain_phys_addr - physical address of tx chain
* @param pp_doorbell - pointer to address to which to
* write the doorbell too..
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
- u16 tx_queue_id,
- u16 sb,
- u8 sb_index,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- void OSAL_IOMEM **pp_doorbell);
+enum _ecore_status_t
+ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ void OSAL_IOMEM **pp_doorbell);
/**
* @brief VF - stop the RX queue by sending a message to the PF
*
* @param p_hwfn
- * @param rx_qid
+ * @param p_cid
* @param cqe_completion
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
- u16 rx_qid,
- bool cqe_completion);
+enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ bool cqe_completion);
/**
* @brief VF - stop the TX queue by sending a message to the PF
*
* @param p_hwfn
- * @param tx_qid
+ * @param p_cid
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
- u16 tx_qid);
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid);
+
+/* TODO - fix all the !SRIOV prototypes */
#ifndef LINUX_REMOVE
/**
* PF
*
* @param p_hwfn
- * @param rx_queue_id
+ * @param pp_cid - list of queue-cids which we want to update
* @param num_rxqs
- * @param init_sge_ring
* @param comp_cqe_flg
* @param comp_event_flg
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_rxqs_update(
- struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
- u8 num_rxqs,
- u8 comp_cqe_flg,
- u8 comp_event_flg);
+enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid **pp_cid,
+ u8 num_rxqs,
+ u8 comp_cqe_flg,
+ u8 comp_event_flg);
#endif
/**
uint16_t bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
- uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod)
+ uint16_t cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *ret_params)
{
struct ecore_hwfn *p_hwfn;
int rc, hwfn_index;
p_params->queue_id = p_params->queue_id / edev->num_hwfns;
p_params->stats_id = p_params->vport_id;
- rc = ecore_sp_eth_rx_queue_start(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
- p_params,
- bd_max_bytes,
- bd_chain_phys_addr,
- cqe_pbl_addr, cqe_pbl_size, pp_prod);
+ rc = ecore_eth_rx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ p_params,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr,
+ cqe_pbl_size,
+ ret_params);
if (rc) {
DP_ERR(edev, "Failed to start RXQ#%d\n", p_params->queue_id);
}
static int
-qed_stop_rxq(struct ecore_dev *edev, struct qed_stop_rxq_params *params)
+qed_stop_rxq(struct ecore_dev *edev, uint8_t rss_id, void *handle)
{
int rc, hwfn_index;
struct ecore_hwfn *p_hwfn;
- hwfn_index = params->rss_id % edev->num_hwfns;
+ hwfn_index = rss_id % edev->num_hwfns;
p_hwfn = &edev->hwfns[hwfn_index];
- rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
- params->rx_queue_id / edev->num_hwfns,
- params->eq_completion_only, false);
+ rc = ecore_eth_rx_queue_stop(p_hwfn, handle, true, false);
if (rc) {
- DP_ERR(edev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
+ DP_ERR(edev, "Failed to stop RXQ#%02x\n", rss_id);
return rc;
}
uint8_t rss_num,
struct ecore_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
- uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell)
+ uint16_t pbl_size,
+ struct ecore_txq_start_ret_params *ret_params)
{
struct ecore_hwfn *p_hwfn;
int rc, hwfn_index;
p_hwfn = &edev->hwfns[hwfn_index];
p_params->queue_id = p_params->queue_id / edev->num_hwfns;
- p_params->qzone_id = p_params->queue_id;
p_params->stats_id = p_params->vport_id;
- rc = ecore_sp_eth_tx_queue_start(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
- p_params,
- 0 /* tc */,
- pbl_addr, pbl_size, pp_doorbell);
+ rc = ecore_eth_tx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ p_params, 0 /* tc */,
+ pbl_addr, pbl_size,
+ ret_params);
if (rc) {
DP_ERR(edev, "Failed to start TXQ#%d\n", p_params->queue_id);
}
static int
-qed_stop_txq(struct ecore_dev *edev, struct qed_stop_txq_params *params)
+qed_stop_txq(struct ecore_dev *edev, uint8_t rss_id, void *handle)
{
struct ecore_hwfn *p_hwfn;
int rc, hwfn_index;
- hwfn_index = params->rss_id % edev->num_hwfns;
+ hwfn_index = rss_id % edev->num_hwfns;
p_hwfn = &edev->hwfns[hwfn_index];
- rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
- params->tx_queue_id / edev->num_hwfns);
+ rc = ecore_eth_tx_queue_stop(p_hwfn, handle);
if (rc) {
- DP_ERR(edev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
+ DP_ERR(edev, "Failed to stop TXQ#%02x\n", rss_id);
return rc;
}
bool is_legacy;
};
-struct qed_stop_rxq_params {
- uint8_t rss_id;
- uint8_t rx_queue_id;
- uint8_t vport_id;
- bool eq_completion_only;
-};
-
struct qed_update_vport_params {
uint8_t vport_id;
uint8_t update_vport_active_flg;
bool clear_stats;
};
-struct qed_stop_txq_params {
- uint8_t rss_id;
- uint8_t tx_queue_id;
-};
-
struct qed_eth_ops {
const struct qed_common_ops *common;
uint16_t bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
- uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod);
+ uint16_t cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *ret_params);
int (*q_rx_stop)(struct ecore_dev *edev,
- struct qed_stop_rxq_params *params);
+ uint8_t rss_id, void *handle);
int (*q_tx_start)(struct ecore_dev *edev,
uint8_t rss_num,
struct ecore_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
- uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell);
+ uint16_t pbl_size,
+ struct ecore_txq_start_ret_params *ret_params);
int (*q_tx_stop)(struct ecore_dev *edev,
- struct qed_stop_txq_params *params);
+ uint8_t rss_id, void *handle);
int (*eth_cqe_completion)(struct ecore_dev *edev,
uint8_t rss_id,
for_each_queue(i) {
fp = &qdev->fp_array[i];
if (fp->type & QEDE_FASTPATH_RX) {
+ struct ecore_rxq_start_ret_params ret_params;
+
p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->
rx_comp_ring);
page_cnt = ecore_chain_get_page_cnt(&fp->rxq->
rx_comp_ring);
+ memset(&ret_params, 0, sizeof(ret_params));
memset(&q_params, 0, sizeof(q_params));
q_params.queue_id = i;
q_params.vport_id = 0;
fp->rxq->rx_bd_ring.p_phys_addr,
p_phys_table,
page_cnt,
- &fp->rxq->hw_rxq_prod_addr);
+ &ret_params);
if (rc) {
DP_ERR(edev, "Start rxq #%d failed %d\n",
fp->rxq->queue_id, rc);
return rc;
}
+ /* Use the return parameters */
+ fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
+ fp->rxq->handle = ret_params.p_handle;
+
fp->rxq->hw_cons_ptr =
&fp->sb_info->sb_virt->pi_array[RX_PI];
if (!(fp->type & QEDE_FASTPATH_TX))
continue;
for (tc = 0; tc < qdev->num_tc; tc++) {
+ struct ecore_txq_start_ret_params ret_params;
+
txq = fp->txqs[tc];
txq_index = tc * QEDE_RSS_COUNT(qdev) + i;
page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
memset(&q_params, 0, sizeof(q_params));
+ memset(&ret_params, 0, sizeof(ret_params));
q_params.queue_id = txq->queue_id;
q_params.vport_id = 0;
q_params.sb = fp->sb_info->igu_sb_id;
rc = qdev->ops->q_tx_start(edev, i, &q_params,
p_phys_table,
page_cnt, /* **pp_doorbell */
- &txq->doorbell_addr);
+ &ret_params);
if (rc) {
DP_ERR(edev, "Start txq %u failed %d\n",
txq_index, rc);
return rc;
}
+ txq->doorbell_addr = ret_params.p_doorbell;
+ txq->handle = ret_params.p_handle;
+
txq->hw_cons_ptr =
&fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
SET_FIELD(txq->tx_db.data.params,
{
struct qed_update_vport_params vport_update_params;
struct ecore_dev *edev = &qdev->edev;
+ struct qede_fastpath *fp;
int rc, tc, i;
/* Disable the vport */
/* Flush Tx queues. If needed, request drain from MCP */
for_each_queue(i) {
- struct qede_fastpath *fp = &qdev->fp_array[i];
+ fp = &qdev->fp_array[i];
if (fp->type & QEDE_FASTPATH_TX) {
for (tc = 0; tc < qdev->num_tc; tc++) {
/* Stop all Queues in reverse order */
for (i = QEDE_QUEUE_CNT(qdev) - 1; i >= 0; i--) {
- struct qed_stop_rxq_params rx_params;
+ fp = &qdev->fp_array[i];
/* Stop the Tx Queue(s) */
if (qdev->fp_array[i].type & QEDE_FASTPATH_TX) {
for (tc = 0; tc < qdev->num_tc; tc++) {
- struct qed_stop_txq_params tx_params;
- u8 val;
-
- tx_params.rss_id = i;
- val = qdev->fp_array[i].txqs[tc]->queue_id;
- tx_params.tx_queue_id = val;
-
+ struct qede_tx_queue *txq = fp->txqs[tc];
DP_INFO(edev, "Stopping tx queues\n");
- rc = qdev->ops->q_tx_stop(edev, &tx_params);
+ rc = qdev->ops->q_tx_stop(edev, i, txq->handle);
if (rc) {
DP_ERR(edev, "Failed to stop TXQ #%d\n",
- tx_params.tx_queue_id);
+ i);
return rc;
}
}
/* Stop the Rx Queue */
if (qdev->fp_array[i].type & QEDE_FASTPATH_RX) {
- memset(&rx_params, 0, sizeof(rx_params));
- rx_params.rss_id = i;
- rx_params.rx_queue_id = qdev->fp_array[i].rxq->queue_id;
- rx_params.eq_completion_only = 1;
-
DP_INFO(edev, "Stopping rx queues\n");
-
- rc = qdev->ops->q_rx_stop(edev, &rx_params);
+ rc = qdev->ops->q_rx_stop(edev, i, fp->rxq->handle);
if (rc) {
DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
return rc;
uint64_t rx_hw_errors;
uint64_t rx_alloc_errors;
struct qede_dev *qdev;
+ void *handle;
};
/*
uint64_t xmit_pkts;
bool is_legacy;
struct qede_dev *qdev;
+ void *handle;
};
struct qede_fastpath {