ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
- u16 rx_queue_id,
- u8 vf_rx_queue_id,
- u8 vport_id,
- u8 stats_id,
- u16 sb,
- u8 sb_index,
+ struct ecore_queue_start_common_params *p_params,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
enum _ecore_status_t rc = ECORE_NOTIMPL;
/* Store information for the stop */
- p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+ p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
p_rx_cid->cid = cid;
p_rx_cid->opaque_fid = opaque_fid;
- p_rx_cid->vport_id = vport_id;
+ p_rx_cid->vport_id = p_params->vport_id;
- rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != ECORE_SUCCESS)
return rc;
- rc = ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
+ rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id);
if (rc != ECORE_SUCCESS)
return rc;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
- opaque_fid, cid, rx_queue_id, vport_id, sb);
+ opaque_fid, cid, p_params->queue_id,
+ p_params->vport_id, p_params->sb);
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
p_ramrod = &p_ent->ramrod.rx_queue_start;
- p_ramrod->sb_id = OSAL_CPU_TO_LE16(sb);
- p_ramrod->sb_index = sb_index;
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
+ p_ramrod->sb_index = (u8)p_params->sb_idx;
p_ramrod->vport_id = abs_vport_id;
- p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->stats_counter_id = p_params->stats_id;
p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
p_ramrod->complete_cqe_flg = 0;
p_ramrod->complete_event_flg = 1;
p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
- if (vf_rx_queue_id || b_use_zone_a_prod) {
- p_ramrod->vf_rx_prod_index = vf_rx_queue_id;
+ if (p_params->vf_qid || b_use_zone_a_prod) {
+ p_ramrod->vf_rx_prod_index = (u8)p_params->vf_qid;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Queue%s is meant for VF rxq[%02x]\n",
b_use_zone_a_prod ? " [legacy]" : "",
- vf_rx_queue_id);
+ p_params->vf_qid);
p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod;
}
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
-enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u8 rx_queue_id,
- u8 vport_id,
- u8 stats_id,
- u16 sb,
- u8 sb_index,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size,
- void OSAL_IOMEM **pp_prod)
+enum _ecore_status_t
+ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void OSAL_IOMEM * *pp_prod)
{
struct ecore_hw_cid_data *p_rx_cid;
u32 init_prod_val = 0;
if (IS_VF(p_hwfn->p_dev)) {
return ecore_vf_pf_rxq_start(p_hwfn,
- rx_queue_id,
- sb,
- sb_index,
+ p_params->queue_id,
+ p_params->sb,
+ (u8)p_params->sb_idx,
bd_max_bytes,
bd_chain_phys_addr,
cqe_pbl_addr,
cqe_pbl_size, pp_prod);
}
- rc = ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_l2_queue);
+ rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue);
if (rc != ECORE_SUCCESS)
return rc;
- rc = ecore_fw_vport(p_hwfn, stats_id, &abs_stats_id);
+ rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
if (rc != ECORE_SUCCESS)
return rc;
(u32 *)(&init_prod_val));
/* Allocate a CID for the queue */
- p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+ p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
&p_rx_cid->cid);
if (rc != ECORE_SUCCESS) {
return rc;
}
p_rx_cid->b_cid_allocated = true;
+ p_params->stats_id = abs_stats_id;
+ p_params->vf_qid = 0;
rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn,
opaque_fid,
p_rx_cid->cid,
- rx_queue_id,
- 0,
- vport_id,
- abs_stats_id,
- sb,
- sb_index,
+ p_params,
bd_max_bytes,
bd_chain_phys_addr,
cqe_pbl_addr,
enum _ecore_status_t
ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
u16 opaque_fid,
- u16 tx_queue_id,
u32 cid,
- u8 vport_id,
- u8 stats_id,
- u16 sb,
- u8 sb_index,
+ struct ecore_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
u16 pbl_size,
union ecore_qm_pq_params *p_pq_params)
enum _ecore_status_t rc = ECORE_NOTIMPL;
/* Store information for the stop */
- p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+ p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
p_tx_cid->cid = cid;
p_tx_cid->opaque_fid = opaque_fid;
- rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != ECORE_SUCCESS)
return rc;
- rc = ecore_fw_l2_queue(p_hwfn, tx_queue_id, &abs_tx_q_id);
+ rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id);
if (rc != ECORE_SUCCESS)
return rc;
p_ramrod = &p_ent->ramrod.tx_queue_start;
p_ramrod->vport_id = abs_vport_id;
- p_ramrod->sb_id = OSAL_CPU_TO_LE16(sb);
- p_ramrod->sb_index = sb_index;
- p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
+ p_ramrod->sb_index = (u8)p_params->sb_idx;
+ p_ramrod->stats_counter_id = p_params->stats_id;
p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(abs_tx_q_id);
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
-enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u16 tx_queue_id,
- u8 vport_id,
- u8 stats_id,
- u16 sb,
- u8 sb_index,
- u8 tc,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- void OSAL_IOMEM **pp_doorbell)
+enum _ecore_status_t
+ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u8 tc,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ void OSAL_IOMEM * *pp_doorbell)
{
struct ecore_hw_cid_data *p_tx_cid;
union ecore_qm_pq_params pq_params;
if (IS_VF(p_hwfn->p_dev)) {
return ecore_vf_pf_txq_start(p_hwfn,
- tx_queue_id,
- sb,
- sb_index,
+ p_params->queue_id,
+ p_params->sb,
+ (u8)p_params->sb_idx,
pbl_addr,
pbl_size,
pp_doorbell);
}
- rc = ecore_fw_vport(p_hwfn, stats_id, &abs_stats_id);
+ rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
if (rc != ECORE_SUCCESS)
return rc;
- p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+ p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
OSAL_MEMSET(p_tx_cid, 0, sizeof(*p_tx_cid));
OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
- opaque_fid, p_tx_cid->cid, tx_queue_id,
- vport_id, sb);
+ opaque_fid, p_tx_cid->cid, p_params->queue_id,
+ p_params->vport_id, p_params->sb);
+
+ p_params->stats_id = abs_stats_id;
/* TODO - set tc in the pq_params for multi-cos */
rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
opaque_fid,
- tx_queue_id,
p_tx_cid->cid,
- vport_id,
- abs_stats_id,
- sb,
- sb_index,
+ p_params,
pbl_addr,
pbl_size,
&pq_params);
* @param p_hwfn
* @param opaque_fid
* @param cid
- * @param rx_queue_id
- * @param vport_id
- * @param stats_id
- * @param sb
- * @param sb_index
+ * @param p_params [queue_id, vport_id, stats_id, sb, sb_idx, vf_qid]
+ *        stats_id is absolute, packed in p_params.
* @param bd_max_bytes
* @param bd_chain_phys_addr
* @param cqe_pbl_addr
ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
- u16 rx_queue_id,
- u8 vf_rx_queue_id,
- u8 vport_id,
- u8 stats_id,
- u16 sb,
- u8 sb_index,
+ struct ecore_queue_start_common_params *p_params,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
*
* @param p_hwfn
* @param opaque_fid
- * @param tx_queue_id
* @param cid
- * @param vport_id
- * @param stats_id
- * @param sb
- * @param sb_index
+ * @param p_params [queue_id, vport_id, stats_id, sb, sb_idx, vf_qid]
* @param pbl_addr
* @param pbl_size
* @param p_pq_params - parameters for choosing the PQ for this Tx queue
enum _ecore_status_t
ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
u16 opaque_fid,
- u16 tx_queue_id,
u32 cid,
- u8 vport_id,
- u8 stats_id,
- u16 sb,
- u8 sb_index,
+ struct ecore_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
u16 pbl_size,
union ecore_qm_pq_params *p_pq_params);
#define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */
#endif
+struct ecore_queue_start_common_params {
+ /* Rx/Tx queue id */
+ u8 queue_id;
+ u8 vport_id;
+
+ /* stats_id is relative or absolute depends on function */
+ u8 stats_id;
+ u16 sb;
+ u16 sb_idx;
+ u16 vf_qid;
+};
+
struct ecore_rss_params {
u8 update_rss_config;
u8 rss_enable;
*
* @param p_hwfn
* @param opaque_fid
- * @param rx_queue_id RX Queue ID: Zero based, per VPort, allocated
- * by assignment (=rssId)
- * @param vport_id VPort ID
- * @param u8 stats_id VPort ID which the queue stats
- * will be added to
- * @param sb Status Block of the Function Event Ring
- * @param sb_index Index into the status block of the
- * Function Event Ring
+ * @param p_params stats_id is relative, packed in p_params
* @param bd_max_bytes Maximum bytes that can be placed on a BD
* @param bd_chain_phys_addr Physical address of BDs for receive.
* @param cqe_pbl_addr Physical address of the CQE PBL Table.
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u8 rx_queue_id,
- u8 vport_id,
- u8 stats_id,
- u16 sb,
- u8 sb_index,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size,
- void OSAL_IOMEM **pp_prod);
+enum _ecore_status_t
+ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void OSAL_IOMEM * *pp_prod);
/**
* @brief ecore_sp_eth_rx_queue_stop -
*
* @param p_hwfn
* @param opaque_fid
- * @param tx_queue_id TX Queue ID
- * @param vport_id VPort ID
- * @param u8 stats_id VPort ID which the queue stats
- * will be added to
- * @param sb Status Block of the Function Event Ring
- * @param sb_index Index into the status block of the Function
- * Event Ring
+ * @param p_params
* @param tc traffic class to use with this L2 txq
* @param pbl_addr address of the pbl array
* @param pbl_size number of entries in pbl
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u16 tx_queue_id,
- u8 vport_id,
- u8 stats_id,
- u16 sb,
- u8 sb_index,
- u8 tc,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- void OSAL_IOMEM **pp_doorbell);
+enum _ecore_status_t
+ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u8 tc,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ void OSAL_IOMEM * *pp_doorbell);
/**
* @brief ecore_sp_eth_tx_queue_stop -
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
+ struct ecore_queue_start_common_params p_params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
struct vfpf_start_rxq_tlv *req;
enum _ecore_status_t rc;
req = &mbx->req_virt->start_rxq;
+ OSAL_MEMSET(&p_params, 0, sizeof(p_params));
+ p_params.queue_id = (u8)vf->vf_queues[req->rx_qid].fw_rx_qid;
+ p_params.vf_qid = req->rx_qid;
+ p_params.vport_id = vf->vport_id;
+ p_params.stats_id = vf->abs_vf_id + 0x10,
+ p_params.sb = req->hw_sb;
+ p_params.sb_idx = req->sb_index;
if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
!ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
vf->vf_queues[req->rx_qid].fw_cid,
- vf->vf_queues[req->rx_qid].fw_rx_qid,
- (u8)req->rx_qid,
- vf->vport_id,
- vf->abs_vf_id + 0x10,
- req->hw_sb,
- req->sb_index,
+ &p_params,
req->bd_max_bytes,
req->rxq_addr,
req->cqe_pbl_addr,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
+ struct ecore_queue_start_common_params p_params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
union ecore_qm_pq_params pq_params;
pq_params.eth.vf_id = vf->relative_vf_id;
req = &mbx->req_virt->start_txq;
+ OSAL_MEMSET(&p_params, 0, sizeof(p_params));
+ p_params.queue_id = (u8)vf->vf_queues[req->tx_qid].fw_tx_qid;
+ p_params.vport_id = vf->vport_id;
+ p_params.stats_id = vf->abs_vf_id + 0x10,
+ p_params.sb = req->hw_sb;
+ p_params.sb_idx = req->sb_index;
if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
!ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
rc = ecore_sp_eth_txq_start_ramrod(
p_hwfn,
vf->opaque_fid,
- vf->vf_queues[req->tx_qid].fw_tx_qid,
vf->vf_queues[req->tx_qid].fw_cid,
- vf->vport_id,
- vf->abs_vf_id + 0x10,
- req->hw_sb,
- req->sb_index,
+ &p_params,
req->pbl_addr,
req->pbl_size,
&pq_params);
static int
qed_start_rxq(struct ecore_dev *edev,
- uint8_t rss_id, uint8_t rx_queue_id,
- uint8_t vport_id, uint16_t sb,
- uint8_t sb_index, uint16_t bd_max_bytes,
+ uint8_t rss_num,
+ struct ecore_queue_start_common_params *p_params,
+ uint16_t bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod)
struct ecore_hwfn *p_hwfn;
int rc, hwfn_index;
- hwfn_index = rss_id % edev->num_hwfns;
+ hwfn_index = rss_num % edev->num_hwfns;
p_hwfn = &edev->hwfns[hwfn_index];
+ p_params->queue_id = p_params->queue_id / edev->num_hwfns;
+ p_params->stats_id = p_params->vport_id;
+
rc = ecore_sp_eth_rx_queue_start(p_hwfn,
p_hwfn->hw_info.opaque_fid,
- rx_queue_id / edev->num_hwfns,
- vport_id,
- vport_id,
- sb,
- sb_index,
+ p_params,
bd_max_bytes,
bd_chain_phys_addr,
cqe_pbl_addr, cqe_pbl_size, pp_prod);
if (rc) {
- DP_ERR(edev, "Failed to start RXQ#%d\n", rx_queue_id);
+ DP_ERR(edev, "Failed to start RXQ#%d\n", p_params->queue_id);
return rc;
}
DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
- rx_queue_id, rss_id, vport_id, sb);
+ "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+ p_params->queue_id, rss_num, p_params->vport_id,
+ p_params->sb);
return 0;
}
static int
qed_start_txq(struct ecore_dev *edev,
- uint8_t rss_id, uint16_t tx_queue_id,
- uint8_t vport_id, uint16_t sb,
- uint8_t sb_index,
+ uint8_t rss_num,
+ struct ecore_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell)
{
struct ecore_hwfn *p_hwfn;
int rc, hwfn_index;
- hwfn_index = rss_id % edev->num_hwfns;
+ hwfn_index = rss_num % edev->num_hwfns;
p_hwfn = &edev->hwfns[hwfn_index];
+ p_params->queue_id = p_params->queue_id / edev->num_hwfns;
+ p_params->stats_id = p_params->vport_id;
+
rc = ecore_sp_eth_tx_queue_start(p_hwfn,
p_hwfn->hw_info.opaque_fid,
- tx_queue_id / edev->num_hwfns,
- vport_id,
- vport_id,
- sb,
- sb_index,
+ p_params,
0 /* tc */,
pbl_addr, pbl_size, pp_doorbell);
if (rc) {
- DP_ERR(edev, "Failed to start TXQ#%d\n", tx_queue_id);
+ DP_ERR(edev, "Failed to start TXQ#%d\n", p_params->queue_id);
return rc;
}
DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
- tx_queue_id, rss_id, vport_id, sb);
+ "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
+ p_params->queue_id, rss_num, p_params->vport_id,
+ p_params->sb);
return 0;
}
struct qed_update_vport_params *params);
int (*q_rx_start)(struct ecore_dev *cdev,
- uint8_t rss_id, uint8_t rx_queue_id,
- uint8_t vport_id, uint16_t sb,
- uint8_t sb_index, uint16_t bd_max_bytes,
+ uint8_t rss_num,
+ struct ecore_queue_start_common_params *p_params,
+ uint16_t bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod);
struct qed_stop_rxq_params *params);
int (*q_tx_start)(struct ecore_dev *edev,
- uint8_t rss_id, uint16_t tx_queue_id,
- uint8_t vport_id, uint16_t sb,
- uint8_t sb_index,
+ uint8_t rss_num,
+ struct ecore_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell);
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
+ struct ecore_queue_start_common_params q_params;
struct qed_update_vport_rss_params *rss_params = &qdev->rss_params;
struct qed_dev_info *qed_info = &qdev->dev_info.common;
struct qed_update_vport_params vport_update_params;
page_cnt = ecore_chain_get_page_cnt(&fp->rxq->
rx_comp_ring);
+ memset(&q_params, 0, sizeof(q_params));
+ q_params.queue_id = i;
+ q_params.vport_id = 0;
+ q_params.sb = fp->sb_info->igu_sb_id;
+ q_params.sb_idx = RX_PI;
+
ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
- rc = qdev->ops->q_rx_start(edev, i, fp->rxq->queue_id,
- 0,
- fp->sb_info->igu_sb_id,
- RX_PI,
+ rc = qdev->ops->q_rx_start(edev, i, &q_params,
fp->rxq->rx_buf_size,
fp->rxq->rx_bd_ring.p_phys_addr,
p_phys_table,
p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
- rc = qdev->ops->q_tx_start(edev, i, txq->queue_id,
- 0,
- fp->sb_info->igu_sb_id,
- TX_PI(tc),
- p_phys_table, page_cnt,
+
+ memset(&q_params, 0, sizeof(q_params));
+ q_params.queue_id = txq->queue_id;
+ q_params.vport_id = 0;
+ q_params.sb = fp->sb_info->igu_sb_id;
+ q_params.sb_idx = TX_PI(tc);
+
+ rc = qdev->ops->q_tx_start(edev, i, &q_params,
+ p_phys_table,
+ page_cnt, /* **pp_doorbell */
&txq->doorbell_addr);
if (rc) {
DP_ERR(edev, "Start txq %u failed %d\n",