unsigned int params;
u32 val;
+ if (unlikely(rxq->flags & IQ_STOPPED)) {
+ *work_done = 0;
+ return 0;
+ }
+
*work_done = process_responses(q, budget, rx_pkts);
if (*work_done) {
return adapter->bar2 + bar2_qoffset;
}
-int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq)
+int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_eth_rxq *rxq)
{
- struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;
+ rxq->flags &= ~IQ_STOPPED;
return t4_iq_start_stop(adap, adap->mbox, true, adap->pf, 0,
- rq->cntxt_id, fl_id, 0xffff);
+ rxq->rspq.cntxt_id, fl_id, 0xffff);
}
-int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq)
+int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_eth_rxq *rxq)
{
- struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;
+ rxq->flags |= IQ_STOPPED;
return t4_iq_start_stop(adap, adap->mbox, false, adap->pf, 0,
- rq->cntxt_id, fl_id, 0xffff);
+ rxq->rspq.cntxt_id, fl_id, 0xffff);
}
/*
* simple (and hopefully less wrong).
*/
if (is_pf4(adap) && !is_t4(adap->params.chip) && cong >= 0) {
- u32 param, val;
+ u8 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
+ u32 param, val, ch_map = 0;
int i;
param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
X_CONMCTXT_CNGTPMODE_CHANNEL);
for (i = 0; i < 4; i++) {
if (cong & (1 << i))
- val |= V_CONMCTXT_CNGCHMAP(1 <<
- (i << 2));
+ ch_map |= 1 << (i << cng_ch_bits_log);
}
+ val |= V_CONMCTXT_CNGCHMAP(ch_map);
}
ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
¶m, &val);
*/
void t4_sge_eth_clear_queues(struct port_info *pi)
{
- int i;
struct adapter *adap = pi->adapter;
- struct sge_eth_rxq *rxq = &adap->sge.ethrxq[pi->first_qset];
- struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
+ struct sge_eth_rxq *rxq;
+ struct sge_eth_txq *txq;
+ int i;
+ rxq = &adap->sge.ethrxq[pi->first_rxqset];
for (i = 0; i < pi->n_rx_qsets; i++, rxq++) {
if (rxq->rspq.desc)
- t4_sge_eth_rxq_stop(adap, &rxq->rspq);
+ t4_sge_eth_rxq_stop(adap, rxq);
}
+
+ txq = &adap->sge.ethtxq[pi->first_txqset];
for (i = 0; i < pi->n_tx_qsets; i++, txq++) {
if (txq->q.desc) {
struct sge_txq *q = &txq->q;
/**
 * t4_sge_eth_rxq_release - stop and free an Ethernet Rx queue
 * @adap: the adapter
 * @rxq: the Ethernet Rx queue to release
 *
 * No-op when the queue was never allocated (rspq.desc == NULL).
 * Otherwise stops the queue in firmware and releases the response
 * queue together with its free list, if one is attached.
 */
void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq)
{
	/* rspq.desc is only non-NULL for queues that were allocated */
	if (rxq->rspq.desc) {
		t4_sge_eth_rxq_stop(adap, rxq);
		/* A zero-sized free list means none is attached */
		free_rspq_fl(adap, &rxq->rspq, rxq->fl.size ? &rxq->fl : NULL);
	}
}
struct sge_eth_txq *txq;
unsigned int i;
- rxq = &adap->sge.ethrxq[pi->first_qset];
+ rxq = &adap->sge.ethrxq[pi->first_rxqset];
/* clean up Ethernet Tx/Rx queues */
for (i = 0; i < pi->n_rx_qsets; i++, rxq++) {
/* Free only the queues allocated */
}
}
- txq = &adap->sge.ethtxq[pi->first_qset];
+ txq = &adap->sge.ethtxq[pi->first_txqset];
for (i = 0; i < pi->n_tx_qsets; i++, txq++) {
/* Free only the queues allocated */
if (txq->q.desc) {