};
struct sge_eth_rxq { /* a SW Ethernet Rx queue */
+ unsigned int flags; /* flags for state of the queue */
struct sge_rspq rspq;
struct sge_fl fl;
struct sge_eth_rx_stats stats;
struct tx_eth_coal_desc coalesce;
};
-enum {
+enum cxgbe_txq_state {
EQ_STOPPED = (1 << 0),
};
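+
+/* T4 terminology: Tx rings are egress queues (EQs) and Rx response rings
+ * are ingress queues (IQs), hence EQ_STOPPED vs. IQ_STOPPED.
+ */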
+enum cxgbe_rxq_state {
+ IQ_STOPPED = (1 << 0),
+};
+
struct eth_coalesce {
unsigned char *ptr;
unsigned char type;
int t4_sge_eth_txq_start(struct sge_eth_txq *txq);
int t4_sge_eth_txq_stop(struct sge_eth_txq *txq);
void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq);
-int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq);
-int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq);
+int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_eth_rxq *rxq);
+int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_eth_rxq *rxq);
void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq);
void t4_sge_eth_clear_queues(struct port_info *pi);
void t4_sge_eth_release_queues(struct port_info *pi);
struct arch_specific_params {
u8 nchan;
+ u8 cng_ch_bits_log; /* log2 of congestion channel map bits per channel */
u16 mps_rplc_size;
u16 vfcount;
u32 sge_fl_db;
adapter->params.arch.mps_rplc_size = 128;
adapter->params.arch.nchan = NCHAN;
adapter->params.arch.vfcount = 128;
+ /* The congestion channel map covers 4 channels at 4 bits each,
+ * so MPS can have 4 priorities per port.
+ */
+ adapter->params.arch.cng_ch_bits_log = 2;
break;
case CHELSIO_T6:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
adapter->params.arch.mps_rplc_size = 256;
adapter->params.arch.nchan = 2;
adapter->params.arch.vfcount = 256;
+ /* The congestion channel map covers 2 channels at 8 bits each,
+ * so MPS can have 8 priorities per port.
+ */
+ adapter->params.arch.cng_ch_bits_log = 3;
break;
default:
dev_err(adapter, "%s: Device %d is not supported\n",
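Taken together, cng_ch_bits_log is just log2 of each channel's slice of the congestion channel map, and both layouts span the same 16 map bits. A summary of the mapping implied by the values above (bit positions follow from the `i << cng_ch_bits_log` shift used when the map is built):

/*
 * chip  channels  bits/channel  cng_ch_bits_log  channel i -> map bit
 *  T5      4           4               2          i << 2 (0, 4, 8, 12)
 *  T6      2           8               3          i << 3 (0, 8)
 */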
int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
- int ret;
struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adap = pi->adapter;
- struct sge_rspq *q;
+ struct sge_eth_rxq *rxq;
+ int ret;
dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
__func__, pi->port_id, rx_queue_id);
- q = eth_dev->data->rx_queues[rx_queue_id];
-
- ret = t4_sge_eth_rxq_start(adap, q);
+ rxq = eth_dev->data->rx_queues[rx_queue_id];
+ ret = t4_sge_eth_rxq_start(adap, rxq);
if (ret == 0)
eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
- int ret;
struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adap = pi->adapter;
- struct sge_rspq *q;
+ struct sge_eth_rxq *rxq;
+ int ret;
dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
__func__, pi->port_id, rx_queue_id);
- q = eth_dev->data->rx_queues[rx_queue_id];
- ret = t4_sge_eth_rxq_stop(adap, q);
+ rxq = eth_dev->data->rx_queues[rx_queue_id];
+ ret = t4_sge_eth_rxq_stop(adap, rxq);
if (ret == 0)
eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
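For context, these callbacks are reached through the generic ethdev queue-state API. A minimal sketch of an application driving them (hypothetical helper name, error handling trimmed):

#include <rte_ethdev.h>

/* Illustrative only: quiesce, then restart, Rx queue 0 on a port. */
static int restart_rxq0(uint16_t port_id)
{
        int ret;

        ret = rte_eth_dev_rx_queue_stop(port_id, 0);
        if (ret != 0)
                return ret;

        /* The queue is quiesced; the PMD datapath now sees IQ_STOPPED. */
        return rte_eth_dev_rx_queue_start(port_id, 0);
}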
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
unsigned int params;
u32 val;
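+
+ /* Don't poll a stopped queue: report no work done so the caller
+ * leaves the ring and its free list alone.
+ */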
+ if (unlikely(rxq->flags & IQ_STOPPED)) {
+ *work_done = 0;
+ return 0;
+ }
+
*work_done = process_responses(q, budget, rx_pkts);
if (*work_done) {
return adapter->bar2 + bar2_qoffset;
}
-int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq)
+int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_eth_rxq *rxq)
{
- struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;
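+ /* Let the datapath poll this queue again before FW restarts it. */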
+ rxq->flags &= ~IQ_STOPPED;
return t4_iq_start_stop(adap, adap->mbox, true, adap->pf, 0,
- rq->cntxt_id, fl_id, 0xffff);
+ rxq->rspq.cntxt_id, fl_id, 0xffff);
}
-int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq)
+int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_eth_rxq *rxq)
{
- struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;
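+ /* Mark the queue stopped first so the datapath quits polling it
+ * before FW quiesces the queue.
+ */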
+ rxq->flags |= IQ_STOPPED;
return t4_iq_start_stop(adap, adap->mbox, false, adap->pf, 0,
- rq->cntxt_id, fl_id, 0xffff);
+ rxq->rspq.cntxt_id, fl_id, 0xffff);
}
/*
* simple (and hopefully less wrong).
*/
if (is_pf4(adap) && !is_t4(adap->params.chip) && cong >= 0) {
- u32 param, val;
+ u8 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
+ u32 param, val, ch_map = 0;
int i;
param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
	 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
	 V_FW_PARAMS_PARAM_YZ(iq->cntxt_id));
val = V_CONMCTXT_CNGTPMODE(X_CONMCTXT_CNGTPMODE_CHANNEL);
for (i = 0; i < 4; i++) {
if (cong & (1 << i))
- val |= V_CONMCTXT_CNGCHMAP(1 <<
- (i << 2));
+ ch_map |= 1 << (i << cng_ch_bits_log);
}
+ val |= V_CONMCTXT_CNGCHMAP(ch_map);
}
ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
&param, &val);
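As a standalone illustration of the map-building loop above (hypothetical helper, not part of the patch; u8/u32 as in the driver's compat types):

/* Build the CNGCHMAP payload from a per-channel congestion bitmap. */
static inline u32 cong_ch_map(u8 cong, u8 cng_ch_bits_log)
{
        u32 ch_map = 0;
        int i;

        for (i = 0; i < 4; i++) {
                if (cong & (1 << i))
                        ch_map |= 1 << (i << cng_ch_bits_log);
        }

        /* T5 (log = 2): cong 0xf -> 0x1111 (bits 0, 4, 8, 12).
         * T6 (log = 3): cong 0x3 -> 0x101 (bits 0 and 8).
         */
        return ch_map;
}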
rxq = &adap->sge.ethrxq[pi->first_rxqset];
for (i = 0; i < pi->n_rx_qsets; i++, rxq++) {
if (rxq->rspq.desc)
- t4_sge_eth_rxq_stop(adap, &rxq->rspq);
+ t4_sge_eth_rxq_stop(adap, rxq);
}
txq = &adap->sge.ethtxq[pi->first_txqset];
void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq)
{
if (rxq->rspq.desc) {
- t4_sge_eth_rxq_stop(adap, &rxq->rspq);
+ t4_sge_eth_rxq_stop(adap, rxq);
free_rspq_fl(adap, &rxq->rspq, rxq->fl.size ? &rxq->fl : NULL);
}
}