X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fcxgbe%2Fsge.c;h=21b8fe61c9a7ee748bc6a820fc311dd8763c9ec6;hb=295968d1740760337e16b0d7914875c5cac52850;hp=34e48574aad8ff87b900533ff27d7c9f0f8536d7;hpb=6b78a629954c3857d4bc651a673fe102958a12db;p=dpdk.git

diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index 34e48574aa..21b8fe61c9 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -20,7 +20,6 @@
 #include <rte_log.h>
 #include <rte_debug.h>
 #include <rte_pci.h>
-#include <rte_atomic.h>
 #include <rte_branch_prediction.h>
 #include <rte_memory.h>
 #include <rte_memzone.h>
@@ -28,7 +27,7 @@
 #include <rte_eal.h>
 #include <rte_alarm.h>
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_malloc.h>
 #include <rte_random.h>
 #include <rte_dev.h>
@@ -366,13 +365,10 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
 	struct rte_mbuf *buf_bulk[n];
 	int ret, i;
 	struct rte_pktmbuf_pool_private *mbp_priv;
-	u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.offloads &
-		DEV_RX_OFFLOAD_JUMBO_FRAME;
 
 	/* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
 	mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
-	if (jumbo_en &&
-	    ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000))
+	if ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000)
 		buf_size_idx = RX_LARGE_MTU_BUF;
 
 	ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk, n);
@@ -1114,7 +1110,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
 	u32 wr_mid;
 	u64 cntrl, *end;
 	bool v6;
-	u32 max_pkt_len = txq->data->dev_conf.rxmode.max_rx_pkt_len;
+	u32 max_pkt_len;
 
 	/* Reject xmit if queue is stopped */
 	if (unlikely(txq->flags & EQ_STOPPED))
@@ -1130,6 +1126,7 @@ out_free:
 		return 0;
 	}
 
+	max_pkt_len = txq->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
 	if ((!(m->ol_flags & PKT_TX_TCP_SEG)) &&
 	    (unlikely(m->pkt_len > max_pkt_len)))
 		goto out_free;
@@ -1694,6 +1691,11 @@ int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
 	unsigned int params;
 	u32 val;
 
+	if (unlikely(rxq->flags & IQ_STOPPED)) {
+		*work_done = 0;
+		return 0;
+	}
+
 	*work_done = process_responses(q, budget, rx_pkts);
 
 	if (*work_done) {
@@ -1754,22 +1756,22 @@ static void __iomem *bar2_address(struct adapter *adapter, unsigned int qid,
 	return adapter->bar2 + bar2_qoffset;
 }
 
-int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq)
+int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_eth_rxq *rxq)
 {
-	struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
 	unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;
 
+	rxq->flags &= ~IQ_STOPPED;
 	return t4_iq_start_stop(adap, adap->mbox, true, adap->pf, 0,
-				rq->cntxt_id, fl_id, 0xffff);
+				rxq->rspq.cntxt_id, fl_id, 0xffff);
 }
 
-int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq)
+int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_eth_rxq *rxq)
 {
-	struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
 	unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;
 
+	rxq->flags |= IQ_STOPPED;
 	return t4_iq_start_stop(adap, adap->mbox, false, adap->pf, 0,
-				rq->cntxt_id, fl_id, 0xffff);
+				rxq->rspq.cntxt_id, fl_id, 0xffff);
 }
 
 /*
@@ -1949,7 +1951,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 	 * simple (and hopefully less wrong).
 	 */
	if (is_pf4(adap) && !is_t4(adap->params.chip) && cong >= 0) {
-		u32 param, val;
+		u8 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
+		u32 param, val, ch_map = 0;
 		int i;
 
 		param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
@@ -1962,9 +1965,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 					X_CONMCTXT_CNGTPMODE_CHANNEL);
 			for (i = 0; i < 4; i++) {
 				if (cong & (1 << i))
-					val |= V_CONMCTXT_CNGCHMAP(1 <<
-								   (i << 2));
+					ch_map |= 1 << (i << cng_ch_bits_log);
 			}
+			val |= V_CONMCTXT_CNGCHMAP(ch_map);
 		}
 		ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
 				    &param, &val);
@@ -2193,15 +2196,18 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
  */
 void t4_sge_eth_clear_queues(struct port_info *pi)
 {
-	int i;
 	struct adapter *adap = pi->adapter;
-	struct sge_eth_rxq *rxq = &adap->sge.ethrxq[pi->first_qset];
-	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
+	struct sge_eth_rxq *rxq;
+	struct sge_eth_txq *txq;
+	int i;
 
+	rxq = &adap->sge.ethrxq[pi->first_rxqset];
 	for (i = 0; i < pi->n_rx_qsets; i++, rxq++) {
 		if (rxq->rspq.desc)
-			t4_sge_eth_rxq_stop(adap, &rxq->rspq);
+			t4_sge_eth_rxq_stop(adap, rxq);
 	}
+
+	txq = &adap->sge.ethtxq[pi->first_txqset];
 	for (i = 0; i < pi->n_tx_qsets; i++, txq++) {
 		if (txq->q.desc) {
 			struct sge_txq *q = &txq->q;
@@ -2217,7 +2223,7 @@ void t4_sge_eth_clear_queues(struct port_info *pi)
 void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq)
 {
 	if (rxq->rspq.desc) {
-		t4_sge_eth_rxq_stop(adap, &rxq->rspq);
+		t4_sge_eth_rxq_stop(adap, rxq);
 		free_rspq_fl(adap, &rxq->rspq, rxq->fl.size ? &rxq->fl : NULL);
 	}
 }
@@ -2241,7 +2247,7 @@ void t4_sge_eth_release_queues(struct port_info *pi)
 	struct sge_eth_txq *txq;
 	unsigned int i;
 
-	rxq = &adap->sge.ethrxq[pi->first_qset];
+	rxq = &adap->sge.ethrxq[pi->first_rxqset];
 	/* clean up Ethernet Tx/Rx queues */
 	for (i = 0; i < pi->n_rx_qsets; i++, rxq++) {
 		/* Free only the queues allocated */
@@ -2253,7 +2259,7 @@ void t4_sge_eth_release_queues(struct port_info *pi)
 		}
 	}
 
-	txq = &adap->sge.ethtxq[pi->first_qset];
+	txq = &adap->sge.ethtxq[pi->first_txqset];
 	for (i = 0; i < pi->n_tx_qsets; i++, txq++) {
 		/* Free only the queues allocated */
 		if (txq->q.desc) {
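
The congestion-map hunks above replace a hard-coded 4-bit-per-channel stride (i << 2) with a stride derived from the chip's adap->params.arch.cng_ch_bits_log. A minimal standalone sketch follows (illustration only, not driver code); cng_ch_bits_log = 2 and cong = 0x5 are assumed example values:

/* Illustrative sketch: compares the old fixed congestion-map stride
 * with the per-chip stride used by the fix. cng_ch_bits_log = 2 is an
 * assumed example; the driver reads it from
 * adap->params.arch.cng_ch_bits_log. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cong = 0x5;         /* example: channels 0 and 2 congested */
	uint8_t cng_ch_bits_log = 2; /* assumed: 2^2 = 4 map bits per channel */
	uint32_t old_map = 0, new_map = 0;
	int i;

	for (i = 0; i < 4; i++) {
		if (cong & (1u << i)) {
			old_map |= 1u << (i << 2);               /* fixed stride */
			new_map |= 1u << (i << cng_ch_bits_log); /* chip-derived */
		}
	}

	/* The two agree only while the chip really uses 4 bits per channel;
	 * deriving the stride keeps the map correct when that width differs. */
	printf("old_map=0x%x new_map=0x%x\n", old_map, new_map);
	return 0;
}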