#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
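The ethdev driver header moved out of the public rte_ namespace, hence the rename above. A hedged sketch of straddling both header names in an out-of-tree build (assumes a compiler providing __has_include, e.g. GCC or Clang):

#if defined(__has_include) && __has_include(<ethdev_driver.h>)
#include <ethdev_driver.h>
#else
#include <rte_ethdev_driver.h>
#endif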
struct rte_mbuf *buf_bulk[n];
int ret, i;
struct rte_pktmbuf_pool_private *mbp_priv;
- u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_JUMBO_FRAME;
/* Use jumbo MTU buffers if the mbuf data room size can fit jumbo data. */
mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
- if (jumbo_en &&
- ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000))
+ if ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000)
buf_size_idx = RX_LARGE_MTU_BUF;
ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk, n);
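With DEV_RX_OFFLOAD_JUMBO_FRAME gone, the buffer-size decision depends only on the mempool geometry. A minimal standalone sketch of that policy (pick_buf_size_idx is hypothetical; RX_SMALL_MTU_BUF is assumed to be the driver's default small-buffer index):

/* Hypothetical helper: choose the free-list buffer size purely from the
 * mempool's data room, with no port-level jumbo offload flag involved. */
static inline unsigned int pick_buf_size_idx(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv = rte_mempool_get_priv(mp);

	if ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000)
		return RX_LARGE_MTU_BUF;	/* room for jumbo payloads */
	return RX_SMALL_MTU_BUF;
}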
*/
static inline int is_eth_imm(const struct rte_mbuf *m)
{
- unsigned int hdrlen = (m->ol_flags & PKT_TX_TCP_SEG) ?
+ unsigned int hdrlen = (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) ?
sizeof(struct cpl_tx_pkt_lso_core) : 0;
hdrlen += sizeof(struct cpl_tx_pkt);
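The PKT_* to RTE_MBUF_F_* switch is a pure rename. A hedged compatibility shim for building against pre-rename DPDK releases, covering the Tx flags touched in this file (the Rx flags rename the same way):

#ifndef RTE_MBUF_F_TX_TCP_SEG
#define RTE_MBUF_F_TX_TCP_SEG	PKT_TX_TCP_SEG
#define RTE_MBUF_F_TX_IP_CKSUM	PKT_TX_IP_CKSUM
#define RTE_MBUF_F_TX_L4_MASK	PKT_TX_L4_MASK
#define RTE_MBUF_F_TX_TCP_CKSUM	PKT_TX_TCP_CKSUM
#define RTE_MBUF_F_TX_UDP_CKSUM	PKT_TX_UDP_CKSUM
#define RTE_MBUF_F_TX_VLAN	PKT_TX_VLAN_PKT
#define RTE_MBUF_F_TX_IPV6	PKT_TX_IPV6
#endif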
{
int csum_type;
- if (m->ol_flags & PKT_TX_IP_CKSUM) {
- switch (m->ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_TCP_CKSUM:
+ if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
+ switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_TCP_CKSUM:
csum_type = TX_CSUM_TCPIP;
break;
- case PKT_TX_UDP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
csum_type = TX_CSUM_UDPIP;
break;
default:
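For context, a hedged application-side sketch of requesting the offloads this switch decodes (standard mbuf API, not driver code; struct rte_ipv4_hdr needs rte_ip.h):

/* Ask the NIC for IPv4 header + TCP checksum offload on this mbuf. */
static void request_tcp_csum_offload(struct rte_mbuf *m)
{
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
		       RTE_MBUF_F_TX_TCP_CKSUM;
}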
/* Fill the CPL message header; keep this in sync with
 * t4_eth_xmit().
 */
- if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
cntrl = hwcsum(adap->params.chip, mbuf) |
F_TXPKT_IPCSUM_DIS;
txq->stats.tx_cso++;
cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
}
- if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
txq->stats.vlan_ins++;
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci);
}
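The matching application-side request for the VLAN-insertion branch above (hedged sketch):

/* Have the hardware insert the given VLAN tag on transmit; vlan_tci is
 * what V_TXPKT_VLAN() consumes in the driver. */
static void request_vlan_insertion(struct rte_mbuf *m, uint16_t tci)
{
	m->vlan_tci = tci;
	m->ol_flags |= RTE_MBUF_F_TX_VLAN;
}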
u32 wr_mid;
u64 cntrl, *end;
bool v6;
- u32 max_pkt_len = txq->data->dev_conf.rxmode.max_rx_pkt_len;
+ u32 max_pkt_len;
/* Reject xmit if queue is stopped */
if (unlikely(txq->flags & EQ_STOPPED))
return 0;
}
- if ((!(m->ol_flags & PKT_TX_TCP_SEG)) &&
+ max_pkt_len = txq->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+ if ((!(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) &&
(unlikely(m->pkt_len > max_pkt_len)))
goto out_free;
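Worked example (assuming the default 1500-byte MTU): max_pkt_len = 1500 + RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) = 1518 bytes, i.e. RTE_ETHER_MAX_LEN, so oversize non-TSO packets are now rejected based on the queue's MTU rather than the removed rxmode.max_rx_pkt_len.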
/* align the end of coalesce WR to a 512 byte boundary */
txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
- if (!((m->ol_flags & PKT_TX_TCP_SEG) ||
+ if (!((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) ||
m->pkt_len > RTE_ETHER_MAX_LEN)) {
if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
if (unlikely(map_mbuf(mbuf, addr) < 0)) {
len += sizeof(*cpl);
/* Coalescing skipped and we send through normal path */
- if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
+ if (!(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
FW_ETH_TX_PKT_WR :
FW_ETH_TX_PKT_VM_WR) |
cpl = (void *)(wr + 1);
else
cpl = (void *)(vmwr + 1);
- if (m->ol_flags & PKT_TX_IP_CKSUM) {
+ if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
cntrl = hwcsum(adap->params.chip, m) |
F_TXPKT_IPCSUM_DIS;
txq->stats.tx_cso++;
lso = (void *)(wr + 1);
else
lso = (void *)(vmwr + 1);
- v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
+ v6 = (m->ol_flags & RTE_MBUF_F_TX_IPV6) != 0;
l3hdr_len = m->l3_len;
l4hdr_len = m->l4_len;
eth_xtra_len = m->l2_len - RTE_ETHER_HDR_LEN;
txq->stats.tx_cso += m->tso_segsz;
}
- if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
txq->stats.vlan_ins++;
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci);
}
if (cpl->vlan_ex)
cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER_VLAN,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
else
cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER, 0);
if (cpl->l2info & htonl(F_RXF_IP))
cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV4,
- csum_ok ? PKT_RX_IP_CKSUM_GOOD :
- PKT_RX_IP_CKSUM_BAD);
+ csum_ok ? RTE_MBUF_F_RX_IP_CKSUM_GOOD :
+ RTE_MBUF_F_RX_IP_CKSUM_BAD);
else if (cpl->l2info & htonl(F_RXF_IP6))
cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV6,
- csum_ok ? PKT_RX_IP_CKSUM_GOOD :
- PKT_RX_IP_CKSUM_BAD);
+ csum_ok ? RTE_MBUF_F_RX_IP_CKSUM_GOOD :
+ RTE_MBUF_F_RX_IP_CKSUM_BAD);
if (cpl->l2info & htonl(F_RXF_TCP))
cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_TCP,
- csum_ok ? PKT_RX_L4_CKSUM_GOOD :
- PKT_RX_L4_CKSUM_BAD);
+ csum_ok ? RTE_MBUF_F_RX_L4_CKSUM_GOOD :
+ RTE_MBUF_F_RX_L4_CKSUM_BAD);
else if (cpl->l2info & htonl(F_RXF_UDP))
cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_UDP,
- csum_ok ? PKT_RX_L4_CKSUM_GOOD :
- PKT_RX_L4_CKSUM_BAD);
+ csum_ok ? RTE_MBUF_F_RX_L4_CKSUM_GOOD :
+ RTE_MBUF_F_RX_L4_CKSUM_BAD);
}
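For reference, cxgbe_set_mbuf_info() is assumed to do nothing more than accumulate the packet type and offload flags onto the mbuf; a minimal sketch under that assumption:

static inline void cxgbe_set_mbuf_info(struct rte_mbuf *pkt, uint32_t ptype,
				       uint64_t ol_flags)
{
	pkt->packet_type |= ptype;
	pkt->ol_flags |= ol_flags;
}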
/**
if (!rss_hdr->filter_tid &&
rss_hdr->hash_type) {
- pkt->ol_flags |= PKT_RX_RSS_HASH;
+ pkt->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
pkt->hash.rss =
ntohl(rss_hdr->hash_val);
}
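The receive-side contract implied here: pkt->hash.rss is only valid when RTE_MBUF_F_RX_RSS_HASH is set. A hedged application-side sketch:

/* Spread packets across workers by RSS hash, falling back to worker 0
 * when the NIC did not provide a valid hash. */
static unsigned int pick_worker(const struct rte_mbuf *pkt,
				unsigned int nb_workers)
{
	if (pkt->ol_flags & RTE_MBUF_F_RX_RSS_HASH)
		return pkt->hash.rss % nb_workers;
	return 0;
}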
unsigned int params;
u32 val;
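+ /* Bail out before touching the response queue: t4_sge_eth_rxq_stop()
+  * sets IQ_STOPPED, and polling a stopped queue must be a no-op. */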
+ if (unlikely(rxq->flags & IQ_STOPPED)) {
+ *work_done = 0;
+ return 0;
+ }
+
*work_done = process_responses(q, budget, rx_pkts);
if (*work_done) {
return adapter->bar2 + bar2_qoffset;
}
-int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq)
+int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_eth_rxq *rxq)
{
- struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;
+ rxq->flags &= ~IQ_STOPPED;
return t4_iq_start_stop(adap, adap->mbox, true, adap->pf, 0,
- rq->cntxt_id, fl_id, 0xffff);
+ rxq->rspq.cntxt_id, fl_id, 0xffff);
}
-int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq)
+int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_eth_rxq *rxq)
{
- struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;
+ rxq->flags |= IQ_STOPPED;
return t4_iq_start_stop(adap, adap->mbox, false, adap->pf, 0,
- rq->cntxt_id, fl_id, 0xffff);
+ rxq->rspq.cntxt_id, fl_id, 0xffff);
}
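Note the ordering: IQ_STOPPED is cleared before the firmware starts the queue and set before the firmware stops it, so the poll path above never runs against a stopped queue. A hedged caller sketch, assuming the usual DPDK dev_ops shape and that dev->data->rx_queues[] holds sge_eth_rxq pointers:

static int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qid)
{
	struct port_info *pi = dev->data->dev_private;
	struct sge_eth_rxq *rxq = dev->data->rx_queues[qid];

	return t4_sge_eth_rxq_stop(pi->adapter, rxq);
}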
/*
* simple (and hopefully less wrong).
*/
if (is_pf4(adap) && !is_t4(adap->params.chip) && cong >= 0) {
- u32 param, val;
+ u8 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
+ u32 param, val, ch_map = 0;
int i;
param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
X_CONMCTXT_CNGTPMODE_CHANNEL);
for (i = 0; i < 4; i++) {
if (cong & (1 << i))
- val |= V_CONMCTXT_CNGCHMAP(1 <<
- (i << 2));
+ ch_map |= 1 << (i << cng_ch_bits_log);
}
+ val |= V_CONMCTXT_CNGCHMAP(ch_map);
}
ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
&param, &val);
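Worked example for the channel-map fix (values hedged): with adap->params.arch.cng_ch_bits_log == 2, channel i lands on bit i * 4, so cong == 0x5 (channels 0 and 2) yields ch_map == 0x101. The old code hard-wired that 4-bit spacing via (i << 2), which is only correct on chips whose congestion-channel field is actually 4 bits wide.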
*/
void t4_sge_eth_clear_queues(struct port_info *pi)
{
- int i;
struct adapter *adap = pi->adapter;
- struct sge_eth_rxq *rxq = &adap->sge.ethrxq[pi->first_qset];
- struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
+ struct sge_eth_rxq *rxq;
+ struct sge_eth_txq *txq;
+ int i;
+ rxq = &adap->sge.ethrxq[pi->first_rxqset];
for (i = 0; i < pi->n_rx_qsets; i++, rxq++) {
if (rxq->rspq.desc)
- t4_sge_eth_rxq_stop(adap, &rxq->rspq);
+ t4_sge_eth_rxq_stop(adap, rxq);
}
+
+ txq = &adap->sge.ethtxq[pi->first_txqset];
for (i = 0; i < pi->n_tx_qsets; i++, txq++) {
if (txq->q.desc) {
struct sge_txq *q = &txq->q;
void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq)
{
if (rxq->rspq.desc) {
- t4_sge_eth_rxq_stop(adap, &rxq->rspq);
+ t4_sge_eth_rxq_stop(adap, rxq);
free_rspq_fl(adap, &rxq->rspq, rxq->fl.size ? &rxq->fl : NULL);
}
}
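Note on the indexing change above and below: pi->first_qset is split into pi->first_rxqset and pi->first_txqset, since a port's Rx and Tx queue sets may start at different offsets within the shared adap->sge.ethrxq[] and adap->sge.ethtxq[] arrays.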
struct sge_eth_txq *txq;
unsigned int i;
- rxq = &adap->sge.ethrxq[pi->first_qset];
+ rxq = &adap->sge.ethrxq[pi->first_rxqset];
/* clean up Ethernet Tx/Rx queues */
for (i = 0; i < pi->n_rx_qsets; i++, rxq++) {
/* Free only the queues allocated */
}
}
- txq = &adap->sge.ethtxq[pi->first_qset];
+ txq = &adap->sge.ethtxq[pi->first_txqset];
for (i = 0; i < pi->n_tx_qsets; i++, txq++) {
/* Free only the queues allocated */
if (txq->q.desc) {