* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/if_ether.h>
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
{
struct sge *s = &adapter->sge;
- return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
+ return CXGBE_ALIGN(s->pktshift + ETHER_HDR_LEN + VLAN_HLEN + mtu,
+ s->fl_align);
}
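+ /*
+ * Worked example (a sketch, assuming pktshift = 2 and fl_align = 64):
+ * for a 1500-byte MTU, 2 + ETHER_HDR_LEN (14) + VLAN_HLEN (4) + 1500 =
+ * 1520, which CXGBE_ALIGN rounds up to 1536 bytes per buffer.
+ */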
#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
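+ /*
+ * reclaim_tx_desc - walk @n completed Tx descriptors from @q->cidx,
+ * freeing any mbuf still attached to the software descriptor before
+ * advancing the consumer index.
+ */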
static void reclaim_tx_desc(struct sge_txq *q, unsigned int n)
{
+ struct tx_sw_desc *d;
unsigned int cidx = q->cidx;
+ d = &q->sdesc[cidx];
while (n--) {
- if (++cidx == q->size)
+ if (d->mbuf) { /* an SGL is present */
+ rte_pktmbuf_free(d->mbuf);
+ d->mbuf = NULL;
+ }
+ ++d;
+ if (++cidx == q->size) {
cidx = 0;
+ d = q->sdesc;
+ }
}
q->cidx = cidx;
}
return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
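+ /*
+ * get_buf_size - recover the byte size of an Rx free-list buffer.
+ * The buffer-size index is stashed in the low bits of the buffer's
+ * DMA address (mask RX_BUF_SIZE), so no extra per-descriptor state
+ * is needed.
+ */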
+static inline unsigned int get_buf_size(struct adapter *adapter,
+ const struct rx_sw_desc *d)
+{
+ unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
+ unsigned int buf_size = 0;
+
+ switch (rx_buf_size_idx) {
+ case RX_SMALL_MTU_BUF:
+ buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
+ break;
+
+ case RX_LARGE_MTU_BUF:
+ buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
+ break;
+
+ default:
+ BUG_ON(1);
+ /* NOT REACHED */
+ }
+
+ return buf_size;
+}
+
/**
* free_rx_bufs - free the Rx buffers on an SGE free list
* @q: the SGE free list to free buffers from
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
- /* see if we have exceeded q->size / 4 */
- if (q->pend_cred >= (q->size / 4)) {
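+ /*
+ * Return credits in fixed batches of 64 buffers instead of waiting
+ * for a quarter of the ring, so large free lists are replenished
+ * promptly.
+ */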
+ if (q->pend_cred >= 64) {
u32 val = adap->params.arch.sge_fl_db;
if (is_t4(adap->params.chip))
* mechanism.
*/
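+ /*
+ * The relaxed register/BAR2 writes below skip the per-write barrier;
+ * ordering is supplied by the explicit write memory barrier that
+ * follows the doorbell.
+ */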
if (unlikely(!q->bar2_addr)) {
- t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
- val | V_QID(q->cntxt_id));
+ t4_write_reg_relaxed(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
+ val | V_QID(q->cntxt_id));
} else {
- writel(val | V_QID(q->bar2_qid),
- (void *)((uintptr_t)q->bar2_addr +
- SGE_UDB_KDOORBELL));
+ writel_relaxed(val | V_QID(q->bar2_qid),
+ (void *)((uintptr_t)q->bar2_addr +
+ SGE_UDB_KDOORBELL));
/*
* This Write memory Barrier will force the write to
unsigned int buf_size_idx = RX_SMALL_MTU_BUF;
struct rte_mbuf *buf_bulk[n];
int ret, i;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.jumbo_frame;
+
+ /*
+ * Use jumbo mtu buffers if and only if the mbuf data room is large
+ * enough to hold a jumbo payload (9000 bytes here).
+ */
+ mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
+ if (jumbo_en &&
+ ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000))
+ buf_size_idx = RX_LARGE_MTU_BUF;
ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk, n);
if (unlikely(ret != 0)) {
struct adapter *adap = (struct adapter *)data;
struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
int i;
+ unsigned int coal_idx;
/* monitor any pending tx */
for (i = 0; i < adap->sge.max_ethqsets; i++, txq++) {
- t4_os_lock(&txq->txq_lock);
- if (txq->q.coalesce.idx) {
- if (txq->q.coalesce.idx == txq->q.last_coal_idx &&
- txq->q.pidx == txq->q.last_pidx) {
- ship_tx_pkt_coalesce_wr(adap, txq);
- } else {
- txq->q.last_coal_idx = txq->q.coalesce.idx;
- txq->q.last_pidx = txq->q.pidx;
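+ /*
+ * Use a trylock so this timer never stalls behind the Tx fast
+ * path; a busy queue is simply retried on the next 50us alarm.
+ */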
+ if (t4_os_trylock(&txq->txq_lock)) {
+ coal_idx = txq->q.coalesce.idx;
+ if (coal_idx) {
+ if (coal_idx == txq->q.last_coal_idx &&
+ txq->q.pidx == txq->q.last_pidx) {
+ ship_tx_pkt_coalesce_wr(adap, txq);
+ } else {
+ txq->q.last_coal_idx = coal_idx;
+ txq->q.last_pidx = txq->q.pidx;
+ }
}
+ t4_os_unlock(&txq->txq_lock);
}
- t4_os_unlock(&txq->txq_lock);
}
rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
}
struct sge_txq *q = &txq->q;
unsigned int flits, ndesc;
unsigned char type = 0;
- int credits, hw_cidx = ntohs(q->stat->cidx);
- int in_use = q->pidx - hw_cidx + flits_to_desc(q->coalesce.flits);
+ int credits;
/* use coalesce WR type 1 for single-segment mbufs (no frags present) */
type = (mbuf->nb_segs == 1) ? 1 : 0;
- if (in_use < 0)
- in_use += q->size;
-
if (unlikely(type != q->coalesce.type && q->coalesce.idx))
ship_tx_pkt_coalesce_wr(adap, txq);
u32 wr_mid;
u64 cntrl, *end;
bool v6;
+ u32 max_pkt_len = txq->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
/* Reject xmit if queue is stopped */
if (unlikely(txq->flags & EQ_STOPPED))
return 0;
}
- rte_prefetch0(&((&txq->q)->sdesc->mbuf->pool));
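+ /*
+ * Drop packets longer than the configured maximum frame size unless
+ * TSO will segment them; the limit mirrors the Rx max_rx_pkt_len.
+ */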
+ if ((!(m->ol_flags & PKT_TX_TCP_SEG)) &&
+ (unlikely(m->pkt_len > max_pkt_len)))
+ goto out_free;
+
pi = (struct port_info *)txq->eth_dev->data->dev_private;
adap = pi->adapter;
/* align the end of the coalesce WR to a 512-byte boundary */
txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
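+ /* Only non-TSO packets no larger than ETHER_MAX_LEN are coalesced. */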
- if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
+ if (!((m->ol_flags & PKT_TX_TCP_SEG) || (m->pkt_len > ETHER_MAX_LEN))) {
if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
if (unlikely(map_mbuf(mbuf, addr) < 0)) {
dev_warn(adap, "%s: mapping err for coalesce\n",
txq->stats.mapping_err++;
goto out_free;
}
+ rte_prefetch0((volatile void *)addr);
return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
pi, addr);
} else {
len = 0;
len += sizeof(*cpl);
- lso = (void *)(wr + 1);
- v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
- l3hdr_len = m->l3_len;
- l4hdr_len = m->l4_len;
- eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
- len += sizeof(*lso);
- wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
- V_FW_WR_IMMDLEN(len));
- lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
- F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
- V_LSO_IPV6(v6) |
- V_LSO_ETHHDR_LEN(eth_xtra_len / 4) |
- V_LSO_IPHDR_LEN(l3hdr_len / 4) |
- V_LSO_TCPHDR_LEN(l4hdr_len / 4));
- lso->ipid_ofst = htons(0);
- lso->mss = htons(m->tso_segsz);
- lso->seqno_offset = htonl(0);
- if (is_t4(adap->params.chip))
- lso->len = htonl(m->pkt_len);
- else
- lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len));
- cpl = (void *)(lso + 1);
- cntrl = V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- V_TXPKT_IPHDR_LEN(l3hdr_len) |
- V_TXPKT_ETHHDR_LEN(eth_xtra_len);
- txq->stats.tso++;
- txq->stats.tx_cso += m->tso_segsz;
+
+ /* Coalescing was skipped; send through the normal WR path. */
+ if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
+ wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ V_FW_WR_IMMDLEN(len));
+ cpl = (void *)(wr + 1);
+ if (m->ol_flags & PKT_TX_IP_CKSUM) {
+ cntrl = hwcsum(adap->params.chip, m) |
+ F_TXPKT_IPCSUM_DIS;
+ txq->stats.tx_cso++;
+ }
+ } else {
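+ /*
+ * TSO: prepend a CPL_TX_PKT_LSO control message. The hardware
+ * expects the header lengths below in 4-byte words, hence the
+ * divisions by 4.
+ */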
+ lso = (void *)(wr + 1);
+ v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
+ l3hdr_len = m->l3_len;
+ l4hdr_len = m->l4_len;
+ eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
+ len += sizeof(*lso);
+ wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ V_FW_WR_IMMDLEN(len));
+ lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
+ F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
+ V_LSO_IPV6(v6) |
+ V_LSO_ETHHDR_LEN(eth_xtra_len / 4) |
+ V_LSO_IPHDR_LEN(l3hdr_len / 4) |
+ V_LSO_TCPHDR_LEN(l4hdr_len / 4));
+ lso->ipid_ofst = htons(0);
+ lso->mss = htons(m->tso_segsz);
+ lso->seqno_offset = htonl(0);
+ if (is_t4(adap->params.chip))
+ lso->len = htonl(m->pkt_len);
+ else
+ lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len));
+ cpl = (void *)(lso + 1);
+ cntrl = V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ V_TXPKT_IPHDR_LEN(l3hdr_len) |
+ V_TXPKT_ETHHDR_LEN(eth_xtra_len);
+ txq->stats.tso++;
+ txq->stats.tx_cso += m->tso_segsz;
+ }
if (m->ol_flags & PKT_TX_VLAN_PKT) {
txq->stats.vlan_ins++;
last_desc -= txq->q.size;
d = &txq->q.sdesc[last_desc];
- if (d->mbuf) {
- rte_pktmbuf_free(d->mbuf);
- d->mbuf = NULL;
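+ /* Free every mbuf that was coalesced into this completed descriptor. */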
+ if (d->coalesce.idx) {
+ int i;
+
+ for (i = 0; i < d->coalesce.idx; i++) {
+ rte_pktmbuf_free(d->coalesce.mbuf[i]);
+ d->coalesce.mbuf[i] = NULL;
+ }
+ d->coalesce.idx = 0;
}
write_sgl(m, &txq->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
addr);
return t4_pktgl_to_mbuf_usembufs(gl);
}
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
- ((dma_addr_t) ((mb)->buf_physaddr + (mb)->data_off))
-
/**
* t4_ethrx_handler - process an ingress ethernet packet
* @q: the response queue that received the packet
mbuf->port = pkt->iff;
if (pkt->l2info & htonl(F_RXF_IP)) {
- mbuf->ol_flags |= PKT_RX_IPV4_HDR;
+ mbuf->packet_type = RTE_PTYPE_L3_IPV4;
if (unlikely(!csum_ok))
mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
if ((pkt->l2info & htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
} else if (pkt->l2info & htonl(F_RXF_IP6)) {
- mbuf->ol_flags |= PKT_RX_IPV6_HDR;
+ mbuf->packet_type = RTE_PTYPE_L3_IPV6;
}
mbuf->port = pkt->iff;
const struct cpl_rx_pkt *cpl =
(const void *)&q->cur_desc[1];
bool csum_ok = cpl->csum_calc && !cpl->err_vec;
- struct rte_mbuf *pkt;
- u32 len = ntohl(rc->pldbuflen_qid);
+ struct rte_mbuf *pkt, *npkt;
+ u32 len, bufsz;
+ len = ntohl(rc->pldbuflen_qid);
BUG_ON(!(len & F_RSPD_NEWBUF));
pkt = rsd->buf;
- pkt->data_len = G_RSPD_LEN(len);
- pkt->pkt_len = pkt->data_len;
- unmap_rx_buf(&rxq->fl);
+ npkt = pkt;
+ len = G_RSPD_LEN(len);
+ pkt->pkt_len = len;
+
+ /* Chain additional mbufs until all len payload bytes are covered. */
+ while (len) {
+ struct rte_mbuf *new_pkt = rsd->buf;
+
+ bufsz = min(get_buf_size(q->adapter, rsd), len);
+ new_pkt->data_len = bufsz;
+ unmap_rx_buf(&rxq->fl);
+ len -= bufsz;
+ npkt->next = new_pkt;
+ npkt = new_pkt;
+ pkt->nb_segs++;
+ rsd = &rxq->fl.sdesc[rxq->fl.cidx];
+ }
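+ /*
+ * The first pass of the loop revisited the head mbuf itself,
+ * counting one extra segment and linking the head to itself; the
+ * next two statements undo both effects.
+ */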
+ npkt->next = NULL;
+ pkt->nb_segs--;
if (cpl->l2info & htonl(F_RXF_IP)) {
- pkt->ol_flags |= PKT_RX_IPV4_HDR;
+ pkt->packet_type = RTE_PTYPE_L3_IPV4;
if (unlikely(!csum_ok))
pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD;
htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD;
} else if (cpl->l2info & htonl(F_RXF_IP6)) {
- pkt->ol_flags |= PKT_RX_IPV6_HDR;
+ pkt->packet_type = RTE_PTYPE_L3_IPV6;
}
if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
unsigned int params;
u32 val;
- __refill_fl(q->adapter, &rxq->fl);
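+ /* Refill only once at least 64 free-list slots have drained. */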
+ if (fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
+ __refill_fl(q->adapter, &rxq->fl);
params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
q->next_intr_params = params;
val = V_CIDXINC(cidx_inc) | V_SEINTARM(params);
unsigned int nb_refill;
/* Size needs to be a multiple of 16, including the status entry. */
- iq->size = roundup(iq->size, 16);
+ iq->size = cxgbe_roundup(iq->size, 16);
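+ /* e.g. cxgbe_roundup(510, 16) yields 512. */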
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->driver->pci_drv.name, fwevtq ? "fwq_ring" : "rx_ring",
+ eth_dev->driver->pci_drv.driver.name,
+ fwevtq ? "fwq_ring" : "rx_ring",
eth_dev->data->port_id, queue_id);
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
*/
if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
fl->size = s->fl_starve_thres - 1 + 2 * 8;
- fl->size = roundup(fl->size, 8);
+ fl->size = cxgbe_roundup(fl->size, 8);
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->driver->pci_drv.name,
+ eth_dev->driver->pci_drv.driver.name,
fwevtq ? "fwq_ring" : "fl_ring",
eth_dev->data->port_id, queue_id);
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
refill_fl_err:
t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
- iq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff);
+ iq->cntxt_id, fl->cntxt_id, 0xffff);
fl_nomem:
ret = -ENOMEM;
err:
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->driver->pci_drv.name, "tx_ring",
+ eth_dev->driver->pci_drv.driver.name, "tx_ring",
eth_dev->data->port_id, queue_id);
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
* The Page Size Buffer must be exactly equal to our Page Size and the
* Large Page Size Buffer should be 0 (per above) or a power of 2.
*/
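+ /* (x & (x - 1)) == 0 holds exactly when x is 0 or a power of two. */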
- if (fl_small_pg != PAGE_SIZE ||
+ if (fl_small_pg != CXGBE_PAGE_SIZE ||
(fl_large_pg & (fl_large_pg - 1)) != 0) {
dev_err(adap, "bad SGE FL page buffer sizes [%d, %d]\n",
fl_small_pg, fl_large_pg);