#define MAX_COALESCE_LEN 64000
-static inline int wraps_around(struct sge_txq *q, int ndesc)
+/* Return true if writing @ndesc more descriptors starting at @q->pidx
+ * would run past the end of the descriptor ring.
+ */
+static inline bool wraps_around(struct sge_txq *q, int ndesc)
{
- return (q->pidx + ndesc) > q->size ? 1 : 0;
+ return (q->pidx + ndesc) > q->size;
}
static void tx_timer_cb(void *data)
/* fill the pkts WR header */
wr = (void *)&q->desc[q->pidx];
- wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
vmwr = (void *)&q->desc[q->pidx];
wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
wr->npkt = q->coalesce.idx;
wr->r3 = 0;
if (is_pf4(adap)) {
- wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
wr->type = q->coalesce.type;
+ if (likely(wr->type != 0))
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
+ else
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
} else {
wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR));
vmwr->r4 = 0;
ndesc = DIV_ROUND_UP(q->coalesce.flits + flits, 8);
credits = txq_avail(q) - ndesc;
+ if (unlikely(wraps_around(q, ndesc)))
+ return 0;
+
/* If we are wrapping or this is last mbuf then, send the
* already coalesced mbufs and let the non-coalesce pass
* handle the mbuf.
*/
- if (unlikely(credits < 0 || wraps_around(q, ndesc))) {
+ if (unlikely(credits < 0)) {
ship_tx_pkt_coalesce_wr(adap, txq);
- return 0;
+ return -EBUSY;
}
/* If the max coalesce len or the max WR len is reached
ndesc = flits_to_desc(q->coalesce.flits + flits);
credits = txq_avail(q) - ndesc;
- if (unlikely(credits < 0 || wraps_around(q, ndesc)))
+ if (unlikely(wraps_around(q, ndesc)))
return 0;
+
+ if (unlikely(credits < 0))
+ return -EBUSY;
+
q->coalesce.flits += wr_size / sizeof(__be64);
q->coalesce.type = type;
q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] +
unsigned int flits, ndesc, cflits;
int l3hdr_len, l4hdr_len, eth_xtra_len;
int len, last_desc;
- int credits;
+ int should_coal, credits;
u32 wr_mid;
u64 cntrl, *end;
bool v6;
/* align the end of coalesce WR to a 512 byte boundary */
txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
- if (!((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) ||
- m->pkt_len > RTE_ETHER_MAX_LEN)) {
- if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
+ if ((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0) {
+ should_coal = should_tx_packet_coalesce(txq, mbuf, &cflits, adap);
+ if (should_coal > 0) {
if (unlikely(map_mbuf(mbuf, addr) < 0)) {
dev_warn(adap, "%s: mapping err for coalesce\n",
__func__);
}
return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
pi, addr, nb_pkts);
- } else {
- return -EBUSY;
+ } else if (should_coal < 0) {
+ return should_coal;
}
}
end = (u64 *)vmwr + flits;
}
- len = 0;
- len += sizeof(*cpl);
+ len = sizeof(*cpl);
/* Coalescing skipped and we send through normal path */
if (!(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {