{
struct sge *s = &adapter->sge;
- return CXGBE_ALIGN(s->pktshift + ETHER_HDR_LEN + VLAN_HLEN + mtu,
+ return CXGBE_ALIGN(s->pktshift + RTE_ETHER_HDR_LEN + VLAN_HLEN + mtu,
s->fl_align);
}
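For intuition, here is a minimal, self-contained sketch of the buffer-size arithmetic above. ALIGN_UP is an illustrative stand-in for CXGBE_ALIGN, assuming the alignment is a power of two; the pktshift and fl_align values are examples, not the driver's actual configuration:

#include <stdint.h>
#include <stdio.h>

/* Round x up to the next multiple of a, where a is a power of two. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint32_t pktshift = 2;   /* bytes the chip prepends to each frame */
	uint32_t fl_align = 64;  /* free-list buffer alignment */
	uint32_t mtu = 1500;

	/* 2 + 14 (Ethernet) + 4 (VLAN) + 1500 = 1520, rounded up to 1536 */
	printf("fl bufsize = %u\n",
	       (unsigned int)ALIGN_UP(pktshift + 14u + 4u + mtu, fl_align));
	return 0;
}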
struct cpl_tx_pkt_core *cpl;
struct tx_sw_desc *sd;
unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len;
- unsigned int max_coal_pkt_num = is_pf4(adap) ? ETH_COALESCE_PKT_NUM :
- ETH_COALESCE_VF_PKT_NUM;
-
-#ifdef RTE_LIBRTE_CXGBE_TPUT
- RTE_SET_USED(nb_pkts);
-#endif
if (q->coalesce.type == 0) {
mc = (struct ulp_txpkt *)q->coalesce.ptr;
sd->coalesce.sgl[idx & 1] = (struct ulptx_sgl *)(cpl + 1);
sd->coalesce.idx = (idx & 1) + 1;
- /* send the coaelsced work request if max reached */
- if (++q->coalesce.idx == max_coal_pkt_num
-#ifndef RTE_LIBRTE_CXGBE_TPUT
- || q->coalesce.idx >= nb_pkts
-#endif
- )
+ /* Send the coalesced work request only if the max is reached. However,
+ * if lower latency is preferred over throughput, don't wait to
+ * coalesce the next Tx burst and send the packets now.
+ */
+ q->coalesce.idx++;
+ if (q->coalesce.idx == adap->params.max_tx_coalesce_num ||
+ (adap->devargs.tx_mode_latency && q->coalesce.idx >= nb_pkts))
ship_tx_pkt_coalesce_wr(adap, txq);
+
return 0;
}
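The new tx_mode_latency knob comes from a device argument. A hedged sketch of how such a devarg is typically consumed via librte_kvargs follows; the handler and function names are illustrative, only the "tx_mode_latency" key is taken from the diff:

#include <stdint.h>
#include <stdlib.h>
#include <rte_common.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>

/* Illustrative handler: store a "key=1" style numeric devarg in a u8. */
static int parse_u8_flag(const char *key __rte_unused, const char *value,
			 void *opaque)
{
	*(uint8_t *)opaque = (uint8_t)strtoul(value, NULL, 0);
	return 0;
}

static void fetch_tx_mode_latency(struct rte_devargs *devargs, uint8_t *flag)
{
	static const char *const keys[] = { "tx_mode_latency", NULL };
	struct rte_kvargs *kvlist;

	if (devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(devargs->args, keys);
	if (kvlist == NULL)
		return;

	rte_kvargs_process(kvlist, "tx_mode_latency", parse_u8_flag, flag);
	rte_kvargs_free(kvlist);
}

A user would then select the behaviour at startup with an EAL device argument along the lines of -w 02:00.0,tx_mode_latency=1 (exact syntax per the DPDK release in use).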
* The chip's minimum packet length is 10 octets, but play it safe and
* reject anything shorter than an Ethernet header.
*/
- if (unlikely(m->pkt_len < ETHER_HDR_LEN)) {
+ if (unlikely(m->pkt_len < RTE_ETHER_HDR_LEN)) {
out_free:
rte_pktmbuf_free(m);
return 0;
(unlikely(m->pkt_len > max_pkt_len)))
goto out_free;
- pi = (struct port_info *)txq->data->dev_private;
+ pi = txq->data->dev_private;
adap = pi->adapter;
cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
/* align the end of coalesce WR to a 512 byte boundary */
txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
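The computation works because a Tx descriptor is 64 bytes, i.e. 8 flits of 8 bytes each, so (8 - (pidx & 7)) is the number of descriptors left before the next 512-byte boundary, and multiplying by 8 converts that count into a flit budget. A small worked sketch (the function name is illustrative):

#include <assert.h>

static unsigned int coalesce_max_flits(unsigned int pidx)
{
	/* Descriptors left until the next 512-byte (8 x 64 B) boundary,
	 * expressed in 8-byte flits: 8 flits per 64-byte descriptor.
	 */
	return (8 - (pidx & 7)) * 8;
}

int main(void)
{
	assert(coalesce_max_flits(13) == 24); /* 3 descriptors = 192 bytes */
	assert(coalesce_max_flits(8) == 64);  /* full 512-byte window */
	return 0;
}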
- if (!((m->ol_flags & PKT_TX_TCP_SEG) || (m->pkt_len > ETHER_MAX_LEN))) {
+ if (!((m->ol_flags & PKT_TX_TCP_SEG) ||
+ m->pkt_len > RTE_ETHER_MAX_LEN)) {
if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
if (unlikely(map_mbuf(mbuf, addr) < 0)) {
dev_warn(adap, "%s: mapping err for coalesce\n",
txq->stats.mapping_err++;
goto out_free;
}
- rte_prefetch0((volatile void *)addr);
return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
pi, addr, nb_pkts);
} else {
v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
l3hdr_len = m->l3_len;
l4hdr_len = m->l4_len;
- eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
+ eth_xtra_len = m->l2_len - RTE_ETHER_HDR_LEN;
len += sizeof(*lso);
wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
FW_ETH_TX_PKT_WR :
}
}
+static inline void cxgbe_set_mbuf_info(struct rte_mbuf *pkt, uint32_t ptype,
+ uint64_t ol_flags)
+{
+ pkt->packet_type |= ptype;
+ pkt->ol_flags |= ol_flags;
+}
+
+static inline void cxgbe_fill_mbuf_info(struct adapter *adap,
+ const struct cpl_rx_pkt *cpl,
+ struct rte_mbuf *pkt)
+{
+ bool csum_ok;
+ u16 err_vec;
+
+ if (adap->params.tp.rx_pkt_encap)
+ err_vec = G_T6_COMPR_RXERR_VEC(ntohs(cpl->err_vec));
+ else
+ err_vec = ntohs(cpl->err_vec);
+
+ csum_ok = cpl->csum_calc && !err_vec;
+
+ if (cpl->vlan_ex)
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER_VLAN,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ else
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER, 0);
+
+ if (cpl->l2info & htonl(F_RXF_IP))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV4,
+ csum_ok ? PKT_RX_IP_CKSUM_GOOD :
+ PKT_RX_IP_CKSUM_BAD);
+ else if (cpl->l2info & htonl(F_RXF_IP6))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV6,
+ csum_ok ? PKT_RX_IP_CKSUM_GOOD :
+ PKT_RX_IP_CKSUM_BAD);
+
+ if (cpl->l2info & htonl(F_RXF_TCP))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_TCP,
+ csum_ok ? PKT_RX_L4_CKSUM_GOOD :
+ PKT_RX_L4_CKSUM_BAD);
+ else if (cpl->l2info & htonl(F_RXF_UDP))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_UDP,
+ csum_ok ? PKT_RX_L4_CKSUM_GOOD :
+ PKT_RX_L4_CKSUM_BAD);
+}
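Since cxgbe_fill_mbuf_info() is new here, a hedged sketch of how an application consumes the packet type and offload flags it sets; the helper name and the accept/drop policy below are illustrative, not part of the driver:

#include <rte_mbuf.h>

/* Illustrative receive-side check built on the fields that
 * cxgbe_fill_mbuf_info() fills in.
 */
static int rx_pkt_usable(const struct rte_mbuf *m)
{
	uint16_t vlan = 0;

	/* The helper sets *_CKSUM_BAD when csum_calc fired with errors. */
	if (m->ol_flags & (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))
		return 0;

	/* VLAN was stripped in hardware; the TCI lives in m->vlan_tci. */
	if (m->ol_flags & PKT_RX_VLAN_STRIPPED)
		vlan = m->vlan_tci;
	(void)vlan;

	/* packet_type carries the L2/L3/L4 classification set above;
	 * this example policy keeps only IP traffic.
	 */
	return (m->packet_type & RTE_PTYPE_L3_MASK) != 0;
}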
+
/**
* process_responses - process responses from an SGE response queue
* @q: the ingress queue to process
(const void *)&q->cur_desc[1];
struct rte_mbuf *pkt, *npkt;
u32 len, bufsz;
- bool csum_ok;
- u16 err_vec;
rc = (const struct rsp_ctrl *)
((const char *)q->cur_desc +
len = G_RSPD_LEN(len);
pkt->pkt_len = len;
- /* Compressed error vector is enabled for
- * T6 only
- */
- if (q->adapter->params.tp.rx_pkt_encap)
- err_vec = G_T6_COMPR_RXERR_VEC(
- ntohs(cpl->err_vec));
- else
- err_vec = ntohs(cpl->err_vec);
- csum_ok = cpl->csum_calc && !err_vec;
-
/* Chain mbufs into len if necessary */
while (len) {
struct rte_mbuf *new_pkt = rsd->buf;
npkt->next = NULL;
pkt->nb_segs--;
- if (cpl->l2info & htonl(F_RXF_IP)) {
- pkt->packet_type = RTE_PTYPE_L3_IPV4;
- if (unlikely(!csum_ok))
- pkt->ol_flags |=
- PKT_RX_IP_CKSUM_BAD;
-
- if ((cpl->l2info &
- htonl(F_RXF_UDP | F_RXF_TCP)) &&
- !csum_ok)
- pkt->ol_flags |=
- PKT_RX_L4_CKSUM_BAD;
- } else if (cpl->l2info & htonl(F_RXF_IP6)) {
- pkt->packet_type = RTE_PTYPE_L3_IPV6;
- }
+ cxgbe_fill_mbuf_info(q->adapter, cpl, pkt);
if (!rss_hdr->filter_tid &&
rss_hdr->hash_type) {
ntohl(rss_hdr->hash_val);
}
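Where the RSS hash is recorded (the excerpt elides the assignment to the mbuf's hash field), a consumer would typically spread work with it. A minimal illustrative sketch, assuming the driver also sets PKT_RX_RSS_HASH alongside the hash value:

#include <rte_mbuf.h>

/* Illustrative: pick an Rx worker from the hash stored in m->hash.rss. */
static unsigned int pick_worker(const struct rte_mbuf *m,
				unsigned int nb_workers)
{
	if (m->ol_flags & PKT_RX_RSS_HASH)
		return m->hash.rss % nb_workers;
	return 0; /* no hash reported: fall back to a fixed worker */
}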
- if (cpl->vlan_ex) {
- pkt->ol_flags |= PKT_RX_VLAN |
- PKT_RX_VLAN_STRIPPED;
+ if (cpl->vlan_ex)
pkt->vlan_tci = ntohs(cpl->vlan);
- }
rte_pktmbuf_adj(pkt, s->pktshift);
rxq->stats.pkts++;
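The rte_pktmbuf_adj() call above trims the pktshift pad bytes the hardware prepends to each frame so that the IP header lands on an aligned boundary. A minimal sketch of that trim, with an illustrative helper name and drop policy:

#include <rte_mbuf.h>

/* Illustrative: strip a hardware pad from the front of an mbuf,
 * mirroring the rte_pktmbuf_adj(pkt, s->pktshift) call above.
 */
static void strip_rx_pad(struct rte_mbuf *m, uint16_t pktshift)
{
	/* rte_pktmbuf_adj() advances data_off and shrinks data_len and
	 * pkt_len; it returns NULL if the mbuf is shorter than the trim.
	 */
	if (rte_pktmbuf_adj(m, pktshift) == NULL)
		rte_pktmbuf_free(m); /* runt frame: drop it */
}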
int ret, flsz = 0;
struct fw_iq_cmd c;
struct sge *s = &adap->sge;
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
char z_name[RTE_MEMZONE_NAMESIZE];
char z_name_sw[RTE_MEMZONE_NAMESIZE];
unsigned int nb_refill;
int ret, nentries;
struct fw_eq_eth_cmd c;
struct sge *s = &adap->sge;
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
char z_name[RTE_MEMZONE_NAMESIZE];
char z_name_sw[RTE_MEMZONE_NAMESIZE];
u8 pciechan;
int ret, nentries;
struct fw_eq_ctrl_cmd c;
struct sge *s = &adap->sge;
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
char z_name[RTE_MEMZONE_NAMESIZE];
char z_name_sw[RTE_MEMZONE_NAMESIZE];
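The three hunks above drop identical casts for the same reason: in C, a void * converts implicitly to any object pointer type, so casting eth_dev->data->dev_private is redundant. A trivial standalone illustration (struct contents are hypothetical):

struct port_info { int port_id; };

int main(void)
{
	struct port_info storage = { 7 };
	void *dev_private = &storage;       /* like eth_dev->data->dev_private */
	struct port_info *pi = dev_private; /* implicit conversion; a cast is
					     * only required when compiling
					     * as C++ */
	return pi->port_id;
}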