int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
		uint16_t nb_pkts);
int t4_mgmt_tx(struct sge_ctrl_txq *txq, struct rte_mbuf *mbuf);
-int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
-		     const struct pkt_gl *gl);
int t4_sge_init(struct adapter *adap);
int t4vf_sge_init(struct adapter *adap);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
	return tz->addr;
}
-/**
- * t4_pktgl_to_mbuf_usembufs - build an mbuf from a packet gather list
- * @gl: the gather list
- *
- * Builds an mbuf from the given packet gather list. Returns the mbuf or
- * %NULL if mbuf allocation failed.
- */
-static struct rte_mbuf *t4_pktgl_to_mbuf_usembufs(const struct pkt_gl *gl)
-{
-	/*
-	 * If there's only one mbuf fragment, just return that.
-	 */
-	if (likely(gl->nfrags == 1))
-		return gl->mbufs[0];
-
-	return NULL;
-}
-
-/**
- * t4_pktgl_to_mbuf - build an mbuf from a packet gather list
- * @gl: the gather list
- *
- * Builds an mbuf from the given packet gather list. Returns the mbuf or
- * %NULL if mbuf allocation failed.
- */
-static struct rte_mbuf *t4_pktgl_to_mbuf(const struct pkt_gl *gl)
-{
-	return t4_pktgl_to_mbuf_usembufs(gl);
-}
-
-/**
- * t4_ethrx_handler - process an ingress ethernet packet
- * @q: the response queue that received the packet
- * @rsp: the response queue descriptor holding the RX_PKT message
- * @si: the gather list of packet fragments
- *
- * Process an ingress ethernet packet and deliver it to the stack.
- */
-int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
-		     const struct pkt_gl *si)
-{
-	struct rte_mbuf *mbuf;
-	const struct cpl_rx_pkt *pkt;
-	const struct rss_header *rss_hdr;
-	bool csum_ok;
-	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
-	u16 err_vec;
-
-	rss_hdr = (const void *)rsp;
-	pkt = (const void *)&rsp[1];
-	/* Compressed error vector is enabled for T6 only */
-	if (q->adapter->params.tp.rx_pkt_encap)
-		err_vec = G_T6_COMPR_RXERR_VEC(ntohs(pkt->err_vec));
-	else
-		err_vec = ntohs(pkt->err_vec);
-	csum_ok = pkt->csum_calc && !err_vec;
-
-	mbuf = t4_pktgl_to_mbuf(si);
-	if (unlikely(!mbuf)) {
-		rxq->stats.rx_drops++;
-		return 0;
-	}
-
-	mbuf->port = pkt->iff;
-	if (pkt->l2info & htonl(F_RXF_IP)) {
-		mbuf->packet_type = RTE_PTYPE_L3_IPV4;
-		if (unlikely(!csum_ok))
-			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-
-		if ((pkt->l2info & htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
-			mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-	} else if (pkt->l2info & htonl(F_RXF_IP6)) {
-		mbuf->packet_type = RTE_PTYPE_L3_IPV6;
-	}
-
-	mbuf->port = pkt->iff;
-
-	if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
-		mbuf->ol_flags |= PKT_RX_RSS_HASH;
-		mbuf->hash.rss = ntohl(rss_hdr->hash_val);
-	}
-
-	if (pkt->vlan_ex) {
-		mbuf->ol_flags |= PKT_RX_VLAN;
-		mbuf->vlan_tci = ntohs(pkt->vlan);
-	}
-	rxq->stats.pkts++;
-	rxq->stats.rx_bytes += mbuf->pkt_len;
-
-	return 0;
-}
-
#define CXGB4_MSG_AN ((void *)1)
/**