net: add rte prefix to ether defines
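
The change below follows the DPDK-wide move of the Ethernet defines under the rte_ prefix, so ETHER_HDR_LEN and ETHER_MAX_LEN become RTE_ETHER_HDR_LEN and RTE_ETHER_MAX_LEN from <rte_ether.h>. A minimal sketch of the prefixed usage, assuming current DPDK headers; the helper name is illustrative only and is not part of the cxgbe driver:

#include <rte_ether.h>  /* RTE_ETHER_HDR_LEN, RTE_ETHER_MAX_LEN */
#include <rte_mbuf.h>

/* Illustrative helper (hypothetical, not in the driver): the same length
 * checks t4_eth_xmit() performs, written against the prefixed defines. */
static inline int example_check_frame_len(const struct rte_mbuf *m)
{
	if (m->pkt_len < RTE_ETHER_HDR_LEN)  /* shorter than an Ethernet header */
		return -1;                   /* reject, as the driver does */
	if (m->pkt_len > RTE_ETHER_MAX_LEN)  /* larger than a standard frame */
		return 1;                    /* take the non-coalesced Tx path */
	return 0;
}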
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index 357b485..b3c4ec2 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -33,9 +33,9 @@
 #include <rte_random.h>
 #include <rte_dev.h>
 
-#include "common.h"
-#include "t4_regs.h"
-#include "t4_msg.h"
+#include "base/common.h"
+#include "base/t4_regs.h"
+#include "base/t4_msg.h"
 #include "cxgbe.h"
 
 static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
@@ -73,7 +73,7 @@ static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
 {
        struct sge *s = &adapter->sge;
 
-       return CXGBE_ALIGN(s->pktshift + ETHER_HDR_LEN + VLAN_HLEN + mtu,
+       return CXGBE_ALIGN(s->pktshift + RTE_ETHER_HDR_LEN + VLAN_HLEN + mtu,
                           s->fl_align);
 }
 
@@ -397,7 +397,8 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
 
                rte_mbuf_refcnt_set(mbuf, 1);
                mbuf->data_off =
-                       (uint16_t)(RTE_PTR_ALIGN((char *)mbuf->buf_addr +
+                       (uint16_t)((char *)
+                                  RTE_PTR_ALIGN((char *)mbuf->buf_addr +
                                                 RTE_PKTMBUF_HEADROOM,
                                                 adap->sge.fl_align) -
                                   (char *)mbuf->buf_addr);
@@ -1127,7 +1128,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
         * The chip min packet length is 10 octets but play safe and reject
         * anything shorter than an Ethernet header.
         */
-       if (unlikely(m->pkt_len < ETHER_HDR_LEN)) {
+       if (unlikely(m->pkt_len < RTE_ETHER_HDR_LEN)) {
 out_free:
                rte_pktmbuf_free(m);
                return 0;
@@ -1144,7 +1145,8 @@ out_free:
        /* align the end of coalesce WR to a 512 byte boundary */
        txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
 
-       if (!((m->ol_flags & PKT_TX_TCP_SEG) || (m->pkt_len > ETHER_MAX_LEN))) {
+       if (!((m->ol_flags & PKT_TX_TCP_SEG) ||
+                       m->pkt_len > RTE_ETHER_MAX_LEN)) {
                if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
                        if (unlikely(map_mbuf(mbuf, addr) < 0)) {
                                dev_warn(adap, "%s: mapping err for coalesce\n",
@@ -1229,7 +1231,7 @@ out_free:
                v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
                l3hdr_len = m->l3_len;
                l4hdr_len = m->l4_len;
-               eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
+               eth_xtra_len = m->l2_len - RTE_ETHER_HDR_LEN;
                len += sizeof(*lso);
                wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
                                                  FW_ETH_TX_PKT_WR :
@@ -1494,98 +1496,6 @@ alloc_sw_ring:
        return tz->addr;
 }
 
-/**
- * t4_pktgl_to_mbuf_usembufs - build an mbuf from a packet gather list
- * @gl: the gather list
- *
- * Builds an mbuf from the given packet gather list.  Returns the mbuf or
- * %NULL if mbuf allocation failed.
- */
-static struct rte_mbuf *t4_pktgl_to_mbuf_usembufs(const struct pkt_gl *gl)
-{
-       /*
-        * If there's only one mbuf fragment, just return that.
-        */
-       if (likely(gl->nfrags == 1))
-               return gl->mbufs[0];
-
-       return NULL;
-}
-
-/**
- * t4_pktgl_to_mbuf - build an mbuf from a packet gather list
- * @gl: the gather list
- *
- * Builds an mbuf from the given packet gather list.  Returns the mbuf or
- * %NULL if mbuf allocation failed.
- */
-static struct rte_mbuf *t4_pktgl_to_mbuf(const struct pkt_gl *gl)
-{
-       return t4_pktgl_to_mbuf_usembufs(gl);
-}
-
-/**
- * t4_ethrx_handler - process an ingress ethernet packet
- * @q: the response queue that received the packet
- * @rsp: the response queue descriptor holding the RX_PKT message
- * @si: the gather list of packet fragments
- *
- * Process an ingress ethernet packet and deliver it to the stack.
- */
-int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
-                    const struct pkt_gl *si)
-{
-       struct rte_mbuf *mbuf;
-       const struct cpl_rx_pkt *pkt;
-       const struct rss_header *rss_hdr;
-       bool csum_ok;
-       struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
-       u16 err_vec;
-
-       rss_hdr = (const void *)rsp;
-       pkt = (const void *)&rsp[1];
-       /* Compressed error vector is enabled for T6 only */
-       if (q->adapter->params.tp.rx_pkt_encap)
-               err_vec = G_T6_COMPR_RXERR_VEC(ntohs(pkt->err_vec));
-       else
-               err_vec = ntohs(pkt->err_vec);
-       csum_ok = pkt->csum_calc && !err_vec;
-
-       mbuf = t4_pktgl_to_mbuf(si);
-       if (unlikely(!mbuf)) {
-               rxq->stats.rx_drops++;
-               return 0;
-       }
-
-       mbuf->port = pkt->iff;
-       if (pkt->l2info & htonl(F_RXF_IP)) {
-               mbuf->packet_type = RTE_PTYPE_L3_IPV4;
-               if (unlikely(!csum_ok))
-                       mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-
-               if ((pkt->l2info & htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
-                       mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-       } else if (pkt->l2info & htonl(F_RXF_IP6)) {
-               mbuf->packet_type = RTE_PTYPE_L3_IPV6;
-       }
-
-       mbuf->port = pkt->iff;
-
-       if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
-               mbuf->ol_flags |= PKT_RX_RSS_HASH;
-               mbuf->hash.rss = ntohl(rss_hdr->hash_val);
-       }
-
-       if (pkt->vlan_ex) {
-               mbuf->ol_flags |= PKT_RX_VLAN;
-               mbuf->vlan_tci = ntohs(pkt->vlan);
-       }
-       rxq->stats.pkts++;
-       rxq->stats.rx_bytes += mbuf->pkt_len;
-
-       return 0;
-}
-
 #define CXGB4_MSG_AN ((void *)1)
 
 /**
@@ -1604,6 +1514,52 @@ static inline void rspq_next(struct sge_rspq *q)
        }
 }
 
+static inline void cxgbe_set_mbuf_info(struct rte_mbuf *pkt, uint32_t ptype,
+                                      uint64_t ol_flags)
+{
+       pkt->packet_type |= ptype;
+       pkt->ol_flags |= ol_flags;
+}
+
+static inline void cxgbe_fill_mbuf_info(struct adapter *adap,
+                                       const struct cpl_rx_pkt *cpl,
+                                       struct rte_mbuf *pkt)
+{
+       bool csum_ok;
+       u16 err_vec;
+
+       if (adap->params.tp.rx_pkt_encap)
+               err_vec = G_T6_COMPR_RXERR_VEC(ntohs(cpl->err_vec));
+       else
+               err_vec = ntohs(cpl->err_vec);
+
+       csum_ok = cpl->csum_calc && !err_vec;
+
+       if (cpl->vlan_ex)
+               cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER_VLAN,
+                                   PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+       else
+               cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER, 0);
+
+       if (cpl->l2info & htonl(F_RXF_IP))
+               cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV4,
+                                   csum_ok ? PKT_RX_IP_CKSUM_GOOD :
+                                             PKT_RX_IP_CKSUM_BAD);
+       else if (cpl->l2info & htonl(F_RXF_IP6))
+               cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV6,
+                                   csum_ok ? PKT_RX_IP_CKSUM_GOOD :
+                                             PKT_RX_IP_CKSUM_BAD);
+
+       if (cpl->l2info & htonl(F_RXF_TCP))
+               cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_TCP,
+                                   csum_ok ? PKT_RX_L4_CKSUM_GOOD :
+                                             PKT_RX_L4_CKSUM_BAD);
+       else if (cpl->l2info & htonl(F_RXF_UDP))
+               cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_UDP,
+                                   csum_ok ? PKT_RX_L4_CKSUM_GOOD :
+                                             PKT_RX_L4_CKSUM_BAD);
+}
+
 /**
  * process_responses - process responses from an SGE response queue
  * @q: the ingress queue to process
@@ -1655,8 +1611,6 @@ static int process_responses(struct sge_rspq *q, int budget,
                                        (const void *)&q->cur_desc[1];
                                struct rte_mbuf *pkt, *npkt;
                                u32 len, bufsz;
-                               bool csum_ok;
-                               u16 err_vec;
 
                                rc = (const struct rsp_ctrl *)
                                     ((const char *)q->cur_desc +
@@ -1673,16 +1627,6 @@ static int process_responses(struct sge_rspq *q, int budget,
                                len = G_RSPD_LEN(len);
                                pkt->pkt_len = len;
 
-                               /* Compressed error vector is enabled for
-                                * T6 only
-                                */
-                               if (q->adapter->params.tp.rx_pkt_encap)
-                                       err_vec = G_T6_COMPR_RXERR_VEC(
-                                                       ntohs(cpl->err_vec));
-                               else
-                                       err_vec = ntohs(cpl->err_vec);
-                               csum_ok = cpl->csum_calc && !err_vec;
-
                                /* Chain mbufs into len if necessary */
                                while (len) {
                                        struct rte_mbuf *new_pkt = rsd->buf;
@@ -1700,20 +1644,7 @@ static int process_responses(struct sge_rspq *q, int budget,
                                npkt->next = NULL;
                                pkt->nb_segs--;
 
-                               if (cpl->l2info & htonl(F_RXF_IP)) {
-                                       pkt->packet_type = RTE_PTYPE_L3_IPV4;
-                                       if (unlikely(!csum_ok))
-                                               pkt->ol_flags |=
-                                                       PKT_RX_IP_CKSUM_BAD;
-
-                                       if ((cpl->l2info &
-                                            htonl(F_RXF_UDP | F_RXF_TCP)) &&
-                                           !csum_ok)
-                                               pkt->ol_flags |=
-                                                       PKT_RX_L4_CKSUM_BAD;
-                               } else if (cpl->l2info & htonl(F_RXF_IP6)) {
-                                       pkt->packet_type = RTE_PTYPE_L3_IPV6;
-                               }
+                               cxgbe_fill_mbuf_info(q->adapter, cpl, pkt);
 
                                if (!rss_hdr->filter_tid &&
                                    rss_hdr->hash_type) {
@@ -1722,11 +1653,8 @@ static int process_responses(struct sge_rspq *q, int budget,
                                                ntohl(rss_hdr->hash_val);
                                }
 
-                               if (cpl->vlan_ex) {
-                                       pkt->ol_flags |= PKT_RX_VLAN |
-                                                        PKT_RX_VLAN_STRIPPED;
+                               if (cpl->vlan_ex)
                                        pkt->vlan_tci = ntohs(cpl->vlan);
-                               }
 
                                rte_pktmbuf_adj(pkt, s->pktshift);
                                rxq->stats.pkts++;
@@ -1873,10 +1801,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
        /* Size needs to be multiple of 16, including status entry. */
        iq->size = cxgbe_roundup(iq->size, 16);
 
-       snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                eth_dev->device->driver->name,
-                fwevtq ? "fwq_ring" : "rx_ring",
-                eth_dev->data->port_id, queue_id);
+       snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+                       eth_dev->data->port_id, queue_id,
+                       fwevtq ? "fwq_ring" : "rx_ring");
        snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
 
        iq->desc = alloc_ring(iq->size, iq->iqe_len, 0, &iq->phys_addr, NULL, 0,
@@ -1889,12 +1816,16 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                            F_FW_CMD_WRITE | F_FW_CMD_EXEC);
 
        if (is_pf4(adap)) {
-               pciechan = cong > 0 ? cxgbe_ffs(cong) - 1 : pi->tx_chan;
+               pciechan = pi->tx_chan;
                c.op_to_vfn |= htonl(V_FW_IQ_CMD_PFN(adap->pf) |
                                     V_FW_IQ_CMD_VFN(0));
                if (cong >= 0)
-                       c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
-                                                   F_FW_IQ_CMD_IQRO);
+                       c.iqns_to_fl0congen =
+                               htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
+                                     V_FW_IQ_CMD_IQTYPE(cong ?
+                                                        FW_IQ_IQTYPE_NIC :
+                                                        FW_IQ_IQTYPE_OFLD) |
+                                     F_FW_IQ_CMD_IQRO);
        } else {
                pciechan = pi->port_id;
        }
@@ -1934,10 +1865,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                        fl->size = s->fl_starve_thres - 1 + 2 * 8;
                fl->size = cxgbe_roundup(fl->size, 8);
 
-               snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                        eth_dev->device->driver->name,
-                        fwevtq ? "fwq_ring" : "fl_ring",
-                        eth_dev->data->port_id, queue_id);
+               snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+                               eth_dev->data->port_id, queue_id,
+                               fwevtq ? "fwq_ring" : "fl_ring");
                snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
 
                fl->desc = alloc_ring(fl->size, sizeof(__be64),
@@ -2140,9 +2070,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
        /* Add status entries */
        nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
-       snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                eth_dev->device->driver->name, "tx_ring",
-                eth_dev->data->port_id, queue_id);
+       snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+                       eth_dev->data->port_id, queue_id, "tx_ring");
        snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
 
        txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
@@ -2219,9 +2148,8 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
        /* Add status entries */
        nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
-       snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                eth_dev->device->driver->name, "ctrl_tx_ring",
-                eth_dev->data->port_id, queue_id);
+       snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+                       eth_dev->data->port_id, queue_id, "ctrl_tx_ring");
        snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
 
        txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),