net: add rte prefix to ether defines
[dpdk.git] / drivers/net/cxgbe/sge.c
index 83e26d0..b3c4ec2 100644
@@ -33,9 +33,9 @@
 #include <rte_random.h>
 #include <rte_dev.h>
 
-#include "common.h"
-#include "t4_regs.h"
-#include "t4_msg.h"
+#include "base/common.h"
+#include "base/t4_regs.h"
+#include "base/t4_msg.h"
 #include "cxgbe.h"
 
 static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
@@ -54,6 +54,11 @@ static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
  */
 #define MAX_IMM_TX_PKT_LEN 256
 
+/*
+ * Max size of a WR sent through a control Tx queue.
+ */
+#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
+
 /*
  * Rx buffer sizes for "usembufs" Free List buffers (one ingress packet
  * per mbuf buffer).  We currently only support two sizes for 1500- and
@@ -68,7 +73,7 @@ static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
 {
        struct sge *s = &adapter->sge;
 
-       return CXGBE_ALIGN(s->pktshift + ETHER_HDR_LEN + VLAN_HLEN + mtu,
+       return CXGBE_ALIGN(s->pktshift + RTE_ETHER_HDR_LEN + VLAN_HLEN + mtu,
                           s->fl_align);
 }
 
@@ -361,7 +366,8 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
        struct rte_mbuf *buf_bulk[n];
        int ret, i;
        struct rte_pktmbuf_pool_private *mbp_priv;
-       u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.jumbo_frame;
+       u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.offloads &
+               DEV_RX_OFFLOAD_JUMBO_FRAME;
 
        /* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
        mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
@@ -391,7 +397,8 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
 
                rte_mbuf_refcnt_set(mbuf, 1);
                mbuf->data_off =
-                       (uint16_t)(RTE_PTR_ALIGN((char *)mbuf->buf_addr +
+                       (uint16_t)((char *)
+                                  RTE_PTR_ALIGN((char *)mbuf->buf_addr +
                                                 RTE_PKTMBUF_HEADROOM,
                                                 adap->sge.fl_align) -
                                   (char *)mbuf->buf_addr);
@@ -1121,7 +1128,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
         * The chip min packet length is 10 octets but play safe and reject
         * anything shorter than an Ethernet header.
         */
-       if (unlikely(m->pkt_len < ETHER_HDR_LEN)) {
+       if (unlikely(m->pkt_len < RTE_ETHER_HDR_LEN)) {
 out_free:
                rte_pktmbuf_free(m);
                return 0;
@@ -1138,7 +1145,8 @@ out_free:
        /* align the end of coalesce WR to a 512 byte boundary */
        txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
 
-       if (!((m->ol_flags & PKT_TX_TCP_SEG) || (m->pkt_len > ETHER_MAX_LEN))) {
+       if (!((m->ol_flags & PKT_TX_TCP_SEG) ||
+                       m->pkt_len > RTE_ETHER_MAX_LEN)) {
                if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
                        if (unlikely(map_mbuf(mbuf, addr) < 0)) {
                                dev_warn(adap, "%s: mapping err for coalesce\n",
@@ -1223,7 +1231,7 @@ out_free:
                v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
                l3hdr_len = m->l3_len;
                l4hdr_len = m->l4_len;
-               eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
+               eth_xtra_len = m->l2_len - RTE_ETHER_HDR_LEN;
                len += sizeof(*lso);
                wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
                                                  FW_ETH_TX_PKT_WR :
@@ -1298,6 +1306,126 @@ out_free:
        return 0;
 }
 
+/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of reclaim_completed_tx() that is used for Tx queues
+ * that send only immediate data (presently just the control queues) and
+ * thus do not have any mbufs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+       int hw_cidx = ntohs(q->stat->cidx);
+       int reclaim = hw_cidx - q->cidx;
+
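+       /* The hardware completion index wraps around the ring, so the
+        * delta can go negative.
+        */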
+       if (reclaim < 0)
+               reclaim += q->size;
+
+       q->in_use -= reclaim;
+       q->cidx = hw_cidx;
+}
+
+/**
+ * is_imm - check whether a packet can be sent as immediate data
+ * @mbuf: the packet
+ *
+ * Returns true if a packet can be sent as a WR with immediate data.
+ */
+static inline int is_imm(const struct rte_mbuf *mbuf)
+{
+       return mbuf->pkt_len <= MAX_CTRL_WR_LEN;
+}
+
+/**
+ * inline_tx_mbuf - inline a packet's data into TX descriptors
+ * @q: the TX queue where the packet will be inlined
+ * @from: pointer to data portion of packet
+ * @to: pointer after cpl where data has to be inlined
+ * @len: length of data to inline
+ *
+ * Inline a packet's contents directly to TX descriptors, starting at
+ * the given position within the TX DMA ring.
+ * Most of the complexity of this operation is in handling a wrap-around
+ * in the middle of the packet we want to inline.
+ */
+static void inline_tx_mbuf(const struct sge_txq *q, caddr_t from, caddr_t *to,
+                          int len)
+{
+       int left = RTE_PTR_DIFF(q->stat, *to);
+
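+       /* Common case: the data fits below the status page at the end of
+        * the descriptor ring.
+        */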
+       if (likely((uintptr_t)*to + len <= (uintptr_t)q->stat)) {
+               rte_memcpy(*to, from, len);
+               *to = RTE_PTR_ADD(*to, len);
+       } else {
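+               /* Wrap-around: copy what fits before the status page, then
+                * continue from the start of the descriptor ring.
+                */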
+               rte_memcpy(*to, from, left);
+               from = RTE_PTR_ADD(from, left);
+               left = len - left;
+               rte_memcpy((void *)q->desc, from, left);
+               *to = RTE_PTR_ADD((void *)q->desc, left);
+       }
+}
+
+/**
+ * ctrl_xmit - send a packet through an SGE control Tx queue
+ * @q: the control queue
+ * @mbuf: the packet
+ *
+ * Send a packet through an SGE control Tx queue.  Packets sent through
+ * a control queue must fit entirely as immediate data.
+ */
+static int ctrl_xmit(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
+{
+       unsigned int ndesc;
+       struct fw_wr_hdr *wr;
+       caddr_t dst;
+
+       if (unlikely(!is_imm(mbuf))) {
+               WARN_ON(1);
+               rte_pktmbuf_free(mbuf);
+               return -1;
+       }
+
+       reclaim_completed_tx_imm(&q->q);
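+       /* The packet is copied out entirely as immediate data, so its
+        * length alone determines the descriptor count.
+        */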
+       ndesc = DIV_ROUND_UP(mbuf->pkt_len, sizeof(struct tx_desc));
+       t4_os_lock(&q->ctrlq_lock);
+
+       q->full = txq_avail(&q->q) < ndesc ? 1 : 0;
+       if (unlikely(q->full)) {
+               t4_os_unlock(&q->ctrlq_lock);
+               return -1;
+       }
+
+       wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+       dst = (void *)wr;
+       inline_tx_mbuf(&q->q, rte_pktmbuf_mtod(mbuf, caddr_t),
+                      &dst, mbuf->data_len);
+
+       txq_advance(&q->q, ndesc);
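+       /* Running low on descriptors: request an egress queue update from
+        * the firmware.
+        */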
+       if (unlikely(txq_avail(&q->q) < 64))
+               wr->lo |= htonl(F_FW_WR_EQUEQ);
+
+       q->txp++;
+
+       ring_tx_db(q->adapter, &q->q);
+       t4_os_unlock(&q->ctrlq_lock);
+
+       rte_pktmbuf_free(mbuf);
+       return 0;
+}
+
+/**
+ * t4_mgmt_tx - send a management message
+ * @q: the control queue
+ * @mbuf: the packet containing the management message
+ *
+ * Send a management message through the control queue.
+ */
+int t4_mgmt_tx(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
+{
+       return ctrl_xmit(q, mbuf);
+}
+
 /**
  * alloc_ring - allocate resources for an SGE descriptor ring
  * @dev: the PCI device's core device
@@ -1344,7 +1472,8 @@ static void *alloc_ring(size_t nelem, size_t elem_size,
         * handle the maximum ring size is allocated in order to allow for
         * resizing in later calls to the queue setup function.
         */
-       tz = rte_memzone_reserve_aligned(z_name, len, socket_id, 0, 4096);
+       tz = rte_memzone_reserve_aligned(z_name, len, socket_id,
+                       RTE_MEMZONE_IOVA_CONTIG, 4096);
        if (!tz)
                return NULL;
 
@@ -1367,98 +1496,6 @@ alloc_sw_ring:
        return tz->addr;
 }
 
-/**
- * t4_pktgl_to_mbuf_usembufs - build an mbuf from a packet gather list
- * @gl: the gather list
- *
- * Builds an mbuf from the given packet gather list.  Returns the mbuf or
- * %NULL if mbuf allocation failed.
- */
-static struct rte_mbuf *t4_pktgl_to_mbuf_usembufs(const struct pkt_gl *gl)
-{
-       /*
-        * If there's only one mbuf fragment, just return that.
-        */
-       if (likely(gl->nfrags == 1))
-               return gl->mbufs[0];
-
-       return NULL;
-}
-
-/**
- * t4_pktgl_to_mbuf - build an mbuf from a packet gather list
- * @gl: the gather list
- *
- * Builds an mbuf from the given packet gather list.  Returns the mbuf or
- * %NULL if mbuf allocation failed.
- */
-static struct rte_mbuf *t4_pktgl_to_mbuf(const struct pkt_gl *gl)
-{
-       return t4_pktgl_to_mbuf_usembufs(gl);
-}
-
-/**
- * t4_ethrx_handler - process an ingress ethernet packet
- * @q: the response queue that received the packet
- * @rsp: the response queue descriptor holding the RX_PKT message
- * @si: the gather list of packet fragments
- *
- * Process an ingress ethernet packet and deliver it to the stack.
- */
-int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
-                    const struct pkt_gl *si)
-{
-       struct rte_mbuf *mbuf;
-       const struct cpl_rx_pkt *pkt;
-       const struct rss_header *rss_hdr;
-       bool csum_ok;
-       struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
-       u16 err_vec;
-
-       rss_hdr = (const void *)rsp;
-       pkt = (const void *)&rsp[1];
-       /* Compressed error vector is enabled for T6 only */
-       if (q->adapter->params.tp.rx_pkt_encap)
-               err_vec = G_T6_COMPR_RXERR_VEC(ntohs(pkt->err_vec));
-       else
-               err_vec = ntohs(pkt->err_vec);
-       csum_ok = pkt->csum_calc && !err_vec;
-
-       mbuf = t4_pktgl_to_mbuf(si);
-       if (unlikely(!mbuf)) {
-               rxq->stats.rx_drops++;
-               return 0;
-       }
-
-       mbuf->port = pkt->iff;
-       if (pkt->l2info & htonl(F_RXF_IP)) {
-               mbuf->packet_type = RTE_PTYPE_L3_IPV4;
-               if (unlikely(!csum_ok))
-                       mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-
-               if ((pkt->l2info & htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
-                       mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-       } else if (pkt->l2info & htonl(F_RXF_IP6)) {
-               mbuf->packet_type = RTE_PTYPE_L3_IPV6;
-       }
-
-       mbuf->port = pkt->iff;
-
-       if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
-               mbuf->ol_flags |= PKT_RX_RSS_HASH;
-               mbuf->hash.rss = ntohl(rss_hdr->hash_val);
-       }
-
-       if (pkt->vlan_ex) {
-               mbuf->ol_flags |= PKT_RX_VLAN;
-               mbuf->vlan_tci = ntohs(pkt->vlan);
-       }
-       rxq->stats.pkts++;
-       rxq->stats.rx_bytes += mbuf->pkt_len;
-
-       return 0;
-}
-
 #define CXGB4_MSG_AN ((void *)1)
 
 /**
@@ -1477,6 +1514,52 @@ static inline void rspq_next(struct sge_rspq *q)
        }
 }
 
+static inline void cxgbe_set_mbuf_info(struct rte_mbuf *pkt, uint32_t ptype,
+                                      uint64_t ol_flags)
+{
+       pkt->packet_type |= ptype;
+       pkt->ol_flags |= ol_flags;
+}
+
+static inline void cxgbe_fill_mbuf_info(struct adapter *adap,
+                                       const struct cpl_rx_pkt *cpl,
+                                       struct rte_mbuf *pkt)
+{
+       bool csum_ok;
+       u16 err_vec;
+
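+       /* Compressed error vector is enabled for T6 only */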
+       if (adap->params.tp.rx_pkt_encap)
+               err_vec = G_T6_COMPR_RXERR_VEC(ntohs(cpl->err_vec));
+       else
+               err_vec = ntohs(cpl->err_vec);
+
+       csum_ok = cpl->csum_calc && !err_vec;
+
+       if (cpl->vlan_ex)
+               cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER_VLAN,
+                                   PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+       else
+               cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER, 0);
+
+       if (cpl->l2info & htonl(F_RXF_IP))
+               cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV4,
+                                   csum_ok ? PKT_RX_IP_CKSUM_GOOD :
+                                             PKT_RX_IP_CKSUM_BAD);
+       else if (cpl->l2info & htonl(F_RXF_IP6))
+               cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV6,
+                                   csum_ok ? PKT_RX_IP_CKSUM_GOOD :
+                                             PKT_RX_IP_CKSUM_BAD);
+
+       if (cpl->l2info & htonl(F_RXF_TCP))
+               cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_TCP,
+                                   csum_ok ? PKT_RX_L4_CKSUM_GOOD :
+                                             PKT_RX_L4_CKSUM_BAD);
+       else if (cpl->l2info & htonl(F_RXF_UDP))
+               cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_UDP,
+                                   csum_ok ? PKT_RX_L4_CKSUM_GOOD :
+                                             PKT_RX_L4_CKSUM_BAD);
+}
+
 /**
  * process_responses - process responses from an SGE response queue
  * @q: the ingress queue to process
@@ -1528,8 +1611,6 @@ static int process_responses(struct sge_rspq *q, int budget,
                                        (const void *)&q->cur_desc[1];
                                struct rte_mbuf *pkt, *npkt;
                                u32 len, bufsz;
-                               bool csum_ok;
-                               u16 err_vec;
 
                                rc = (const struct rsp_ctrl *)
                                     ((const char *)q->cur_desc +
@@ -1546,16 +1627,6 @@ static int process_responses(struct sge_rspq *q, int budget,
                                len = G_RSPD_LEN(len);
                                pkt->pkt_len = len;
 
-                               /* Compressed error vector is enabled for
-                                * T6 only
-                                */
-                               if (q->adapter->params.tp.rx_pkt_encap)
-                                       err_vec = G_T6_COMPR_RXERR_VEC(
-                                                       ntohs(cpl->err_vec));
-                               else
-                                       err_vec = ntohs(cpl->err_vec);
-                               csum_ok = cpl->csum_calc && !err_vec;
-
                                /* Chain mbufs into len if necessary */
                                while (len) {
                                        struct rte_mbuf *new_pkt = rsd->buf;
@@ -1573,20 +1644,7 @@ static int process_responses(struct sge_rspq *q, int budget,
                                npkt->next = NULL;
                                pkt->nb_segs--;
 
-                               if (cpl->l2info & htonl(F_RXF_IP)) {
-                                       pkt->packet_type = RTE_PTYPE_L3_IPV4;
-                                       if (unlikely(!csum_ok))
-                                               pkt->ol_flags |=
-                                                       PKT_RX_IP_CKSUM_BAD;
-
-                                       if ((cpl->l2info &
-                                            htonl(F_RXF_UDP | F_RXF_TCP)) &&
-                                           !csum_ok)
-                                               pkt->ol_flags |=
-                                                       PKT_RX_L4_CKSUM_BAD;
-                               } else if (cpl->l2info & htonl(F_RXF_IP6)) {
-                                       pkt->packet_type = RTE_PTYPE_L3_IPV6;
-                               }
+                               cxgbe_fill_mbuf_info(q->adapter, cpl, pkt);
 
                                if (!rss_hdr->filter_tid &&
                                    rss_hdr->hash_type) {
@@ -1595,10 +1653,8 @@ static int process_responses(struct sge_rspq *q, int budget,
                                                ntohl(rss_hdr->hash_val);
                                }
 
-                               if (cpl->vlan_ex) {
-                                       pkt->ol_flags |= PKT_RX_VLAN;
+                               if (cpl->vlan_ex)
                                        pkt->vlan_tci = ntohs(cpl->vlan);
-                               }
 
                                rte_pktmbuf_adj(pkt, s->pktshift);
                                rxq->stats.pkts++;
@@ -1745,10 +1801,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
        /* Size needs to be multiple of 16, including status entry. */
        iq->size = cxgbe_roundup(iq->size, 16);
 
-       snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                eth_dev->device->driver->name,
-                fwevtq ? "fwq_ring" : "rx_ring",
-                eth_dev->data->port_id, queue_id);
+       snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+                       eth_dev->data->port_id, queue_id,
+                       fwevtq ? "fwq_ring" : "rx_ring");
        snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
 
        iq->desc = alloc_ring(iq->size, iq->iqe_len, 0, &iq->phys_addr, NULL, 0,
@@ -1761,12 +1816,16 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                            F_FW_CMD_WRITE | F_FW_CMD_EXEC);
 
        if (is_pf4(adap)) {
-               pciechan = cong > 0 ? cxgbe_ffs(cong) - 1 : pi->tx_chan;
+               pciechan = pi->tx_chan;
                c.op_to_vfn |= htonl(V_FW_IQ_CMD_PFN(adap->pf) |
                                     V_FW_IQ_CMD_VFN(0));
                if (cong >= 0)
-                       c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
-                                                   F_FW_IQ_CMD_IQRO);
+                       c.iqns_to_fl0congen =
+                               htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
+                                     V_FW_IQ_CMD_IQTYPE(cong ?
+                                                        FW_IQ_IQTYPE_NIC :
+                                                        FW_IQ_IQTYPE_OFLD) |
+                                     F_FW_IQ_CMD_IQRO);
        } else {
                pciechan = pi->port_id;
        }
@@ -1806,10 +1865,9 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                        fl->size = s->fl_starve_thres - 1 + 2 * 8;
                fl->size = cxgbe_roundup(fl->size, 8);
 
-               snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                        eth_dev->device->driver->name,
-                        fwevtq ? "fwq_ring" : "fl_ring",
-                        eth_dev->data->port_id, queue_id);
+               snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+                               eth_dev->data->port_id, queue_id,
+                               fwevtq ? "fwq_ring" : "fl_ring");
                snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
 
                fl->desc = alloc_ring(fl->size, sizeof(__be64),
@@ -2012,9 +2070,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
        /* Add status entries */
        nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
-       snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                eth_dev->device->driver->name, "tx_ring",
-                eth_dev->data->port_id, queue_id);
+       snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+                       eth_dev->data->port_id, queue_id, "tx_ring");
        snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
 
        txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
@@ -2077,6 +2134,63 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
        return 0;
 }
 
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+                         struct rte_eth_dev *eth_dev, uint16_t queue_id,
+                         unsigned int iqid, int socket_id)
+{
+       int ret, nentries;
+       struct fw_eq_ctrl_cmd c;
+       struct sge *s = &adap->sge;
+       struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+       char z_name[RTE_MEMZONE_NAMESIZE];
+       char z_name_sw[RTE_MEMZONE_NAMESIZE];
+
+       /* Add status entries */
+       nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
+
+       snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+                       eth_dev->data->port_id, queue_id, "ctrl_tx_ring");
+       snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
+
+       txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
+                                0, &txq->q.phys_addr,
+                                NULL, 0, queue_id,
+                                socket_id, z_name, z_name_sw);
+       if (!txq->q.desc)
+               return -ENOMEM;
+
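+       /* Build the firmware command that allocates and starts the control
+        * egress queue.
+        */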
+       memset(&c, 0, sizeof(c));
+       c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
+                           F_FW_CMD_WRITE | F_FW_CMD_EXEC |
+                           V_FW_EQ_CTRL_CMD_PFN(adap->pf) |
+                           V_FW_EQ_CTRL_CMD_VFN(0));
+       c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_ALLOC |
+                                F_FW_EQ_CTRL_CMD_EQSTART | (sizeof(c) / 16));
+       c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(0));
+       c.physeqid_pkd = htonl(0);
+       c.fetchszm_to_iqid =
+               htonl(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
+                     V_FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
+                     F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(iqid));
+       c.dcaen_to_eqsize =
+               htonl(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
+                     V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
+                     V_FW_EQ_CTRL_CMD_EQSIZE(nentries));
+       c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+       ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+       if (ret) {
+               txq->q.desc = NULL;
+               return ret;
+       }
+
+       init_txq(adap, &txq->q, G_FW_EQ_CTRL_CMD_EQID(ntohl(c.cmpliqid_eqid)),
+                G_FW_EQ_CTRL_CMD_EQID(ntohl(c.physeqid_pkd)));
+       txq->adapter = adap;
+       txq->full = 0;
+       return 0;
+}
+
 static void free_txq(struct sge_txq *q)
 {
        q->cntxt_id = 0;
@@ -2171,7 +2285,7 @@ void t4_sge_tx_monitor_stop(struct adapter *adap)
  */
 void t4_free_sge_resources(struct adapter *adap)
 {
-       int i;
+       unsigned int i;
        struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0];
        struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
 
@@ -2188,6 +2302,18 @@ void t4_free_sge_resources(struct adapter *adap)
                }
        }
 
+       /* clean up control Tx queues */
+       for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
+               struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
+
+               if (cq->q.desc) {
+                       reclaim_completed_tx_imm(&cq->q);
+                       t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
+                                       cq->q.cntxt_id);
+                       free_txq(&cq->q);
+               }
+       }
+
        if (adap->sge.fw_evtq.desc)
                free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
 }