X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fcxgbe%2Fsge.c;h=aba85a2090ea11d109f2674815a3e03443bc4f01;hb=7ba166841c68053484e1a520da3fd2186be33830;hp=c7abd8dd754f2e5c0ca9bf961bc786dd20ee468a;hpb=92c8a63223e590b675b7ee1baee889ab6e2748e8;p=dpdk.git diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c index c7abd8dd75..aba85a2090 100644 --- a/drivers/net/cxgbe/sge.c +++ b/drivers/net/cxgbe/sge.c @@ -1,37 +1,8 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2014-2015 Chelsio Communications. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Chelsio Communications nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2014-2018 Chelsio Communications. + * All rights reserved. */ -#include #include #include #include @@ -57,24 +28,37 @@ #include #include #include -#include -#include +#include #include #include #include -#include "common.h" -#include "t4_regs.h" -#include "t4_msg.h" +#include "base/common.h" +#include "base/t4_regs.h" +#include "base/t4_msg.h" #include "cxgbe.h" +static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap, + struct sge_eth_txq *txq); + /* * Max number of Rx buffers we replenish at a time. */ -#define MAX_RX_REFILL 16U +#define MAX_RX_REFILL 64U #define NOMEM_TMR_IDX (SGE_NTIMERS - 1) +/* + * Max Tx descriptor space we allow for an Ethernet packet to be inlined + * into a WR. + */ +#define MAX_IMM_TX_PKT_LEN 256 + +/* + * Max size of a WR sent through a control Tx queue. + */ +#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN + /* * Rx buffer sizes for "usembufs" Free List buffers (one ingress packet * per mbuf buffer). 
We currently only support two sizes for 1500- and @@ -89,7 +73,8 @@ static inline unsigned int fl_mtu_bufsize(struct adapter *adapter, { struct sge *s = &adapter->sge; - return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align); + return CXGBE_ALIGN(s->pktshift + RTE_ETHER_HDR_LEN + VLAN_HLEN + mtu, + s->fl_align); } #define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL) @@ -124,6 +109,90 @@ enum { RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */ }; +/** + * txq_avail - return the number of available slots in a Tx queue + * @q: the Tx queue + * + * Returns the number of descriptors in a Tx queue available to write new + * packets. + */ +static inline unsigned int txq_avail(const struct sge_txq *q) +{ + return q->size - 1 - q->in_use; +} + +static int map_mbuf(struct rte_mbuf *mbuf, dma_addr_t *addr) +{ + struct rte_mbuf *m = mbuf; + + for (; m; m = m->next, addr++) { + *addr = m->buf_iova + rte_pktmbuf_headroom(m); + if (*addr == 0) + goto out_err; + } + return 0; + +out_err: + return -ENOMEM; +} + +/** + * free_tx_desc - reclaims Tx descriptors and their buffers + * @q: the Tx queue to reclaim descriptors from + * @n: the number of descriptors to reclaim + * + * Reclaims Tx descriptors from an SGE Tx queue and frees the associated + * Tx buffers. Called with the Tx queue lock held. + */ +static void free_tx_desc(struct sge_txq *q, unsigned int n) +{ + struct tx_sw_desc *d; + unsigned int cidx = 0; + + d = &q->sdesc[cidx]; + while (n--) { + if (d->mbuf) { /* an SGL is present */ + rte_pktmbuf_free(d->mbuf); + d->mbuf = NULL; + } + if (d->coalesce.idx) { + int i; + + for (i = 0; i < d->coalesce.idx; i++) { + rte_pktmbuf_free(d->coalesce.mbuf[i]); + d->coalesce.mbuf[i] = NULL; + } + d->coalesce.idx = 0; + } + ++d; + if (++cidx == q->size) { + cidx = 0; + d = q->sdesc; + } + RTE_MBUF_PREFETCH_TO_FREE(&q->sdesc->mbuf->pool); + } +} + +static void reclaim_tx_desc(struct sge_txq *q, unsigned int n) +{ + struct tx_sw_desc *d; + unsigned int cidx = q->cidx; + + d = &q->sdesc[cidx]; + while (n--) { + if (d->mbuf) { /* an SGL is present */ + rte_pktmbuf_free(d->mbuf); + d->mbuf = NULL; + } + ++d; + if (++cidx == q->size) { + cidx = 0; + d = q->sdesc; + } + } + q->cidx = cidx; +} + /** * fl_cap - return the capacity of a free-buffer list * @fl: the FL @@ -157,19 +226,10 @@ static inline bool fl_starving(const struct adapter *adapter, static inline unsigned int get_buf_size(struct adapter *adapter, const struct rx_sw_desc *d) { - struct sge *s = &adapter->sge; unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE; - unsigned int buf_size; + unsigned int buf_size = 0; switch (rx_buf_size_idx) { - case RX_SMALL_PG_BUF: - buf_size = PAGE_SIZE; - break; - - case RX_LARGE_PG_BUF: - buf_size = PAGE_SIZE << s->fl_pg_order; - break; - case RX_SMALL_MTU_BUF: buf_size = FL_MTU_SMALL_BUFSIZE(adapter); break; @@ -180,8 +240,7 @@ static inline unsigned int get_buf_size(struct adapter *adapter, default: BUG_ON(1); - buf_size = 0; /* deal with bogus compiler warnings */ - /* NOTREACHED */ + /* NOT REACHED */ } return buf_size; @@ -235,7 +294,7 @@ static void unmap_rx_buf(struct sge_fl *q) static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) { - if (q->pend_cred >= 8) { + if (q->pend_cred >= 64) { u32 val = adap->params.arch.sge_fl_db; if (is_t4(adap->params.chip)) @@ -255,12 +314,16 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) * mechanism. 
*/ if (unlikely(!q->bar2_addr)) { - t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), - val | V_QID(q->cntxt_id)); + u32 reg = is_pf4(adap) ? MYPF_REG(A_SGE_PF_KDOORBELL) : + T4VF_SGE_BASE_ADDR + + A_SGE_VF_KDOORBELL; + + t4_write_reg_relaxed(adap, reg, + val | V_QID(q->cntxt_id)); } else { - writel(val | V_QID(q->bar2_qid), - (void *)((uintptr_t)q->bar2_addr + - SGE_UDB_KDOORBELL)); + writel_relaxed(val | V_QID(q->bar2_qid), + (void *)((uintptr_t)q->bar2_addr + + SGE_UDB_KDOORBELL)); /* * This Write memory Barrier will force the write to @@ -272,15 +335,6 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) } } -static inline struct rte_mbuf *cxgbe_rxmbuf_alloc(struct rte_mempool *mp) -{ - struct rte_mbuf *m; - - m = __rte_mbuf_raw_alloc(mp); - __rte_mbuf_sanity_check_raw(m, 0); - return m; -} - static inline void set_rx_sw_desc(struct rx_sw_desc *sd, void *buf, dma_addr_t mapping) { @@ -309,9 +363,29 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q, __be64 *d = &q->desc[q->pidx]; struct rx_sw_desc *sd = &q->sdesc[q->pidx]; unsigned int buf_size_idx = RX_SMALL_MTU_BUF; + struct rte_mbuf *buf_bulk[n]; + int ret, i; + struct rte_pktmbuf_pool_private *mbp_priv; + u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_JUMBO_FRAME; + + /* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */ + mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool); + if (jumbo_en && + ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000)) + buf_size_idx = RX_LARGE_MTU_BUF; + + ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk, n); + if (unlikely(ret != 0)) { + dev_debug(adap, "%s: failed to allocated fl entries in bulk ..\n", + __func__); + q->alloc_failed++; + rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++; + goto out; + } - while (n--) { - struct rte_mbuf *mbuf = cxgbe_rxmbuf_alloc(rxq->rspq.mb_pool); + for (i = 0; i < n; i++) { + struct rte_mbuf *mbuf = buf_bulk[i]; dma_addr_t mapping; if (!mbuf) { @@ -321,11 +395,20 @@ static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q, goto out; } - mbuf->data_off = RTE_PKTMBUF_HEADROOM; + rte_mbuf_refcnt_set(mbuf, 1); + mbuf->data_off = + (uint16_t)((char *) + RTE_PTR_ALIGN((char *)mbuf->buf_addr + + RTE_PKTMBUF_HEADROOM, + adap->sge.fl_align) - + (char *)mbuf->buf_addr); mbuf->next = NULL; + mbuf->nb_segs = 1; + mbuf->port = rxq->rspq.port_id; - mapping = (dma_addr_t)(mbuf->buf_physaddr + mbuf->data_off); - + mapping = (dma_addr_t)RTE_ALIGN(mbuf->buf_iova + + mbuf->data_off, + adap->sge.fl_align); mapping |= buf_size_idx; *d++ = cpu_to_be64(mapping); set_rx_sw_desc(sd, mbuf, mapping); @@ -375,201 +458,1037 @@ static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail)); } +/* + * Return the number of reclaimable descriptors in a Tx queue. 
+ */ +static inline int reclaimable(const struct sge_txq *q) +{ + int hw_cidx = ntohs(q->stat->cidx); + + hw_cidx -= q->cidx; + if (hw_cidx < 0) + return hw_cidx + q->size; + return hw_cidx; +} + /** - * alloc_ring - allocate resources for an SGE descriptor ring - * @dev: the PCI device's core device - * @nelem: the number of descriptors - * @elem_size: the size of each descriptor - * @sw_size: the size of the SW state associated with each ring element - * @phys: the physical address of the allocated ring - * @metadata: address of the array holding the SW state for the ring - * @stat_size: extra space in HW ring for status information - * @node: preferred node for memory allocations + * reclaim_completed_tx - reclaims completed Tx descriptors + * @q: the Tx queue to reclaim completed descriptors from * - * Allocates resources for an SGE descriptor ring, such as Tx queues, - * free buffer lists, or response queues. Each SGE ring requires - * space for its HW descriptors plus, optionally, space for the SW state - * associated with each HW entry (the metadata). The function returns - * three values: the virtual address for the HW ring (the return value - * of the function), the bus address of the HW ring, and the address - * of the SW ring. + * Reclaims Tx descriptors that the SGE has indicated it has processed. */ -static void *alloc_ring(size_t nelem, size_t elem_size, - size_t sw_size, dma_addr_t *phys, void *metadata, - size_t stat_size, __rte_unused uint16_t queue_id, - int socket_id, const char *z_name, - const char *z_name_sw) +void reclaim_completed_tx(struct sge_txq *q) { - size_t len = CXGBE_MAX_RING_DESC_SIZE * elem_size + stat_size; - const struct rte_memzone *tz; - void *s = NULL; + unsigned int avail = reclaimable(q); + + do { + /* reclaim as much as possible */ + reclaim_tx_desc(q, avail); + q->in_use -= avail; + avail = reclaimable(q); + } while (avail); +} - dev_debug(adapter, "%s: nelem = %zu; elem_size = %zu; sw_size = %zu; " - "stat_size = %zu; queue_id = %u; socket_id = %d; z_name = %s;" - " z_name_sw = %s\n", __func__, nelem, elem_size, sw_size, - stat_size, queue_id, socket_id, z_name, z_name_sw); +/** + * sgl_len - calculates the size of an SGL of the given capacity + * @n: the number of SGL entries + * + * Calculates the number of flits needed for a scatter/gather list that + * can hold the given number of entries. + */ +static inline unsigned int sgl_len(unsigned int n) +{ + /* + * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA + * addresses. The DSGL Work Request starts off with a 32-bit DSGL + * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N, + * repeated sequences of { Length[i], Length[i+1], Address[i], + * Address[i+1] } (this ensures that all addresses are on 64-bit + * boundaries). If N is even, then Length[N+1] should be set to 0 and + * Address[N+1] is omitted. + * + * The following calculation incorporates all of the above. It's + * somewhat hard to follow but, briefly: the "+2" accounts for the + * first two flits which include the DSGL header, Length0 and + * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 + * flits for every pair of the remaining N) +1 if (n-1) is odd; and + * finally the "+((n-1)&1)" adds the one remaining flit needed if + * (n-1) is odd ... 
+ */ + n--; + return (3 * n) / 2 + (n & 1) + 2; +} - tz = rte_memzone_lookup(z_name); - if (tz) { - dev_debug(adapter, "%s: tz exists...returning existing..\n", - __func__); - goto alloc_sw_ring; +/** + * flits_to_desc - returns the num of Tx descriptors for the given flits + * @n: the number of flits + * + * Returns the number of Tx descriptors needed for the supplied number + * of flits. + */ +static inline unsigned int flits_to_desc(unsigned int n) +{ + return DIV_ROUND_UP(n, 8); +} + +/** + * is_eth_imm - can an Ethernet packet be sent as immediate data? + * @m: the packet + * + * Returns whether an Ethernet packet is small enough to fit as + * immediate data. Return value corresponds to the headroom required. + */ +static inline int is_eth_imm(const struct rte_mbuf *m) +{ + unsigned int hdrlen = (m->ol_flags & PKT_TX_TCP_SEG) ? + sizeof(struct cpl_tx_pkt_lso_core) : 0; + + hdrlen += sizeof(struct cpl_tx_pkt); + if (m->pkt_len <= MAX_IMM_TX_PKT_LEN - hdrlen) + return hdrlen; + + return 0; +} + +/** + * calc_tx_flits - calculate the number of flits for a packet Tx WR + * @m: the packet + * @adap: adapter structure pointer + * + * Returns the number of flits needed for a Tx WR for the given Ethernet + * packet, including the needed WR and CPL headers. + */ +static inline unsigned int calc_tx_flits(const struct rte_mbuf *m, + struct adapter *adap) +{ + size_t wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkt_wr) : + sizeof(struct fw_eth_tx_pkt_vm_wr); + unsigned int flits; + int hdrlen; + + /* + * If the mbuf is small enough, we can pump it out as a work request + * with only immediate data. In that case we just have to have the + * TX Packet header plus the mbuf data in the Work Request. + */ + + hdrlen = is_eth_imm(m); + if (hdrlen) + return DIV_ROUND_UP(m->pkt_len + hdrlen, sizeof(__be64)); + + /* + * Otherwise, we're going to have to construct a Scatter gather list + * of the mbuf body and fragments. We also include the flits necessary + * for the TX Packet Work Request and CPL. We always have a firmware + * Write Header (incorporated as part of the cpl_tx_pkt_lso and + * cpl_tx_pkt structures), followed by either a TX Packet Write CPL + * message or, if we're doing a Large Send Offload, an LSO CPL message + * with an embedded TX Packet Write CPL message. + */ + flits = sgl_len(m->nb_segs); + if (m->tso_segsz) + flits += (wr_size + sizeof(struct cpl_tx_pkt_lso_core) + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + else + flits += (wr_size + + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64); + return flits; +} + +/** + * write_sgl - populate a scatter/gather list for a packet + * @mbuf: the packet + * @q: the Tx queue we are writing into + * @sgl: starting location for writing the SGL + * @end: points right after the end of the SGL + * @start: start offset into mbuf main-body data to include in the SGL + * @addr: address of mapped region + * + * Generates a scatter/gather list for the buffers that make up a packet. + * The caller must provide adequate space for the SGL that will be written. + * The SGL includes all of the packet's page fragments and the data in its + * main body except for the first @start bytes. @sgl must be 16-byte + * aligned and within a Tx descriptor with available space. @end points + * write after the end of the SGL but does not account for any potential + * wrap around, i.e., @end > @sgl. 
+ */ +static void write_sgl(struct rte_mbuf *mbuf, struct sge_txq *q, + struct ulptx_sgl *sgl, u64 *end, unsigned int start, + const dma_addr_t *addr) +{ + unsigned int i, len; + struct ulptx_sge_pair *to; + struct rte_mbuf *m = mbuf; + unsigned int nfrags = m->nb_segs; + struct ulptx_sge_pair buf[nfrags / 2]; + + len = m->data_len - start; + sgl->len0 = htonl(len); + sgl->addr0 = rte_cpu_to_be_64(addr[0]); + + sgl->cmd_nsge = htonl(V_ULPTX_CMD(ULP_TX_SC_DSGL) | + V_ULPTX_NSGE(nfrags)); + if (likely(--nfrags == 0)) + return; + /* + * Most of the complexity below deals with the possibility we hit the + * end of the queue in the middle of writing the SGL. For this case + * only we create the SGL in a temporary buffer and then copy it. + */ + to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; + + for (i = 0; nfrags >= 2; nfrags -= 2, to++) { + m = m->next; + to->len[0] = rte_cpu_to_be_32(m->data_len); + to->addr[0] = rte_cpu_to_be_64(addr[++i]); + m = m->next; + to->len[1] = rte_cpu_to_be_32(m->data_len); + to->addr[1] = rte_cpu_to_be_64(addr[++i]); + } + if (nfrags) { + m = m->next; + to->len[0] = rte_cpu_to_be_32(m->data_len); + to->len[1] = rte_cpu_to_be_32(0); + to->addr[0] = rte_cpu_to_be_64(addr[i + 1]); } + if (unlikely((u8 *)end > (u8 *)q->stat)) { + unsigned int part0 = RTE_PTR_DIFF((u8 *)q->stat, + (u8 *)sgl->sge); + unsigned int part1; + + if (likely(part0)) + memcpy(sgl->sge, buf, part0); + part1 = RTE_PTR_DIFF((u8 *)end, (u8 *)q->stat); + rte_memcpy(q->desc, RTE_PTR_ADD((u8 *)buf, part0), part1); + end = RTE_PTR_ADD((void *)q->desc, part1); + } + if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ + *(u64 *)end = 0; +} + +#define IDXDIFF(head, tail, wrap) \ + ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head)) + +#define Q_IDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->size) +#define R_IDXDIFF(q, idx) IDXDIFF((q)->cidx, (q)->idx, (q)->size) + +#define PIDXDIFF(head, tail, wrap) \ + ((tail) >= (head) ? (tail) - (head) : (wrap) - (head) + (tail)) +#define P_IDXDIFF(q, idx) PIDXDIFF((q)->cidx, idx, (q)->size) + +/** + * ring_tx_db - ring a Tx queue's doorbell + * @adap: the adapter + * @q: the Tx queue + * @n: number of new descriptors to give to HW + * + * Ring the doorbel for a Tx queue. + */ +static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q) +{ + int n = Q_IDXDIFF(q, dbidx); /* - * Allocate TX/RX ring hardware descriptors. A memzone large enough to - * handle the maximum ring size is allocated in order to allow for - * resizing in later calls to the queue setup function. + * Make sure that all writes to the TX Descriptors are committed + * before we tell the hardware about them. */ - tz = rte_memzone_reserve_aligned(z_name, len, socket_id, 0, 4096); - if (!tz) - return NULL; + rte_wmb(); -alloc_sw_ring: - memset(tz->addr, 0, len); - if (sw_size) { - s = rte_zmalloc_socket(z_name_sw, nelem * sw_size, - RTE_CACHE_LINE_SIZE, socket_id); + /* + * If we don't have access to the new User Doorbell (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(!q->bar2_addr)) { + u32 val = V_PIDX(n); - if (!s) { - dev_err(adapter, "%s: failed to get sw_ring memory\n", - __func__); - return NULL; + /* + * For T4 we need to participate in the Doorbell Recovery + * mechanism. 
+ */ + if (!q->db_disabled) + t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL), + V_QID(q->cntxt_id) | val); + else + q->db_pidx_inc += n; + q->db_pidx = q->pidx; + } else { + u32 val = V_PIDX_T5(n); + + /* + * T4 and later chips share the same PIDX field offset within + * the doorbell, but T5 and later shrank the field in order to + * gain a bit for Doorbell Priority. The field was absurdly + * large in the first place (14 bits) so we just use the T5 + * and later limits and warn if a Queue ID is too large. + */ + WARN_ON(val & F_DBPRIO); + + writel(val | V_QID(q->bar2_qid), + (void *)((uintptr_t)q->bar2_addr + SGE_UDB_KDOORBELL)); + + /* + * This Write Memory Barrier will force the write to the User + * Doorbell area to be flushed. This is needed to prevent + * writes on different CPUs for the same queue from hitting + * the adapter out of order. This is required when some Work + * Requests take the Write Combine Gather Buffer path (user + * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some + * take the traditional path where we simply increment the + * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the + * hardware DMA read the actual Work Request. + */ + rte_wmb(); + } + q->dbidx = q->pidx; +} + +/* + * Figure out what HW csum a packet wants and return the appropriate control + * bits. + */ +static u64 hwcsum(enum chip_type chip, const struct rte_mbuf *m) +{ + int csum_type; + + if (m->ol_flags & PKT_TX_IP_CKSUM) { + switch (m->ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_TCP_CKSUM: + csum_type = TX_CSUM_TCPIP; + break; + case PKT_TX_UDP_CKSUM: + csum_type = TX_CSUM_UDPIP; + break; + default: + goto nocsum; } + } else { + goto nocsum; } - if (metadata) - *(void **)metadata = s; - *phys = (uint64_t)tz->phys_addr; - return tz->addr; + if (likely(csum_type >= TX_CSUM_TCPIP)) { + u64 hdr_len = V_TXPKT_IPHDR_LEN(m->l3_len); + int eth_hdr_len = m->l2_len; + + if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5) + hdr_len |= V_TXPKT_ETHHDR_LEN(eth_hdr_len); + else + hdr_len |= V_T6_TXPKT_ETHHDR_LEN(eth_hdr_len); + return V_TXPKT_CSUM_TYPE(csum_type) | hdr_len; + } +nocsum: + /* + * unknown protocol, disable HW csum + * and hope a bad packet is detected + */ + return F_TXPKT_L4CSUM_DIS; +} + +static inline void txq_advance(struct sge_txq *q, unsigned int n) +{ + q->in_use += n; + q->pidx += n; + if (q->pidx >= q->size) + q->pidx -= q->size; +} + +#define MAX_COALESCE_LEN 64000 + +static inline int wraps_around(struct sge_txq *q, int ndesc) +{ + return (q->pidx + ndesc) > q->size ? 1 : 0; +} + +static void tx_timer_cb(void *data) +{ + struct adapter *adap = (struct adapter *)data; + struct sge_eth_txq *txq = &adap->sge.ethtxq[0]; + int i; + unsigned int coal_idx; + + /* monitor any pending tx */ + for (i = 0; i < adap->sge.max_ethqsets; i++, txq++) { + if (t4_os_trylock(&txq->txq_lock)) { + coal_idx = txq->q.coalesce.idx; + if (coal_idx) { + if (coal_idx == txq->q.last_coal_idx && + txq->q.pidx == txq->q.last_pidx) { + ship_tx_pkt_coalesce_wr(adap, txq); + } else { + txq->q.last_coal_idx = coal_idx; + txq->q.last_pidx = txq->q.pidx; + } + } + t4_os_unlock(&txq->txq_lock); + } + } + rte_eal_alarm_set(50, tx_timer_cb, (void *)adap); } /** - * t4_pktgl_to_mbuf_usembufs - build an mbuf from a packet gather list - * @gl: the gather list + * ship_tx_pkt_coalesce_wr - finalizes and ships a coalesce WR + * @ adap: adapter structure + * @txq: tx queue * - * Builds an mbuf from the given packet gather list. Returns the mbuf or - * %NULL if mbuf allocation failed. 
+ * writes the different fields of the pkts WR and sends it. */ -static struct rte_mbuf *t4_pktgl_to_mbuf_usembufs(const struct pkt_gl *gl) +static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap, + struct sge_eth_txq *txq) { - /* - * If there's only one mbuf fragment, just return that. + struct fw_eth_tx_pkts_vm_wr *vmwr; + const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) + + sizeof(vmwr->ethmacsrc) + + sizeof(vmwr->ethtype) + + sizeof(vmwr->vlantci)); + struct fw_eth_tx_pkts_wr *wr; + struct sge_txq *q = &txq->q; + unsigned int ndesc; + u32 wr_mid; + + /* fill the pkts WR header */ + wr = (void *)&q->desc[q->pidx]; + wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR)); + vmwr = (void *)&q->desc[q->pidx]; + + wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2)); + ndesc = flits_to_desc(q->coalesce.flits); + wr->equiq_to_len16 = htonl(wr_mid); + wr->plen = cpu_to_be16(q->coalesce.len); + wr->npkt = q->coalesce.idx; + wr->r3 = 0; + if (is_pf4(adap)) { + wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR)); + wr->type = q->coalesce.type; + } else { + wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR)); + vmwr->r4 = 0; + memcpy((void *)vmwr->ethmacdst, (void *)q->coalesce.ethmacdst, + fw_hdr_copy_len); + } + + /* zero out coalesce structure members */ + memset((void *)&q->coalesce, 0, sizeof(struct eth_coalesce)); + + txq_advance(q, ndesc); + txq->stats.coal_wr++; + txq->stats.coal_pkts += wr->npkt; + + if (Q_IDXDIFF(q, equeidx) >= q->size / 2) { + q->equeidx = q->pidx; + wr_mid |= F_FW_WR_EQUEQ; + wr->equiq_to_len16 = htonl(wr_mid); + } + ring_tx_db(adap, q); +} + +/** + * should_tx_packet_coalesce - decides wether to coalesce an mbuf or not + * @txq: tx queue where the mbuf is sent + * @mbuf: mbuf to be sent + * @nflits: return value for number of flits needed + * @adap: adapter structure + * + * This function decides if a packet should be coalesced or not. + */ +static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq, + struct rte_mbuf *mbuf, + unsigned int *nflits, + struct adapter *adap) +{ + struct fw_eth_tx_pkts_vm_wr *wr; + const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) + + sizeof(wr->ethmacsrc) + + sizeof(wr->ethtype) + + sizeof(wr->vlantci)); + struct sge_txq *q = &txq->q; + unsigned int flits, ndesc; + unsigned char type = 0; + int credits, wr_size; + + /* use coal WR type 1 when no frags are present */ + type = (mbuf->nb_segs == 1) ? 1 : 0; + if (!is_pf4(adap)) { + if (!type) + return 0; + + if (q->coalesce.idx && memcmp((void *)q->coalesce.ethmacdst, + rte_pktmbuf_mtod(mbuf, void *), + fw_hdr_copy_len)) + ship_tx_pkt_coalesce_wr(adap, txq); + } + + if (unlikely(type != q->coalesce.type && q->coalesce.idx)) + ship_tx_pkt_coalesce_wr(adap, txq); + + /* calculate the number of flits required for coalescing this packet + * without the 2 flits of the WR header. These are added further down + * if we are just starting in new PKTS WR. sgl_len doesn't account for + * the possible 16 bytes alignment ULP TX commands so we do it here. 
*/ - if (likely(gl->nfrags == 1)) - return gl->mbufs[0]; + flits = (sgl_len(mbuf->nb_segs) + 1) & ~1U; + if (type == 0) + flits += (sizeof(struct ulp_txpkt) + + sizeof(struct ulptx_idata)) / sizeof(__be64); + flits += sizeof(struct cpl_tx_pkt_core) / sizeof(__be64); + *nflits = flits; + + /* If coalescing is on, the mbuf is added to a pkts WR */ + if (q->coalesce.idx) { + ndesc = DIV_ROUND_UP(q->coalesce.flits + flits, 8); + credits = txq_avail(q) - ndesc; + + /* If we are wrapping or this is last mbuf then, send the + * already coalesced mbufs and let the non-coalesce pass + * handle the mbuf. + */ + if (unlikely(credits < 0 || wraps_around(q, ndesc))) { + ship_tx_pkt_coalesce_wr(adap, txq); + return 0; + } - return NULL; + /* If the max coalesce len or the max WR len is reached + * ship the WR and keep coalescing on. + */ + if (unlikely((q->coalesce.len + mbuf->pkt_len > + MAX_COALESCE_LEN) || + (q->coalesce.flits + flits > + q->coalesce.max))) { + ship_tx_pkt_coalesce_wr(adap, txq); + goto new; + } + return 1; + } + +new: + /* start a new pkts WR, the WR header is not filled below */ + wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkts_wr) : + sizeof(struct fw_eth_tx_pkts_vm_wr); + flits += wr_size / sizeof(__be64); + ndesc = flits_to_desc(q->coalesce.flits + flits); + credits = txq_avail(q) - ndesc; + + if (unlikely(credits < 0 || wraps_around(q, ndesc))) + return 0; + q->coalesce.flits += wr_size / sizeof(__be64); + q->coalesce.type = type; + q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] + + q->coalesce.flits * sizeof(__be64); + if (!is_pf4(adap)) + memcpy((void *)q->coalesce.ethmacdst, + rte_pktmbuf_mtod(mbuf, void *), fw_hdr_copy_len); + return 1; } /** - * t4_pktgl_to_mbuf - build an mbuf from a packet gather list - * @gl: the gather list + * tx_do_packet_coalesce - add an mbuf to a coalesce WR + * @txq: sge_eth_txq used send the mbuf + * @mbuf: mbuf to be sent + * @flits: flits needed for this mbuf + * @adap: adapter structure + * @pi: port_info structure + * @addr: mapped address of the mbuf * - * Builds an mbuf from the given packet gather list. Returns the mbuf or - * %NULL if mbuf allocation failed. + * Adds an mbuf to be sent as part of a coalesce WR by filling a + * ulp_tx_pkt command, ulp_tx_sc_imm command, cpl message and + * ulp_tx_sc_dsgl command. 
*/ -static struct rte_mbuf *t4_pktgl_to_mbuf(const struct pkt_gl *gl) +static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq, + struct rte_mbuf *mbuf, + int flits, struct adapter *adap, + const struct port_info *pi, + dma_addr_t *addr, uint16_t nb_pkts) { - return t4_pktgl_to_mbuf_usembufs(gl); + u64 cntrl, *end; + struct sge_txq *q = &txq->q; + struct ulp_txpkt *mc; + struct ulptx_idata *sc_imm; + struct cpl_tx_pkt_core *cpl; + struct tx_sw_desc *sd; + unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len; + + if (q->coalesce.type == 0) { + mc = (struct ulp_txpkt *)q->coalesce.ptr; + mc->cmd_dest = htonl(V_ULPTX_CMD(4) | V_ULP_TXPKT_DEST(0) | + V_ULP_TXPKT_FID(adap->sge.fw_evtq.cntxt_id) | + F_ULP_TXPKT_RO); + mc->len = htonl(DIV_ROUND_UP(flits, 2)); + sc_imm = (struct ulptx_idata *)(mc + 1); + sc_imm->cmd_more = htonl(V_ULPTX_CMD(ULP_TX_SC_IMM) | + F_ULP_TX_SC_MORE); + sc_imm->len = htonl(sizeof(*cpl)); + end = (u64 *)mc + flits; + cpl = (struct cpl_tx_pkt_core *)(sc_imm + 1); + } else { + end = (u64 *)q->coalesce.ptr + flits; + cpl = (struct cpl_tx_pkt_core *)q->coalesce.ptr; + } + + /* update coalesce structure for this txq */ + q->coalesce.flits += flits; + q->coalesce.ptr += flits * sizeof(__be64); + q->coalesce.len += mbuf->pkt_len; + + /* fill the cpl message, same as in t4_eth_xmit, this should be kept + * similar to t4_eth_xmit + */ + if (mbuf->ol_flags & PKT_TX_IP_CKSUM) { + cntrl = hwcsum(adap->params.chip, mbuf) | + F_TXPKT_IPCSUM_DIS; + txq->stats.tx_cso++; + } else { + cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS; + } + + if (mbuf->ol_flags & PKT_TX_VLAN_PKT) { + txq->stats.vlan_ins++; + cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci); + } + + cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT)); + if (is_pf4(adap)) + cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) | + V_TXPKT_PF(adap->pf)); + else + cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id)); + cpl->pack = htons(0); + cpl->len = htons(len); + cpl->ctrl1 = cpu_to_be64(cntrl); + write_sgl(mbuf, q, (struct ulptx_sgl *)(cpl + 1), end, 0, addr); + txq->stats.pkts++; + txq->stats.tx_bytes += len; + + sd = &q->sdesc[q->pidx + (idx >> 1)]; + if (!(idx & 1)) { + if (sd->coalesce.idx) { + int i; + + for (i = 0; i < sd->coalesce.idx; i++) { + rte_pktmbuf_free(sd->coalesce.mbuf[i]); + sd->coalesce.mbuf[i] = NULL; + } + } + } + + /* store pointers to the mbuf and the sgl used in free_tx_desc. + * each tx desc can hold two pointers corresponding to the value + * of ETH_COALESCE_PKT_PER_DESC + */ + sd->coalesce.mbuf[idx & 1] = mbuf; + sd->coalesce.sgl[idx & 1] = (struct ulptx_sgl *)(cpl + 1); + sd->coalesce.idx = (idx & 1) + 1; + + /* Send the coalesced work request, only if max reached. However, + * if lower latency is preferred over throughput, then don't wait + * for coalescing the next Tx burst and send the packets now. + */ + q->coalesce.idx++; + if (q->coalesce.idx == adap->params.max_tx_coalesce_num || + (adap->devargs.tx_mode_latency && q->coalesce.idx >= nb_pkts)) + ship_tx_pkt_coalesce_wr(adap, txq); + + return 0; } -#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \ - ((dma_addr_t) ((mb)->buf_physaddr + (mb)->data_off)) +/** + * t4_eth_xmit - add a packet to an Ethernet Tx queue + * @txq: the egress queue + * @mbuf: the packet + * + * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled. 
+ */ +int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf, + uint16_t nb_pkts) +{ + const struct port_info *pi; + struct cpl_tx_pkt_lso_core *lso; + struct adapter *adap; + struct rte_mbuf *m = mbuf; + struct fw_eth_tx_pkt_wr *wr; + struct fw_eth_tx_pkt_vm_wr *vmwr; + struct cpl_tx_pkt_core *cpl; + struct tx_sw_desc *d; + dma_addr_t addr[m->nb_segs]; + unsigned int flits, ndesc, cflits; + int l3hdr_len, l4hdr_len, eth_xtra_len; + int len, last_desc; + int credits; + u32 wr_mid; + u64 cntrl, *end; + bool v6; + u32 max_pkt_len = txq->data->dev_conf.rxmode.max_rx_pkt_len; + + /* Reject xmit if queue is stopped */ + if (unlikely(txq->flags & EQ_STOPPED)) + return -(EBUSY); + + /* + * The chip min packet length is 10 octets but play safe and reject + * anything shorter than an Ethernet header. + */ + if (unlikely(m->pkt_len < RTE_ETHER_HDR_LEN)) { +out_free: + rte_pktmbuf_free(m); + return 0; + } + + if ((!(m->ol_flags & PKT_TX_TCP_SEG)) && + (unlikely(m->pkt_len > max_pkt_len))) + goto out_free; + + pi = txq->data->dev_private; + adap = pi->adapter; + + cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS; + /* align the end of coalesce WR to a 512 byte boundary */ + txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8; + + if (!((m->ol_flags & PKT_TX_TCP_SEG) || + m->pkt_len > RTE_ETHER_MAX_LEN)) { + if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) { + if (unlikely(map_mbuf(mbuf, addr) < 0)) { + dev_warn(adap, "%s: mapping err for coalesce\n", + __func__); + txq->stats.mapping_err++; + goto out_free; + } + return tx_do_packet_coalesce(txq, mbuf, cflits, adap, + pi, addr, nb_pkts); + } else { + return -EBUSY; + } + } + + if (txq->q.coalesce.idx) + ship_tx_pkt_coalesce_wr(adap, txq); + + flits = calc_tx_flits(m, adap); + ndesc = flits_to_desc(flits); + credits = txq_avail(&txq->q) - ndesc; + + if (unlikely(credits < 0)) { + dev_debug(adap, "%s: Tx ring %u full; credits = %d\n", + __func__, txq->q.cntxt_id, credits); + return -EBUSY; + } + + if (unlikely(map_mbuf(m, addr) < 0)) { + txq->stats.mapping_err++; + goto out_free; + } + + wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(flits, 2)); + if (Q_IDXDIFF(&txq->q, equeidx) >= 64) { + txq->q.equeidx = txq->q.pidx; + wr_mid |= F_FW_WR_EQUEQ; + } + + wr = (void *)&txq->q.desc[txq->q.pidx]; + vmwr = (void *)&txq->q.desc[txq->q.pidx]; + wr->equiq_to_len16 = htonl(wr_mid); + if (is_pf4(adap)) { + wr->r3 = rte_cpu_to_be_64(0); + end = (u64 *)wr + flits; + } else { + const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) + + sizeof(vmwr->ethmacsrc) + + sizeof(vmwr->ethtype) + + sizeof(vmwr->vlantci)); + + vmwr->r3[0] = rte_cpu_to_be_32(0); + vmwr->r3[1] = rte_cpu_to_be_32(0); + memcpy((void *)vmwr->ethmacdst, rte_pktmbuf_mtod(m, void *), + fw_hdr_copy_len); + end = (u64 *)vmwr + flits; + } + + len = 0; + len += sizeof(*cpl); + + /* Coalescing skipped and we send through normal path */ + if (!(m->ol_flags & PKT_TX_TCP_SEG)) { + wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ? + FW_ETH_TX_PKT_WR : + FW_ETH_TX_PKT_VM_WR) | + V_FW_WR_IMMDLEN(len)); + if (is_pf4(adap)) + cpl = (void *)(wr + 1); + else + cpl = (void *)(vmwr + 1); + if (m->ol_flags & PKT_TX_IP_CKSUM) { + cntrl = hwcsum(adap->params.chip, m) | + F_TXPKT_IPCSUM_DIS; + txq->stats.tx_cso++; + } + } else { + if (is_pf4(adap)) + lso = (void *)(wr + 1); + else + lso = (void *)(vmwr + 1); + v6 = (m->ol_flags & PKT_TX_IPV6) != 0; + l3hdr_len = m->l3_len; + l4hdr_len = m->l4_len; + eth_xtra_len = m->l2_len - RTE_ETHER_HDR_LEN; + len += sizeof(*lso); + wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ? 
+ FW_ETH_TX_PKT_WR : + FW_ETH_TX_PKT_VM_WR) | + V_FW_WR_IMMDLEN(len)); + lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) | + F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE | + V_LSO_IPV6(v6) | + V_LSO_ETHHDR_LEN(eth_xtra_len / 4) | + V_LSO_IPHDR_LEN(l3hdr_len / 4) | + V_LSO_TCPHDR_LEN(l4hdr_len / 4)); + lso->ipid_ofst = htons(0); + lso->mss = htons(m->tso_segsz); + lso->seqno_offset = htonl(0); + if (is_t4(adap->params.chip)) + lso->len = htonl(m->pkt_len); + else + lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len)); + cpl = (void *)(lso + 1); + + if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) + cntrl = V_TXPKT_ETHHDR_LEN(eth_xtra_len); + else + cntrl = V_T6_TXPKT_ETHHDR_LEN(eth_xtra_len); + + cntrl |= V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : + TX_CSUM_TCPIP) | + V_TXPKT_IPHDR_LEN(l3hdr_len); + txq->stats.tso++; + txq->stats.tx_cso += m->tso_segsz; + } + + if (m->ol_flags & PKT_TX_VLAN_PKT) { + txq->stats.vlan_ins++; + cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci); + } + + cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT)); + if (is_pf4(adap)) + cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) | + V_TXPKT_PF(adap->pf)); + else + cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id) | + V_TXPKT_PF(0)); + + cpl->pack = htons(0); + cpl->len = htons(m->pkt_len); + cpl->ctrl1 = cpu_to_be64(cntrl); + + txq->stats.pkts++; + txq->stats.tx_bytes += m->pkt_len; + last_desc = txq->q.pidx + ndesc - 1; + if (last_desc >= (int)txq->q.size) + last_desc -= txq->q.size; + + d = &txq->q.sdesc[last_desc]; + if (d->coalesce.idx) { + int i; + + for (i = 0; i < d->coalesce.idx; i++) { + rte_pktmbuf_free(d->coalesce.mbuf[i]); + d->coalesce.mbuf[i] = NULL; + } + d->coalesce.idx = 0; + } + write_sgl(m, &txq->q, (struct ulptx_sgl *)(cpl + 1), end, 0, + addr); + txq->q.sdesc[last_desc].mbuf = m; + txq->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1); + txq_advance(&txq->q, ndesc); + ring_tx_db(adap, &txq->q); + return 0; +} /** - * t4_ethrx_handler - process an ingress ethernet packet - * @q: the response queue that received the packet - * @rsp: the response queue descriptor holding the RX_PKT message - * @si: the gather list of packet fragments + * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs + * @q: the SGE control Tx queue * - * Process an ingress ethernet packet and deliver it to the stack. + * This is a variant of reclaim_completed_tx() that is used for Tx queues + * that send only immediate data (presently just the control queues) and + * thus do not have any mbufs to release. */ -int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, - const struct pkt_gl *si) +static inline void reclaim_completed_tx_imm(struct sge_txq *q) { - struct rte_mbuf *mbuf; - const struct cpl_rx_pkt *pkt; - const struct rss_header *rss_hdr; - bool csum_ok; - struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); + int hw_cidx = ntohs(q->stat->cidx); + int reclaim = hw_cidx - q->cidx; - rss_hdr = (const void *)rsp; - pkt = (const void *)&rsp[1]; - csum_ok = pkt->csum_calc && !pkt->err_vec; + if (reclaim < 0) + reclaim += q->size; - mbuf = t4_pktgl_to_mbuf(si); - if (unlikely(!mbuf)) { - rxq->stats.rx_drops++; - return 0; + q->in_use -= reclaim; + q->cidx = hw_cidx; +} + +/** + * is_imm - check whether a packet can be sent as immediate data + * @mbuf: the packet + * + * Returns true if a packet can be sent as a WR with immediate data. 
+ */ +static inline int is_imm(const struct rte_mbuf *mbuf) +{ + return mbuf->pkt_len <= MAX_CTRL_WR_LEN; +} + +/** + * inline_tx_mbuf: inline a packet's data into TX descriptors + * @q: the TX queue where the packet will be inlined + * @from: pointer to data portion of packet + * @to: pointer after cpl where data has to be inlined + * @len: length of data to inline + * + * Inline a packet's contents directly to TX descriptors, starting at + * the given position within the TX DMA ring. + * Most of the complexity of this operation is dealing with wrap arounds + * in the middle of the packet we want to inline. + */ +static void inline_tx_mbuf(const struct sge_txq *q, caddr_t from, caddr_t *to, + int len) +{ + int left = RTE_PTR_DIFF(q->stat, *to); + + if (likely((uintptr_t)*to + len <= (uintptr_t)q->stat)) { + rte_memcpy(*to, from, len); + *to = RTE_PTR_ADD(*to, len); + } else { + rte_memcpy(*to, from, left); + from = RTE_PTR_ADD(from, left); + left = len - left; + rte_memcpy((void *)q->desc, from, left); + *to = RTE_PTR_ADD((void *)q->desc, left); + } +} + +/** + * ctrl_xmit - send a packet through an SGE control Tx queue + * @q: the control queue + * @mbuf: the packet + * + * Send a packet through an SGE control Tx queue. Packets sent through + * a control queue must fit entirely as immediate data. + */ +static int ctrl_xmit(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf) +{ + unsigned int ndesc; + struct fw_wr_hdr *wr; + caddr_t dst; + + if (unlikely(!is_imm(mbuf))) { + WARN_ON(1); + rte_pktmbuf_free(mbuf); + return -1; } - mbuf->port = pkt->iff; - if (pkt->l2info & htonl(F_RXF_IP)) { - mbuf->ol_flags |= PKT_RX_IPV4_HDR; - if (unlikely(!csum_ok)) - mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; + reclaim_completed_tx_imm(&q->q); + ndesc = DIV_ROUND_UP(mbuf->pkt_len, sizeof(struct tx_desc)); + t4_os_lock(&q->ctrlq_lock); - if ((pkt->l2info & htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok) - mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; - } else if (pkt->l2info & htonl(F_RXF_IP6)) { - mbuf->ol_flags |= PKT_RX_IPV6_HDR; + q->full = txq_avail(&q->q) < ndesc ? 1 : 0; + if (unlikely(q->full)) { + t4_os_unlock(&q->ctrlq_lock); + return -1; } - mbuf->port = pkt->iff; + wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; + dst = (void *)wr; + inline_tx_mbuf(&q->q, rte_pktmbuf_mtod(mbuf, caddr_t), + &dst, mbuf->data_len); + + txq_advance(&q->q, ndesc); + if (unlikely(txq_avail(&q->q) < 64)) + wr->lo |= htonl(F_FW_WR_EQUEQ); + + q->txp++; + + ring_tx_db(q->adapter, &q->q); + t4_os_unlock(&q->ctrlq_lock); + + rte_pktmbuf_free(mbuf); + return 0; +} + +/** + * t4_mgmt_tx - send a management message + * @q: the control queue + * @mbuf: the packet containing the management message + * + * Send a management message through control queue. + */ +int t4_mgmt_tx(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf) +{ + return ctrl_xmit(q, mbuf); +} + +/** + * alloc_ring - allocate resources for an SGE descriptor ring + * @dev: the PCI device's core device + * @nelem: the number of descriptors + * @elem_size: the size of each descriptor + * @sw_size: the size of the SW state associated with each ring element + * @phys: the physical address of the allocated ring + * @metadata: address of the array holding the SW state for the ring + * @stat_size: extra space in HW ring for status information + * @node: preferred node for memory allocations + * + * Allocates resources for an SGE descriptor ring, such as Tx queues, + * free buffer lists, or response queues. 
Each SGE ring requires + * space for its HW descriptors plus, optionally, space for the SW state + * associated with each HW entry (the metadata). The function returns + * three values: the virtual address for the HW ring (the return value + * of the function), the bus address of the HW ring, and the address + * of the SW ring. + */ +static void *alloc_ring(size_t nelem, size_t elem_size, + size_t sw_size, dma_addr_t *phys, void *metadata, + size_t stat_size, __rte_unused uint16_t queue_id, + int socket_id, const char *z_name, + const char *z_name_sw) +{ + size_t len = CXGBE_MAX_RING_DESC_SIZE * elem_size + stat_size; + const struct rte_memzone *tz; + void *s = NULL; + + dev_debug(adapter, "%s: nelem = %zu; elem_size = %zu; sw_size = %zu; " + "stat_size = %zu; queue_id = %u; socket_id = %d; z_name = %s;" + " z_name_sw = %s\n", __func__, nelem, elem_size, sw_size, + stat_size, queue_id, socket_id, z_name, z_name_sw); - if (!rss_hdr->filter_tid && rss_hdr->hash_type) { - mbuf->ol_flags |= PKT_RX_RSS_HASH; - mbuf->hash.rss = ntohl(rss_hdr->hash_val); + tz = rte_memzone_lookup(z_name); + if (tz) { + dev_debug(adapter, "%s: tz exists...returning existing..\n", + __func__); + goto alloc_sw_ring; } - if (pkt->vlan_ex) { - mbuf->ol_flags |= PKT_RX_VLAN_PKT; - mbuf->vlan_tci = ntohs(pkt->vlan); - } - rxq->stats.pkts++; - rxq->stats.rx_bytes += mbuf->pkt_len; + /* + * Allocate TX/RX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ + tz = rte_memzone_reserve_aligned(z_name, len, socket_id, + RTE_MEMZONE_IOVA_CONTIG, 4096); + if (!tz) + return NULL; - return 0; -} +alloc_sw_ring: + memset(tz->addr, 0, len); + if (sw_size) { + s = rte_zmalloc_socket(z_name_sw, nelem * sw_size, + RTE_CACHE_LINE_SIZE, socket_id); -/** - * restore_rx_bufs - put back a packet's Rx buffers - * @q: the SGE free list - * @frags: number of FL buffers to restore - * - * Puts back on an FL the Rx buffers. The buffers have already been - * unmapped and are left unmapped, we mark them so to prevent further - * unmapping attempts. - * - * This function undoes a series of @unmap_rx_buf calls when we find out - * that the current packet can't be processed right away afterall and we - * need to come back to it later. This is a very rare event and there's - * no effort to make this particularly efficient. - */ -static void restore_rx_bufs(struct sge_fl *q, int frags) -{ - while (frags--) { - if (q->cidx == 0) - q->cidx = q->size - 1; - else - q->cidx--; - q->avail++; + if (!s) { + dev_err(adapter, "%s: failed to get sw_ring memory\n", + __func__); + return NULL; + } } -} + if (metadata) + *(void **)metadata = s; -/** - * is_new_response - check if a response is newly written - * @r: the response descriptor - * @q: the response queue - * - * Returns true if a response descriptor contains a yet unprocessed - * response. 
- */ -static inline bool is_new_response(const struct rsp_ctrl *r, - const struct sge_rspq *q) -{ - return (r->u.type_gen >> S_RSPD_GEN) == q->gen; + *phys = (uint64_t)tz->iova; + return tz->addr; } #define CXGB4_MSG_AN ((void *)1) @@ -590,6 +1509,52 @@ static inline void rspq_next(struct sge_rspq *q) } } +static inline void cxgbe_set_mbuf_info(struct rte_mbuf *pkt, uint32_t ptype, + uint64_t ol_flags) +{ + pkt->packet_type |= ptype; + pkt->ol_flags |= ol_flags; +} + +static inline void cxgbe_fill_mbuf_info(struct adapter *adap, + const struct cpl_rx_pkt *cpl, + struct rte_mbuf *pkt) +{ + bool csum_ok; + u16 err_vec; + + if (adap->params.tp.rx_pkt_encap) + err_vec = G_T6_COMPR_RXERR_VEC(ntohs(cpl->err_vec)); + else + err_vec = ntohs(cpl->err_vec); + + csum_ok = cpl->csum_calc && !err_vec; + + if (cpl->vlan_ex) + cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER_VLAN, + PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED); + else + cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER, 0); + + if (cpl->l2info & htonl(F_RXF_IP)) + cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV4, + csum_ok ? PKT_RX_IP_CKSUM_GOOD : + PKT_RX_IP_CKSUM_BAD); + else if (cpl->l2info & htonl(F_RXF_IP6)) + cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV6, + csum_ok ? PKT_RX_IP_CKSUM_GOOD : + PKT_RX_IP_CKSUM_BAD); + + if (cpl->l2info & htonl(F_RXF_TCP)) + cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_TCP, + csum_ok ? PKT_RX_L4_CKSUM_GOOD : + PKT_RX_L4_CKSUM_BAD); + else if (cpl->l2info & htonl(F_RXF_UDP)) + cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_UDP, + csum_ok ? PKT_RX_L4_CKSUM_GOOD : + PKT_RX_L4_CKSUM_BAD); +} + /** * process_responses - process responses from an SGE response queue * @q: the ingress queue to process @@ -611,15 +1576,14 @@ static int process_responses(struct sge_rspq *q, int budget, int budget_left = budget; const struct rsp_ctrl *rc; struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); - struct adapter *adapter = q->adapter; while (likely(budget_left)) { + if (q->cidx == ntohs(q->stat->pidx)) + break; + rc = (const struct rsp_ctrl *) ((const char *)q->cur_desc + (q->iqe_len - sizeof(*rc))); - if (!is_new_response(rc, q)) - break; - /* * Ensure response has been read */ @@ -627,63 +1591,76 @@ static int process_responses(struct sge_rspq *q, int budget, rsp_type = G_RSPD_TYPE(rc->u.type_gen); if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) { - struct pkt_gl si; - const struct rx_sw_desc *rsd; - struct rte_mbuf *pkt = NULL; - u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; - - si.usembufs = rxq->usembufs; - /* - * In "use mbufs" mode, we don't pack multiple - * ingress packets per buffer (mbuf) so we - * should _always_ get a "New Buffer" flags - * from the SGE. Also, since we hand the - * mbuf's up to the host stack for it to - * eventually free, we don't release the mbuf's - * in the driver (in contrast to the "packed - * page" mode where the driver needs to - * release its reference on the page buffers). 
- */ - BUG_ON(!(len & F_RSPD_NEWBUF)); - len = G_RSPD_LEN(len); - si.tot_len = len; - - /* gather packet fragments */ - for (frags = 0; len; frags++) { - rsd = &rxq->fl.sdesc[rxq->fl.cidx]; - bufsz = min(get_buf_size(adapter, rsd), len); + struct sge *s = &q->adapter->sge; + unsigned int stat_pidx; + int stat_pidx_diff; + + stat_pidx = ntohs(q->stat->pidx); + stat_pidx_diff = P_IDXDIFF(q, stat_pidx); + while (stat_pidx_diff && budget_left) { + const struct rx_sw_desc *rsd = + &rxq->fl.sdesc[rxq->fl.cidx]; + const struct rss_header *rss_hdr = + (const void *)q->cur_desc; + const struct cpl_rx_pkt *cpl = + (const void *)&q->cur_desc[1]; + struct rte_mbuf *pkt, *npkt; + u32 len, bufsz; + + rc = (const struct rsp_ctrl *) + ((const char *)q->cur_desc + + (q->iqe_len - sizeof(*rc))); + + rsp_type = G_RSPD_TYPE(rc->u.type_gen); + if (unlikely(rsp_type != X_RSPD_TYPE_FLBUF)) + break; + + len = ntohl(rc->pldbuflen_qid); + BUG_ON(!(len & F_RSPD_NEWBUF)); pkt = rsd->buf; - pkt->data_len = bufsz; - pkt->pkt_len = bufsz; - si.mbufs[frags] = pkt; - len -= bufsz; - unmap_rx_buf(&rxq->fl); - } - - si.va = RTE_PTR_ADD(si.mbufs[0]->buf_addr, - si.mbufs[0]->data_off); - rte_prefetch1(si.va); - - /* - * For the "use mbuf" case here, we can end up - * chewing through our Free List very rapidly - * with one entry per Ingress packet getting - * consumed. So if the handler() successfully - * consumed the mbuf, check to see if we can - * refill the Free List incrementally in the - * loop ... - */ - si.nfrags = frags; - ret = q->handler(q, q->cur_desc, &si); - - if (unlikely(ret != 0)) { - restore_rx_bufs(&rxq->fl, frags); - } else { + npkt = pkt; + len = G_RSPD_LEN(len); + pkt->pkt_len = len; + + /* Chain mbufs into len if necessary */ + while (len) { + struct rte_mbuf *new_pkt = rsd->buf; + + bufsz = min(get_buf_size(q->adapter, + rsd), len); + new_pkt->data_len = bufsz; + unmap_rx_buf(&rxq->fl); + len -= bufsz; + npkt->next = new_pkt; + npkt = new_pkt; + pkt->nb_segs++; + rsd = &rxq->fl.sdesc[rxq->fl.cidx]; + } + npkt->next = NULL; + pkt->nb_segs--; + + cxgbe_fill_mbuf_info(q->adapter, cpl, pkt); + + if (!rss_hdr->filter_tid && + rss_hdr->hash_type) { + pkt->ol_flags |= PKT_RX_RSS_HASH; + pkt->hash.rss = + ntohl(rss_hdr->hash_val); + } + + if (cpl->vlan_ex) + pkt->vlan_tci = ntohs(cpl->vlan); + + rte_pktmbuf_adj(pkt, s->pktshift); + rxq->stats.pkts++; + rxq->stats.rx_bytes += pkt->pkt_len; rx_pkts[budget - budget_left] = pkt; - if (fl_cap(&rxq->fl) - rxq->fl.avail >= 8) - __refill_fl(q->adapter, &rxq->fl); - } + rspq_next(q); + budget_left--; + stat_pidx_diff--; + } + continue; } else if (likely(rsp_type == X_RSPD_TYPE_CPL)) { ret = q->handler(q, q->cur_desc, NULL); } else { @@ -706,7 +1683,7 @@ static int process_responses(struct sge_rspq *q, int budget, * refill the Free List. 
*/ - if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 8) + if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64) __refill_fl(q->adapter, &rxq->fl); return budget - budget_left; @@ -715,37 +1692,42 @@ static int process_responses(struct sge_rspq *q, int budget, int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts, unsigned int budget, unsigned int *work_done) { + struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); + unsigned int cidx_inc; unsigned int params; u32 val; - int err = 0; *work_done = process_responses(q, budget, rx_pkts); - params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX); - q->next_intr_params = params; - val = V_CIDXINC(*work_done) | V_SEINTARM(params); if (*work_done) { - /* - * If we don't have access to the new User GTS (T5+), - * use the old doorbell mechanism; otherwise use the new - * BAR2 mechanism. - */ - if (unlikely(!q->bar2_addr)) - t4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS), + cidx_inc = R_IDXDIFF(q, gts_idx); + + if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64) + __refill_fl(q->adapter, &rxq->fl); + + params = q->intr_params; + q->next_intr_params = params; + val = V_CIDXINC(cidx_inc) | V_SEINTARM(params); + + if (unlikely(!q->bar2_addr)) { + u32 reg = is_pf4(q->adapter) ? MYPF_REG(A_SGE_PF_GTS) : + T4VF_SGE_BASE_ADDR + + A_SGE_VF_GTS; + + t4_write_reg(q->adapter, reg, val | V_INGRESSQID((u32)q->cntxt_id)); - else { + } else { writel(val | V_INGRESSQID(q->bar2_qid), - (void *)((uintptr_t)q->bar2_addr + - SGE_UDB_GTS)); - /* - * This Write memory Barrier will force the write to - * the User Doorbell area to be flushed. + (void *)((uintptr_t)q->bar2_addr + SGE_UDB_GTS)); + /* This Write memory Barrier will force the + * write to the User Doorbell area to be + * flushed. */ wmb(); } + q->gts_idx = q->cidx; } - - return err; + return 0; } /** @@ -805,17 +1787,18 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, int ret, flsz = 0; struct fw_iq_cmd c; struct sge *s = &adap->sge; - struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct port_info *pi = eth_dev->data->dev_private; char z_name[RTE_MEMZONE_NAMESIZE]; char z_name_sw[RTE_MEMZONE_NAMESIZE]; unsigned int nb_refill; + u8 pciechan; /* Size needs to be multiple of 16, including status entry. */ - iq->size = roundup(iq->size, 16); + iq->size = cxgbe_roundup(iq->size, 16); - snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", - eth_dev->driver->pci_drv.name, fwevtq ? "fwq_ring" : "rx_ring", - eth_dev->data->port_id, queue_id); + snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s", + eth_dev->data->port_id, queue_id, + fwevtq ? "fwq_ring" : "rx_ring"); snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name); iq->desc = alloc_ring(iq->size, iq->iqe_len, 0, &iq->phys_addr, NULL, 0, @@ -825,8 +1808,23 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, memset(&c, 0, sizeof(c)); c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | - F_FW_CMD_WRITE | F_FW_CMD_EXEC | - V_FW_IQ_CMD_PFN(adap->pf) | V_FW_IQ_CMD_VFN(0)); + F_FW_CMD_WRITE | F_FW_CMD_EXEC); + + if (is_pf4(adap)) { + pciechan = pi->tx_chan; + c.op_to_vfn |= htonl(V_FW_IQ_CMD_PFN(adap->pf) | + V_FW_IQ_CMD_VFN(0)); + if (cong >= 0) + c.iqns_to_fl0congen = + htonl(F_FW_IQ_CMD_IQFLINTCONGEN | + V_FW_IQ_CMD_IQTYPE(cong ? 
+ FW_IQ_IQTYPE_NIC : + FW_IQ_IQTYPE_OFLD) | + F_FW_IQ_CMD_IQRO); + } else { + pciechan = pi->port_id; + } + c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | (sizeof(c) / 16)); c.type_to_iqandstindex = @@ -834,23 +1832,21 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, V_FW_IQ_CMD_IQASYNCH(fwevtq) | V_FW_IQ_CMD_VIID(pi->viid) | V_FW_IQ_CMD_IQANDST(intr_idx < 0) | - V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT) | + V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_STATUS_PAGE) | V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx : -intr_idx - 1)); c.iqdroprss_to_iqesize = - htons(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | + htons(V_FW_IQ_CMD_IQPCIECH(pciechan) | F_FW_IQ_CMD_IQGTSMODE | V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) | V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4)); c.iqsize = htons(iq->size); c.iqaddr = cpu_to_be64(iq->phys_addr); - if (cong >= 0) - c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN); if (fl) { struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq, fl); - enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip); + unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); /* * Allocate the ring for the hardware free list (with space @@ -862,12 +1858,11 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, */ if (fl->size < s->fl_starve_thres - 1 + 2 * 8) fl->size = s->fl_starve_thres - 1 + 2 * 8; - fl->size = roundup(fl->size, 8); + fl->size = cxgbe_roundup(fl->size, 8); - snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", - eth_dev->driver->pci_drv.name, - fwevtq ? "fwq_ring" : "fl_ring", - eth_dev->data->port_id, queue_id); + snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s", + eth_dev->data->port_id, queue_id, + fwevtq ? "fwq_ring" : "fl_ring"); snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name); fl->desc = alloc_ring(fl->size, sizeof(__be64), @@ -885,7 +1880,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, 0 : F_FW_IQ_CMD_FL0PACKEN) | F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | F_FW_IQ_CMD_FL0PADEN); - if (cong >= 0) + if (is_pf4(adap) && cong >= 0) c.iqns_to_fl0congen |= htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) | F_FW_IQ_CMD_FL0CONGCIF | @@ -896,19 +1891,26 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, * Hence maximum allowed burst size will be 448 bytes. */ c.fl0dcaen_to_fl0cidxfthresh = - htons(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) | - V_FW_IQ_CMD_FL0FBMAX((chip <= CHELSIO_T5) ? - X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B)); + htons(V_FW_IQ_CMD_FL0FBMIN(chip_ver <= CHELSIO_T5 ? + X_FETCHBURSTMIN_128B : + X_FETCHBURSTMIN_64B) | + V_FW_IQ_CMD_FL0FBMAX(chip_ver <= CHELSIO_T5 ? 
+ X_FETCHBURSTMAX_512B : + X_FETCHBURSTMAX_256B)); c.fl0size = htons(flsz); c.fl0addr = cpu_to_be64(fl->addr); } - ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + if (is_pf4(adap)) + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + else + ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c); if (ret) goto err; iq->cur_desc = iq->desc; iq->cidx = 0; + iq->gts_idx = 0; iq->gen = 1; iq->next_intr_params = iq->intr_params; iq->cntxt_id = ntohs(c.iqid); @@ -916,8 +1918,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, iq->bar2_addr = bar2_address(adap, iq->cntxt_id, T4_BAR2_QTYPE_INGRESS, &iq->bar2_qid); iq->size--; /* subtract status entry */ + iq->stat = (void *)&iq->desc[iq->size * 8]; iq->eth_dev = eth_dev; iq->handler = hnd; + iq->port_id = pi->pidx; iq->mb_pool = mp; /* set offset to -1 to distinguish ingress queues without FL */ @@ -957,7 +1961,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, * a lot easier to fix in one place ... For now we do something very * simple (and hopefully less wrong). */ - if (!is_t4(adap->params.chip) && cong >= 0) { + if (is_pf4(adap) && !is_t4(adap->params.chip) && cong >= 0) { u32 param, val; int i; @@ -986,7 +1990,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, refill_fl_err: t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP, - iq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff); + iq->cntxt_id, fl->cntxt_id, 0xffff); fl_nomem: ret = -ENOMEM; err: @@ -1004,6 +2008,191 @@ err: return ret; } +static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id, + unsigned int abs_id) +{ + q->cntxt_id = id; + q->abs_id = abs_id; + q->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS, + &q->bar2_qid); + q->cidx = 0; + q->pidx = 0; + q->dbidx = 0; + q->in_use = 0; + q->equeidx = 0; + q->coalesce.idx = 0; + q->coalesce.len = 0; + q->coalesce.flits = 0; + q->last_coal_idx = 0; + q->last_pidx = 0; + q->stat = (void *)&q->desc[q->size]; +} + +int t4_sge_eth_txq_start(struct sge_eth_txq *txq) +{ + /* + * TODO: For flow-control, queue may be stopped waiting to reclaim + * credits. + * Ensure queue is in EQ_STOPPED state before starting it. 
+ */ + if (!(txq->flags & EQ_STOPPED)) + return -(EBUSY); + + txq->flags &= ~EQ_STOPPED; + + return 0; +} + +int t4_sge_eth_txq_stop(struct sge_eth_txq *txq) +{ + txq->flags |= EQ_STOPPED; + + return 0; +} + +int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, + struct rte_eth_dev *eth_dev, uint16_t queue_id, + unsigned int iqid, int socket_id) +{ + int ret, nentries; + struct fw_eq_eth_cmd c; + struct sge *s = &adap->sge; + struct port_info *pi = eth_dev->data->dev_private; + char z_name[RTE_MEMZONE_NAMESIZE]; + char z_name_sw[RTE_MEMZONE_NAMESIZE]; + u8 pciechan; + + /* Add status entries */ + nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); + + snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s", + eth_dev->data->port_id, queue_id, "tx_ring"); + snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name); + + txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc), + sizeof(struct tx_sw_desc), &txq->q.phys_addr, + &txq->q.sdesc, s->stat_len, queue_id, + socket_id, z_name, z_name_sw); + if (!txq->q.desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | + F_FW_CMD_WRITE | F_FW_CMD_EXEC); + if (is_pf4(adap)) { + pciechan = pi->tx_chan; + c.op_to_vfn |= htonl(V_FW_EQ_ETH_CMD_PFN(adap->pf) | + V_FW_EQ_ETH_CMD_VFN(0)); + } else { + pciechan = pi->port_id; + } + + c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC | + F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16)); + c.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE | + V_FW_EQ_ETH_CMD_VIID(pi->viid)); + c.fetchszm_to_iqid = + htonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | + V_FW_EQ_ETH_CMD_PCIECHN(pciechan) | + F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid)); + c.dcaen_to_eqsize = + htonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | + V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | + V_FW_EQ_ETH_CMD_EQSIZE(nentries)); + c.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr); + + if (is_pf4(adap)) + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + else + ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c); + if (ret) { + rte_free(txq->q.sdesc); + txq->q.sdesc = NULL; + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)), + G_FW_EQ_ETH_CMD_PHYSEQID(ntohl(c.physeqid_pkd))); + txq->stats.tso = 0; + txq->stats.pkts = 0; + txq->stats.tx_cso = 0; + txq->stats.coal_wr = 0; + txq->stats.vlan_ins = 0; + txq->stats.tx_bytes = 0; + txq->stats.coal_pkts = 0; + txq->stats.mapping_err = 0; + txq->flags |= EQ_STOPPED; + txq->eth_dev = eth_dev; + txq->data = eth_dev->data; + t4_os_lock_init(&txq->txq_lock); + return 0; +} + +int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, + struct rte_eth_dev *eth_dev, uint16_t queue_id, + unsigned int iqid, int socket_id) +{ + int ret, nentries; + struct fw_eq_ctrl_cmd c; + struct sge *s = &adap->sge; + struct port_info *pi = eth_dev->data->dev_private; + char z_name[RTE_MEMZONE_NAMESIZE]; + char z_name_sw[RTE_MEMZONE_NAMESIZE]; + + /* Add status entries */ + nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); + + snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s", + eth_dev->data->port_id, queue_id, "ctrl_tx_ring"); + snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name); + + txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc), + 0, &txq->q.phys_addr, + NULL, 0, queue_id, + socket_id, z_name, z_name_sw); + if (!txq->q.desc) + return -ENOMEM; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = 
htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | + F_FW_CMD_WRITE | F_FW_CMD_EXEC | + V_FW_EQ_CTRL_CMD_PFN(adap->pf) | + V_FW_EQ_CTRL_CMD_VFN(0)); + c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_ALLOC | + F_FW_EQ_CTRL_CMD_EQSTART | (sizeof(c) / 16)); + c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(0)); + c.physeqid_pkd = htonl(0); + c.fetchszm_to_iqid = + htonl(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | + V_FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) | + F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(iqid)); + c.dcaen_to_eqsize = + htonl(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) | + V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) | + V_FW_EQ_CTRL_CMD_EQSIZE(nentries)); + c.eqaddr = cpu_to_be64(txq->q.phys_addr); + + ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); + if (ret) { + txq->q.desc = NULL; + return ret; + } + + init_txq(adap, &txq->q, G_FW_EQ_CTRL_CMD_EQID(ntohl(c.cmpliqid_eqid)), + G_FW_EQ_CTRL_CMD_EQID(ntohl(c. physeqid_pkd))); + txq->adapter = adap; + txq->full = 0; + return 0; +} + +static void free_txq(struct sge_txq *q) +{ + q->cntxt_id = 0; + q->sdesc = NULL; + q->desc = NULL; +} + static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl) { @@ -1024,6 +2213,35 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, } } +/* + * Clear all queues of the port + * + * Note: This function must only be called after rx and tx path + * of the port have been disabled. + */ +void t4_sge_eth_clear_queues(struct port_info *pi) +{ + int i; + struct adapter *adap = pi->adapter; + struct sge_eth_rxq *rxq = &adap->sge.ethrxq[pi->first_qset]; + struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset]; + + for (i = 0; i < pi->n_rx_qsets; i++, rxq++) { + if (rxq->rspq.desc) + t4_sge_eth_rxq_stop(adap, &rxq->rspq); + } + for (i = 0; i < pi->n_tx_qsets; i++, txq++) { + if (txq->q.desc) { + struct sge_txq *q = &txq->q; + + t4_sge_eth_txq_stop(txq); + reclaim_completed_tx(q); + free_tx_desc(q, q->size); + q->equeidx = q->pidx; + } + } +} + void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq) { if (rxq->rspq.desc) { @@ -1032,6 +2250,69 @@ void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq) } } +void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq) +{ + if (txq->q.desc) { + t4_sge_eth_txq_stop(txq); + reclaim_completed_tx(&txq->q); + t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, txq->q.cntxt_id); + free_tx_desc(&txq->q, txq->q.size); + rte_free(txq->q.sdesc); + free_txq(&txq->q); + } +} + +void t4_sge_tx_monitor_start(struct adapter *adap) +{ + rte_eal_alarm_set(50, tx_timer_cb, (void *)adap); +} + +void t4_sge_tx_monitor_stop(struct adapter *adap) +{ + rte_eal_alarm_cancel(tx_timer_cb, (void *)adap); +} + +/** + * t4_free_sge_resources - free SGE resources + * @adap: the adapter + * + * Frees resources used by the SGE queue sets. 
+ */ +void t4_free_sge_resources(struct adapter *adap) +{ + unsigned int i; + struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0]; + struct sge_eth_txq *txq = &adap->sge.ethtxq[0]; + + /* clean up Ethernet Tx/Rx queues */ + for (i = 0; i < adap->sge.max_ethqsets; i++, rxq++, txq++) { + /* Free only the queues allocated */ + if (rxq->rspq.desc) { + t4_sge_eth_rxq_release(adap, rxq); + rxq->rspq.eth_dev = NULL; + } + if (txq->q.desc) { + t4_sge_eth_txq_release(adap, txq); + txq->eth_dev = NULL; + } + } + + /* clean up control Tx queues */ + for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { + struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; + + if (cq->q.desc) { + reclaim_completed_tx_imm(&cq->q); + t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0, + cq->q.cntxt_id); + free_txq(&cq->q); + } + } + + if (adap->sge.fw_evtq.desc) + free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); +} + /** * t4_sge_init - initialize SGE * @adap: the adapter @@ -1099,7 +2380,7 @@ static int t4_sge_init_soft(struct adapter *adap) * The Page Size Buffer must be exactly equal to our Page Size and the * Large Page Size Buffer should be 0 (per above) or a power of 2. */ - if (fl_small_pg != PAGE_SIZE || + if (fl_small_pg != CXGBE_PAGE_SIZE || (fl_large_pg & (fl_large_pg - 1)) != 0) { dev_err(adap, "bad SGE FL page buffer sizes [%d, %d]\n", fl_small_pg, fl_large_pg); @@ -1157,8 +2438,7 @@ static int t4_sge_init_soft(struct adapter *adap) int t4_sge_init(struct adapter *adap) { struct sge *s = &adap->sge; - u32 sge_control, sge_control2, sge_conm_ctrl; - unsigned int ingpadboundary, ingpackboundary; + u32 sge_control, sge_conm_ctrl; int ret, egress_threshold; /* @@ -1168,34 +2448,7 @@ int t4_sge_init(struct adapter *adap) sge_control = t4_read_reg(adap, A_SGE_CONTROL); s->pktshift = G_PKTSHIFT(sge_control); s->stat_len = (sge_control & F_EGRSTATUSPAGESIZE) ? 128 : 64; - - /* - * T4 uses a single control field to specify both the PCIe Padding and - * Packing Boundary. T5 introduced the ability to specify these - * separately. The actual Ingress Packet Data alignment boundary - * within Packed Buffer Mode is the maximum of these two - * specifications. - */ - ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + - X_INGPADBOUNDARY_SHIFT); - s->fl_align = ingpadboundary; - - if (!is_t4(adap->params.chip) && !adap->use_unpacked_mode) { - /* - * T5 has a weird interpretation of one of the PCIe Packing - * Boundary values. No idea why ... 
- */ - sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2); - ingpackboundary = G_INGPACKBOUNDARY(sge_control2); - if (ingpackboundary == X_INGPACKBOUNDARY_16B) - ingpackboundary = 16; - else - ingpackboundary = 1 << (ingpackboundary + - X_INGPACKBOUNDARY_SHIFT); - - s->fl_align = max(ingpadboundary, ingpackboundary); - } - + s->fl_align = t4_fl_pkt_align(adap); ret = t4_sge_init_soft(adap); if (ret < 0) { dev_err(adap, "%s: t4_sge_init_soft failed, error %d\n", @@ -1224,3 +2477,182 @@ int t4_sge_init(struct adapter *adap) return 0; } + +int t4vf_sge_init(struct adapter *adap) +{ + struct sge_params *sge_params = &adap->params.sge; + u32 sge_ingress_queues_per_page; + u32 sge_egress_queues_per_page; + u32 sge_control, sge_control2; + u32 fl_small_pg, fl_large_pg; + u32 sge_ingress_rx_threshold; + u32 sge_timer_value_0_and_1; + u32 sge_timer_value_2_and_3; + u32 sge_timer_value_4_and_5; + u32 sge_congestion_control; + struct sge *s = &adap->sge; + unsigned int s_hps, s_qpp; + u32 sge_host_page_size; + u32 params[7], vals[7]; + int v; + + /* query basic params from fw */ + params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL)); + params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_HOST_PAGE_SIZE)); + params[2] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE0)); + params[3] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE1)); + params[4] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_0_AND_1)); + params[5] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_2_AND_3)); + params[6] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_4_AND_5)); + v = t4vf_query_params(adap, 7, params, vals); + if (v != FW_SUCCESS) + return v; + + sge_control = vals[0]; + sge_host_page_size = vals[1]; + fl_small_pg = vals[2]; + fl_large_pg = vals[3]; + sge_timer_value_0_and_1 = vals[4]; + sge_timer_value_2_and_3 = vals[5]; + sge_timer_value_4_and_5 = vals[6]; + + /* + * Start by vetting the basic SGE parameters which have been set up by + * the Physical Function Driver. + */ + + /* We only bother using the Large Page logic if the Large Page Buffer + * is larger than our Page Size Buffer. + */ + if (fl_large_pg <= fl_small_pg) + fl_large_pg = 0; + + /* The Page Size Buffer must be exactly equal to our Page Size and the + * Large Page Size Buffer should be 0 (per above) or a power of 2. 
+ */ + if (fl_small_pg != CXGBE_PAGE_SIZE || + (fl_large_pg & (fl_large_pg - 1)) != 0) { + dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n", + fl_small_pg, fl_large_pg); + return -EINVAL; + } + + if ((sge_control & F_RXPKTCPLMODE) != + V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) { + dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n"); + return -EINVAL; + } + + + /* Grab ingress packing boundary from SGE_CONTROL2 for */ + params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL2)); + v = t4vf_query_params(adap, 1, params, vals); + if (v != FW_SUCCESS) { + dev_err(adapter, "Unable to get SGE Control2; " + "probably old firmware.\n"); + return v; + } + sge_control2 = vals[0]; + + params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_RX_THRESHOLD)); + params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_CONM_CTRL)); + v = t4vf_query_params(adap, 2, params, vals); + if (v != FW_SUCCESS) + return v; + sge_ingress_rx_threshold = vals[0]; + sge_congestion_control = vals[1]; + params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_EGRESS_QUEUES_PER_PAGE_VF)); + params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | + V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_QUEUES_PER_PAGE_VF)); + v = t4vf_query_params(adap, 2, params, vals); + if (v != FW_SUCCESS) { + dev_warn(adap, "Unable to get VF SGE Queues/Page; " + "probably old firmware.\n"); + return v; + } + sge_egress_queues_per_page = vals[0]; + sge_ingress_queues_per_page = vals[1]; + + /* + * We need the Queues/Page for our VF. This is based on the + * PF from which we're instantiated and is indexed in the + * register we just read. + */ + s_hps = (S_HOSTPAGESIZEPF0 + + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adap->pf); + sge_params->hps = + ((sge_host_page_size >> s_hps) & M_HOSTPAGESIZEPF0); + + s_qpp = (S_QUEUESPERPAGEPF0 + + (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adap->pf); + sge_params->eq_qpp = + ((sge_egress_queues_per_page >> s_qpp) + & M_QUEUESPERPAGEPF0); + sge_params->iq_qpp = + ((sge_ingress_queues_per_page >> s_qpp) + & M_QUEUESPERPAGEPF0); + + /* + * Now translate the queried parameters into our internal forms. + */ + if (fl_large_pg) + s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; + s->stat_len = ((sge_control & F_EGRSTATUSPAGESIZE) + ? 128 : 64); + s->pktshift = G_PKTSHIFT(sge_control); + s->fl_align = t4vf_fl_pkt_align(adap, sge_control, sge_control2); + + /* + * A FL with <= fl_starve_thres buffers is starving and a periodic + * timer will attempt to refill it. This needs to be larger than the + * SGE's Egress Congestion Threshold. If it isn't, then we can get + * stuck waiting for new packets while the SGE is waiting for us to + * give it more Free List entries. (Note that the SGE's Egress + * Congestion Threshold is in units of 2 Free List pointers.) + */ + switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { + case CHELSIO_T5: + s->fl_starve_thres = + G_EGRTHRESHOLDPACKING(sge_congestion_control); + break; + case CHELSIO_T6: + default: + s->fl_starve_thres = + G_T6_EGRTHRESHOLDPACKING(sge_congestion_control); + break; + } + s->fl_starve_thres = s->fl_starve_thres * 2 + 1; + + /* + * Save RX interrupt holdoff timer values and counter + * threshold values from the SGE parameters. 
+ */ + s->timer_val[0] = core_ticks_to_us(adap, + G_TIMERVALUE0(sge_timer_value_0_and_1)); + s->timer_val[1] = core_ticks_to_us(adap, + G_TIMERVALUE1(sge_timer_value_0_and_1)); + s->timer_val[2] = core_ticks_to_us(adap, + G_TIMERVALUE2(sge_timer_value_2_and_3)); + s->timer_val[3] = core_ticks_to_us(adap, + G_TIMERVALUE3(sge_timer_value_2_and_3)); + s->timer_val[4] = core_ticks_to_us(adap, + G_TIMERVALUE4(sge_timer_value_4_and_5)); + s->timer_val[5] = core_ticks_to_us(adap, + G_TIMERVALUE5(sge_timer_value_4_and_5)); + s->counter_val[0] = G_THRESHOLD_0(sge_ingress_rx_threshold); + s->counter_val[1] = G_THRESHOLD_1(sge_ingress_rx_threshold); + s->counter_val[2] = G_THRESHOLD_2(sge_ingress_rx_threshold); + s->counter_val[3] = G_THRESHOLD_3(sge_ingress_rx_threshold); + return 0; +}