/*-
 *   BSD LICENSE
 *
 * Copyright(c) 2014-2015 Chelsio Communications.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Chelsio Communications nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
                                           struct sge_eth_txq *txq);

/*
 * Max number of Rx buffers we replenish at a time.
 */
#define MAX_RX_REFILL 64U

#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256
/*
 * Rx buffer sizes for "usembufs" Free List buffers (one ingress packet
 * per mbuf buffer).  We currently only support two sizes for 1500- and
 * 9000-byte MTUs.  We could easily support more but there doesn't seem
 * to be much need for that ...
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000

static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
                                          unsigned int mtu)
{
        struct sge *s = &adapter->sge;

        return CXGBE_ALIGN(s->pktshift + ETHER_HDR_LEN + VLAN_HLEN + mtu,
                           s->fl_align);
}

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
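/*
 * For example, assuming s->pktshift == 2 and s->fl_align == 64 (typical
 * values; both are read back from the hardware in t4_sge_init()):
 *
 *      fl_mtu_bufsize(adap, FL_MTU_SMALL)
 *              = CXGBE_ALIGN(2 + 14 + 4 + 1500, 64)
 *              = CXGBE_ALIGN(1520, 64) = 1536 bytes
 *
 * i.e. the bytes actually needed for a max-size 1500-byte frame plus a
 * VLAN tag, rounded up to the next alignment boundary.
 */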
/*
 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array.  We also use bit 4, when the buffer has been unmapped
 * for DMA, but this is of course never sent to the hardware and is only used
 * to prevent double unmappings.  All of the above requires that the Free List
 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte aligned or a power of 2 greater in alignment.  Since the SGE's
 * minimal Free List Buffer alignment is 32 bytes, this works out for us ...
 */
enum {
        RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
        RX_BUF_SIZE      = 0x0f,   /* bottom four bits select the buf size */
        RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

        /*
         * XXX We shouldn't depend on being able to use these indices.
         * XXX Especially when some other Master PF has initialized the
         * XXX adapter or we use the Firmware Configuration File.  We
         * XXX should really search through the Host Buffer Size register
         * XXX array for the appropriately sized buffer indices.
         */
        RX_SMALL_PG_BUF  = 0x0,    /* small (PAGE_SIZE) page buffer */
        RX_LARGE_PG_BUF  = 0x1,    /* large page buffer */

        RX_SMALL_MTU_BUF = 0x2,    /* small MTU buffer */
        RX_LARGE_MTU_BUF = 0x3,    /* large MTU buffer */
};
/**
 * txq_avail - return the number of available slots in a Tx queue
 * @q: the Tx queue
 *
 * Returns the number of descriptors in a Tx queue available to write new
 * packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
        return q->size - 1 - q->in_use;
}
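/*
 * Note that one descriptor is deliberately kept unused (the "- 1" above):
 * if all q->size slots could be in use, a completely full ring would be
 * indistinguishable from a completely empty one, since pidx == cidx in
 * both cases.
 */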
static int map_mbuf(struct rte_mbuf *mbuf, dma_addr_t *addr)
{
        struct rte_mbuf *m = mbuf;

        for (; m; m = m->next, addr++) {
                *addr = m->buf_physaddr + rte_pktmbuf_headroom(m);
                if (*addr == 0)
                        goto out_err;
        }
        return 0;

out_err:
        return -ENOMEM;
}
/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct sge_txq *q, unsigned int n)
{
        struct tx_sw_desc *d;
        unsigned int cidx = 0;

        d = &q->sdesc[cidx];
        while (n--) {
                if (d->mbuf) {                       /* an SGL is present */
                        rte_pktmbuf_free(d->mbuf);
                        d->mbuf = NULL;
                }
                if (d->coalesce.idx) {
                        int i;

                        for (i = 0; i < d->coalesce.idx; i++) {
                                rte_pktmbuf_free(d->coalesce.mbuf[i]);
                                d->coalesce.mbuf[i] = NULL;
                        }
                        d->coalesce.idx = 0;
                }
                ++d;
                if (++cidx == q->size) {
                        cidx = 0;
                        d = q->sdesc;
                }
                RTE_MBUF_PREFETCH_TO_FREE(&q->sdesc->mbuf->pool);
        }
}
static void reclaim_tx_desc(struct sge_txq *q, unsigned int n)
{
        unsigned int cidx = q->cidx;

        while (n--) {
                if (++cidx == q->size)
                        cidx = 0;
        }
        q->cidx = cidx;
}
/**
 * fl_cap - return the capacity of a free-buffer list
 * @fl: the FL
 *
 * Returns the capacity of a free-buffer list.  The capacity is less than
 * the size because one descriptor needs to be left unpopulated, otherwise
 * HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
        return fl->size - 8;   /* 1 descriptor = 8 buffers */
}
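/*
 * Example: a Free List created with fl->size == 1024 buffer pointers can
 * only ever be filled with 1016 buffers; the final 8 pointers (exactly one
 * hardware descriptor's worth) stay unpopulated so the hardware never sees
 * the producer index wrap onto the consumer index.
 */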
/**
 * fl_starving - return whether a Free List is starving.
 * @adapter: pointer to the adapter
 * @fl: the Free List
 *
 * Tests specified Free List to see whether the number of buffers
 * available to the hardware has fallen below our "starvation"
 * threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
                               const struct sge_fl *fl)
{
        const struct sge *s = &adapter->sge;

        return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
/**
 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @q: the SGE free list to free buffers from
 * @n: how many buffers to free
 *
 * Release the next @n buffers on an SGE free-buffer Rx queue.  The
 * buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct sge_fl *q, int n)
{
        unsigned int cidx = q->cidx;
        struct rx_sw_desc *d;

        d = &q->sdesc[cidx];
        while (n--) {
                if (d->buf) {
                        rte_pktmbuf_free(d->buf);
                        d->buf = NULL;
                }
                ++d;
                if (++cidx == q->size) {
                        cidx = 0;
                        d = q->sdesc;
                }
        }
        q->cidx = cidx;
}
/**
 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 * @q: the SGE free list
 *
 * Unmap the current buffer on an SGE free-buffer Rx queue.  The
 * buffer must be made inaccessible to HW before calling this function.
 *
 * This is similar to @free_rx_bufs above but does not free the buffer.
 * Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct sge_fl *q)
{
        if (++q->cidx == q->size)
                q->cidx = 0;
        q->avail--;
}
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
        if (q->pend_cred >= 64) {
                u32 val = adap->params.arch.sge_fl_db;

                if (is_t4(adap->params.chip))
                        val |= V_PIDX(q->pend_cred / 8);
                else
                        val |= V_PIDX_T5(q->pend_cred / 8);

                /*
                 * Make sure all memory writes to the Free List queue are
                 * committed before we tell the hardware about them.
                 */
                rte_wmb();

                /*
                 * If we don't have access to the new User Doorbell (T5+), use
                 * the old doorbell mechanism; otherwise use the new BAR2
                 * mechanism.
                 */
                if (unlikely(!q->bar2_addr)) {
                        t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
                                     val | V_QID(q->cntxt_id));
                } else {
                        writel(val | V_QID(q->bar2_qid),
                               (void *)((uintptr_t)q->bar2_addr +
                                        SGE_UDB_KDOORBELL));

                        /*
                         * This Write Memory Barrier will force the write to
                         * the User Doorbell area to be flushed.
                         */
                        rte_wmb();
                }
                q->pend_cred &= 7;
        }
}
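/*
 * Credit accounting example: the SGE consumes Free List pointers in units
 * of 8 (one descriptor), so with q->pend_cred == 70 the doorbell above
 * advertises 70 / 8 = 8 new descriptors, and "q->pend_cred &= 7" carries
 * the remaining 6 buffers over to the next doorbell write.
 */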
static inline void set_rx_sw_desc(struct rx_sw_desc *sd, void *buf,
                                  dma_addr_t mapping)
{
        sd->buf = buf;
        sd->dma_addr = mapping;      /* includes size low bits */
}
/**
 * refill_fl_usembufs - refill an SGE Rx buffer ring with mbufs
 * @adap: the adapter
 * @q: the ring to refill
 * @n: the number of new buffers to allocate
 *
 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 * allocated with the supplied gfp flags.  The caller must assure that
 * @n does not exceed the queue's capacity.  If afterwards the queue is
 * found critically low mark it as starving in the bitmap of starving FLs.
 *
 * Returns the number of buffers allocated.
 */
static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
                                       int n)
{
        struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, fl);
        unsigned int cred = q->avail;
        __be64 *d = &q->desc[q->pidx];
        struct rx_sw_desc *sd = &q->sdesc[q->pidx];
        unsigned int buf_size_idx = RX_SMALL_MTU_BUF;
        struct rte_mbuf *buf_bulk[n];
        int ret, i;

        ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk, n);
        if (unlikely(ret != 0)) {
                dev_debug(adap, "%s: failed to allocate fl entries in bulk ..\n",
                          __func__);
                q->alloc_failed++;
                rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
                goto out;
        }

        for (i = 0; i < n; i++) {
                struct rte_mbuf *mbuf = buf_bulk[i];
                dma_addr_t mapping;

                if (!mbuf) {
                        dev_debug(adap, "%s: mbuf alloc failed\n", __func__);
                        q->alloc_failed++;
                        rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
                        goto out;
                }

                rte_mbuf_refcnt_set(mbuf, 1);
                mbuf->data_off = RTE_PKTMBUF_HEADROOM;
                mbuf->next = NULL;
                mbuf->nb_segs = 1;
                mbuf->port = rxq->rspq.port_id;

                mapping = (dma_addr_t)(mbuf->buf_physaddr + mbuf->data_off);
                mapping |= buf_size_idx;
                *d++ = cpu_to_be64(mapping);
                set_rx_sw_desc(sd, mbuf, mapping);
                sd++;

                q->avail++;
                if (++q->pidx == q->size) {
                        q->pidx = 0;
                        sd = q->sdesc;
                        d = q->desc;
                }
        }

out:    cred = q->avail - cred;
        q->pend_cred += cred;
        ring_fl_db(adap, q);

        if (unlikely(fl_starving(adap, q))) {
                /*
                 * Make sure data has been written to free list
                 */
                rte_wmb();
                q->low++;
        }

        return cred;
}
/**
 * refill_fl - refill an SGE Rx buffer ring with mbufs
 * @adap: the adapter
 * @q: the ring to refill
 * @n: the number of new buffers to allocate
 *
 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
 * allocated with the supplied gfp flags.  The caller must assure that
 * @n does not exceed the queue's capacity.  Returns the number of buffers
 * allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n)
{
        return refill_fl_usembufs(adap, q, n);
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
        refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail));
}
/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
        int hw_cidx = ntohs(q->stat->cidx);

        hw_cidx -= q->cidx;
        if (hw_cidx < 0)
                return hw_cidx + q->size;
        return hw_cidx;
}
/**
 * reclaim_completed_tx - reclaims completed Tx descriptors
 * @q: the Tx queue to reclaim completed descriptors from
 *
 * Reclaims Tx descriptors that the SGE has indicated it has processed.
 */
void reclaim_completed_tx(struct sge_txq *q)
{
        unsigned int avail = reclaimable(q);

        do {
                /* reclaim as much as possible */
                reclaim_tx_desc(q, avail);
                q->in_use -= avail;
                avail = reclaimable(q);
        } while (avail);
}
/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
        /*
         * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
         * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
         * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
         * repeated sequences of { Length[i], Length[i+1], Address[i],
         * Address[i+1] } (this ensures that all addresses are on 64-bit
         * boundaries).  If N is even, then Length[N+1] should be set to 0 and
         * Address[N+1] is omitted.
         *
         * The following calculation incorporates all of the above.  It's
         * somewhat hard to follow but, briefly:  the "+2" accounts for the
         * first two flits which include the DSGL header, Length0 and
         * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
         * flits for every pair of the remaining N) +1 if (n-1) is odd; and
         * finally the "+((n-1)&1)" adds the one remaining flit needed if
         * (n-1) is odd.
         */
        n--;
        return (3 * n) / 2 + (n & 1) + 2;
}
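/*
 * Worked examples (n is the total number of SGL entries, so after the
 * "n--" above the formula operates on the remaining entries):
 *
 *      n = 2:  (3*1)/2 + (1&1) + 2 = 1 + 1 + 2 = 4 flits
 *      n = 3:  (3*2)/2 + (2&1) + 2 = 3 + 0 + 2 = 5 flits
 *      n = 8:  (3*7)/2 + (7&1) + 2 = 10 + 1 + 2 = 13 flits
 */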
/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Returns the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
        return DIV_ROUND_UP(n, 8);
}
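/*
 * A Tx descriptor is 64 bytes, i.e. 8 flits of 8 bytes each, hence the
 * division by 8.  E.g. the 13-flit SGL example above occupies
 * DIV_ROUND_UP(13, 8) = 2 Tx descriptors.
 */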
/**
 * is_eth_imm - can an Ethernet packet be sent as immediate data?
 * @m: the packet
 *
 * Returns whether an Ethernet packet is small enough to fit as
 * immediate data.  Return value corresponds to the headroom required.
 */
static inline int is_eth_imm(const struct rte_mbuf *m)
{
        unsigned int hdrlen = (m->ol_flags & PKT_TX_TCP_SEG) ?
                              sizeof(struct cpl_tx_pkt_lso_core) : 0;

        hdrlen += sizeof(struct cpl_tx_pkt);
        if (m->pkt_len <= MAX_IMM_TX_PKT_LEN - hdrlen)
                return hdrlen;

        return 0;
}
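/*
 * Illustration (the CPL header size here is only an example; the real
 * value is whatever the firmware headers define): with
 * MAX_IMM_TX_PKT_LEN == 256 and a 16-byte cpl_tx_pkt, a non-TSO packet of
 * up to 240 bytes would be copied directly into the Work Request instead
 * of being referenced through an SGL and DMA-read separately.
 */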
/**
 * calc_tx_flits - calculate the number of flits for a packet Tx WR
 * @m: the packet
 *
 * Returns the number of flits needed for a Tx WR for the given Ethernet
 * packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct rte_mbuf *m)
{
        unsigned int flits;
        int hdrlen;

        /*
         * If the mbuf is small enough, we can pump it out as a work request
         * with only immediate data.  In that case we just have to have the
         * TX Packet header plus the mbuf data in the Work Request.
         */
        hdrlen = is_eth_imm(m);
        if (hdrlen)
                return DIV_ROUND_UP(m->pkt_len + hdrlen, sizeof(__be64));

        /*
         * Otherwise, we're going to have to construct a Scatter gather list
         * of the mbuf body and fragments.  We also include the flits necessary
         * for the TX Packet Work Request and CPL.  We always have a firmware
         * Write Header (incorporated as part of the cpl_tx_pkt_lso and
         * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
         * message or, if we're doing a Large Send Offload, an LSO CPL message
         * with an embedded TX Packet Write CPL message.
         */
        flits = sgl_len(m->nb_segs);
        if (m->tso_segsz)
                flits += (sizeof(struct fw_eth_tx_pkt_wr) +
                          sizeof(struct cpl_tx_pkt_lso_core) +
                          sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
        else
                flits += (sizeof(struct fw_eth_tx_pkt_wr) +
                          sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
        return flits;
}
/**
 * write_sgl - populate a scatter/gather list for a packet
 * @mbuf: the packet
 * @q: the Tx queue we are writing into
 * @sgl: starting location for writing the SGL
 * @end: points right after the end of the SGL
 * @start: start offset into mbuf main-body data to include in the SGL
 * @addr: address of mapped region
 *
 * Generates a scatter/gather list for the buffers that make up a packet.
 * The caller must provide adequate space for the SGL that will be written.
 * The SGL includes all of the packet's page fragments and the data in its
 * main body except for the first @start bytes.  @sgl must be 16-byte
 * aligned and within a Tx descriptor with available space.  @end points
 * right after the end of the SGL but does not account for any potential
 * wrap around, i.e., @end > @sgl.
 */
static void write_sgl(struct rte_mbuf *mbuf, struct sge_txq *q,
                      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
                      const dma_addr_t *addr)
{
        unsigned int i, len;
        struct ulptx_sge_pair *to;
        struct rte_mbuf *m = mbuf;
        unsigned int nfrags = m->nb_segs;
        struct ulptx_sge_pair buf[nfrags / 2];

        len = m->data_len - start;
        sgl->len0 = htonl(len);
        sgl->addr0 = rte_cpu_to_be_64(addr[0]);

        sgl->cmd_nsge = htonl(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
                              V_ULPTX_NSGE(nfrags));
        if (likely(--nfrags == 0))
                return;
        /*
         * Most of the complexity below deals with the possibility we hit the
         * end of the queue in the middle of writing the SGL.  For this case
         * only we create the SGL in a temporary buffer and then copy it.
         */
        to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

        for (i = 0; nfrags >= 2; nfrags -= 2, to++) {
                m = m->next;
                to->len[0] = rte_cpu_to_be_32(m->data_len);
                to->addr[0] = rte_cpu_to_be_64(addr[++i]);
                m = m->next;
                to->len[1] = rte_cpu_to_be_32(m->data_len);
                to->addr[1] = rte_cpu_to_be_64(addr[++i]);
        }
        if (nfrags) {
                m = m->next;
                to->len[0] = rte_cpu_to_be_32(m->data_len);
                to->len[1] = rte_cpu_to_be_32(0);
                to->addr[0] = rte_cpu_to_be_64(addr[i + 1]);
        }
        if (unlikely((u8 *)end > (u8 *)q->stat)) {
                unsigned int part0 = RTE_PTR_DIFF((u8 *)q->stat,
                                                  (u8 *)sgl->sge);
                unsigned int part1;

                if (likely(part0))
                        memcpy(sgl->sge, buf, part0);
                part1 = RTE_PTR_DIFF((u8 *)end, (u8 *)q->stat);
                rte_memcpy(q->desc, RTE_PTR_ADD((u8 *)buf, part0), part1);
                end = RTE_PTR_ADD((void *)q->desc, part1);
        }
        if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
                *(u64 *)end = 0;
}
#define IDXDIFF(head, tail, wrap) \
        ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))

#define Q_IDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->size)
#define R_IDXDIFF(q, idx) IDXDIFF((q)->cidx, (q)->idx, (q)->size)
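/*
 * IDXDIFF computes how far "head" has advanced past "tail" on a ring of
 * "wrap" entries, handling wrap-around.  E.g. with pidx == 5,
 * dbidx == 1020 and size == 1024:
 *
 *      Q_IDXDIFF(q, dbidx) = 1024 - 1020 + 5 = 9
 *
 * i.e. nine descriptors written but not yet announced to the hardware.
 */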
/**
 * ring_tx_db - ring a Tx queue's doorbell
 * @adap: the adapter
 * @q: the Tx queue
 * @n: number of new descriptors to give to HW
 *
 * Ring the doorbell for a Tx queue.
 */
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
        int n = Q_IDXDIFF(q, dbidx);

        /*
         * Make sure that all writes to the TX Descriptors are committed
         * before we tell the hardware about them.
         */
        rte_wmb();

        /*
         * If we don't have access to the new User Doorbell (T5+), use the old
         * doorbell mechanism; otherwise use the new BAR2 mechanism.
         */
        if (unlikely(!q->bar2_addr)) {
                u32 val = V_PIDX(n);

                /*
                 * For T4 we need to participate in the Doorbell Recovery
                 * mechanism.
                 */
                if (!q->db_disabled)
                        t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
                                     V_QID(q->cntxt_id) | val);
                else
                        q->db_pidx_inc += n;
                q->db_pidx = q->pidx;
        } else {
                u32 val = V_PIDX_T5(n);

                /*
                 * T4 and later chips share the same PIDX field offset within
                 * the doorbell, but T5 and later shrank the field in order to
                 * gain a bit for Doorbell Priority.  The field was absurdly
                 * large in the first place (14 bits) so we just use the T5
                 * and later limits and warn if a Queue ID is too large.
                 */
                WARN_ON(val & F_DBPRIO);

                writel(val | V_QID(q->bar2_qid),
                       (void *)((uintptr_t)q->bar2_addr + SGE_UDB_KDOORBELL));

                /*
                 * This Write Memory Barrier will force the write to the User
                 * Doorbell area to be flushed.  This is needed to prevent
                 * writes on different CPUs for the same queue from hitting
                 * the adapter out of order.  This is required when some Work
                 * Requests take the Write Combine Gather Buffer path (user
                 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
                 * take the traditional path where we simply increment the
                 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
                 * hardware DMA read the actual Work Request.
                 */
                rte_wmb();
        }
        q->dbidx = q->pidx;
}
/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(enum chip_type chip, const struct rte_mbuf *m)
{
        int csum_type;

        if (m->ol_flags & PKT_TX_IP_CKSUM) {
                switch (m->ol_flags & PKT_TX_L4_MASK) {
                case PKT_TX_TCP_CKSUM:
                        csum_type = TX_CSUM_TCPIP;
                        break;
                case PKT_TX_UDP_CKSUM:
                        csum_type = TX_CSUM_UDPIP;
                        break;
                default:
                        goto nocsum;
                }
        } else {
                goto nocsum;
        }

        if (likely(csum_type >= TX_CSUM_TCPIP)) {
                int hdr_len = V_TXPKT_IPHDR_LEN(m->l3_len);
                int eth_hdr_len = m->l2_len;

                if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
                        hdr_len |= V_TXPKT_ETHHDR_LEN(eth_hdr_len);
                else
                        hdr_len |= V_T6_TXPKT_ETHHDR_LEN(eth_hdr_len);
                return V_TXPKT_CSUM_TYPE(csum_type) | hdr_len;
        }

nocsum:
        /*
         * unknown protocol, disable HW csum
         * and hope a bad packet is detected
         */
        return F_TXPKT_L4CSUM_DIS;
}
static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
        q->in_use += n;
        q->pidx += n;
        if (q->pidx >= q->size)
                q->pidx -= q->size;
}

#define MAX_COALESCE_LEN 64000

static inline int wraps_around(struct sge_txq *q, int ndesc)
{
        return (q->pidx + ndesc) > q->size ? 1 : 0;
}
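/*
 * E.g. with q->pidx == 1020, ndesc == 8 and q->size == 1024 the request
 * would run past the end of the descriptor ring (1028 > 1024), so the
 * caller must close the current coalesce WR and start a fresh one at
 * pidx 0.
 */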
static void tx_timer_cb(void *data)
{
        struct adapter *adap = (struct adapter *)data;
        struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
        int i;

        /* monitor any pending tx */
        for (i = 0; i < adap->sge.max_ethqsets; i++, txq++) {
                t4_os_lock(&txq->txq_lock);
                if (txq->q.coalesce.idx) {
                        if (txq->q.coalesce.idx == txq->q.last_coal_idx &&
                            txq->q.pidx == txq->q.last_pidx) {
                                ship_tx_pkt_coalesce_wr(adap, txq);
                        } else {
                                txq->q.last_coal_idx = txq->q.coalesce.idx;
                                txq->q.last_pidx = txq->q.pidx;
                        }
                }
                t4_os_unlock(&txq->txq_lock);
        }
        rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
}
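/*
 * The alarm above re-arms itself every 50 microseconds.  A coalesce WR is
 * only shipped from here when two consecutive ticks observe the same
 * (coalesce.idx, pidx) pair, i.e. when the queue has gone idle with
 * packets still buffered; an actively transmitting queue keeps updating
 * last_coal_idx/last_pidx and is left alone.
 */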
/**
 * ship_tx_pkt_coalesce_wr - finalizes and ships a coalesce WR
 * @adap: adapter structure
 * @txq: tx queue
 *
 * writes the different fields of the pkts WR and sends it.
 */
static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
                                           struct sge_eth_txq *txq)
{
        u32 wr_mid;
        struct sge_txq *q = &txq->q;
        struct fw_eth_tx_pkts_wr *wr;
        unsigned int ndesc;

        /* fill the pkts WR header */
        wr = (void *)&q->desc[q->pidx];
        wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));

        wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
        ndesc = flits_to_desc(q->coalesce.flits);
        wr->equiq_to_len16 = htonl(wr_mid);
        wr->plen = cpu_to_be16(q->coalesce.len);
        wr->npkt = q->coalesce.idx;
        wr->r3 = 0;
        wr->type = q->coalesce.type;

        /* zero out coalesce structure members */
        q->coalesce.idx = 0;
        q->coalesce.flits = 0;
        q->coalesce.len = 0;

        txq_advance(q, ndesc);
        txq->stats.coal_wr++;
        txq->stats.coal_pkts += wr->npkt;

        if (Q_IDXDIFF(q, equeidx) >= q->size / 2) {
                q->equeidx = q->pidx;
                wr_mid |= F_FW_WR_EQUEQ;
                wr->equiq_to_len16 = htonl(wr_mid);
        }
        ring_tx_db(adap, q);
}
/**
 * should_tx_packet_coalesce - decides whether to coalesce an mbuf or not
 * @txq: tx queue where the mbuf is sent
 * @mbuf: mbuf to be sent
 * @nflits: return value for number of flits needed
 * @adap: adapter structure
 *
 * This function decides if a packet should be coalesced or not.
 */
static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
                                            struct rte_mbuf *mbuf,
                                            unsigned int *nflits,
                                            struct adapter *adap)
{
        struct sge_txq *q = &txq->q;
        unsigned int flits, ndesc;
        unsigned char type = 0;
        int credits, hw_cidx = ntohs(q->stat->cidx);
        int in_use = q->pidx - hw_cidx + flits_to_desc(q->coalesce.flits);

        /* use coal WR type 1 when no frags are present */
        type = (mbuf->nb_segs == 1) ? 1 : 0;

        if (in_use < 0)
                in_use += q->size;

        if (unlikely(type != q->coalesce.type && q->coalesce.idx))
                ship_tx_pkt_coalesce_wr(adap, txq);

        /* calculate the number of flits required for coalescing this packet
         * without the 2 flits of the WR header.  These are added further down
         * if we are just starting in new PKTS WR.  sgl_len doesn't account
         * for the possible 16 bytes alignment ULP TX commands so we do it
         * here.
         */
        flits = (sgl_len(mbuf->nb_segs) + 1) & ~1U;
        if (type == 0)
                flits += (sizeof(struct ulp_txpkt) +
                          sizeof(struct ulptx_idata)) / sizeof(__be64);
        flits += sizeof(struct cpl_tx_pkt_core) / sizeof(__be64);
        *nflits = flits;

        /* If coalescing is on, the mbuf is added to a pkts WR */
        if (q->coalesce.idx) {
                ndesc = DIV_ROUND_UP(q->coalesce.flits + flits, 8);
                credits = txq_avail(q) - ndesc;

                /* If we are wrapping or this is last mbuf then, send the
                 * already coalesced mbufs and let the non-coalesce pass
                 * handle the mbuf.
                 */
                if (unlikely(credits < 0 || wraps_around(q, ndesc))) {
                        ship_tx_pkt_coalesce_wr(adap, txq);
                        return 0;
                }

                /* If the max coalesce len or the max WR len is reached
                 * ship the WR and keep coalescing on.
                 */
                if (unlikely((q->coalesce.len + mbuf->pkt_len >
                              MAX_COALESCE_LEN) ||
                             (q->coalesce.flits + flits >
                              q->coalesce.max))) {
                        ship_tx_pkt_coalesce_wr(adap, txq);
                        goto new;
                }
                return 1;
        }

new:
        /* start a new pkts WR, the WR header is not filled below */
        flits += sizeof(struct fw_eth_tx_pkts_wr) / sizeof(__be64);
        ndesc = flits_to_desc(q->coalesce.flits + flits);
        credits = txq_avail(q) - ndesc;
        if (unlikely(credits < 0 || wraps_around(q, ndesc)))
                return 0;
        q->coalesce.flits += 2;
        q->coalesce.type = type;
        q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] +
                          2 * sizeof(__be64);
        return 1;
}
/**
 * tx_do_packet_coalesce - add an mbuf to a coalesce WR
 * @txq: sge_eth_txq used send the mbuf
 * @mbuf: mbuf to be sent
 * @flits: flits needed for this mbuf
 * @adap: adapter structure
 * @pi: port_info structure
 * @addr: mapped address of the mbuf
 *
 * Adds an mbuf to be sent as part of a coalesce WR by filling a
 * ulp_tx_pkt command, ulp_tx_sc_imm command, cpl message and
 * ulp_tx_sc_dsgl command.
 */
static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
                                        struct rte_mbuf *mbuf,
                                        int flits, struct adapter *adap,
                                        const struct port_info *pi,
                                        dma_addr_t *addr)
{
        u64 cntrl, *end;
        struct sge_txq *q = &txq->q;
        struct ulp_txpkt *mc;
        struct ulptx_idata *sc_imm;
        struct cpl_tx_pkt_core *cpl;
        struct tx_sw_desc *sd;
        unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len;

        if (q->coalesce.type == 0) {
                mc = (struct ulp_txpkt *)q->coalesce.ptr;
                mc->cmd_dest = htonl(V_ULPTX_CMD(4) | V_ULP_TXPKT_DEST(0) |
                                     V_ULP_TXPKT_FID(adap->sge.fw_evtq.cntxt_id) |
                                     F_ULP_TXPKT_RO);
                mc->len = htonl(DIV_ROUND_UP(flits, 2));
                sc_imm = (struct ulptx_idata *)(mc + 1);
                sc_imm->cmd_more = htonl(V_ULPTX_CMD(ULP_TX_SC_IMM) |
                                         F_ULP_TX_SC_MORE);
                sc_imm->len = htonl(sizeof(*cpl));
                end = (u64 *)mc + flits;
                cpl = (struct cpl_tx_pkt_core *)(sc_imm + 1);
        } else {
                end = (u64 *)q->coalesce.ptr + flits;
                cpl = (struct cpl_tx_pkt_core *)q->coalesce.ptr;
        }

        /* update coalesce structure for this txq */
        q->coalesce.flits += flits;
        q->coalesce.ptr += flits * sizeof(__be64);
        q->coalesce.len += mbuf->pkt_len;

        /* fill the cpl message, same as in t4_eth_xmit, this should be kept
         * similar to t4_eth_xmit
         */
        if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
                cntrl = hwcsum(adap->params.chip, mbuf) |
                        F_TXPKT_IPCSUM_DIS;
                txq->stats.tx_cso++;
        } else {
                cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
        }

        if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
                txq->stats.vlan_ins++;
                cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci);
        }

        cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
                           V_TXPKT_INTF(pi->tx_chan) |
                           V_TXPKT_PF(adap->pf));
        cpl->pack = htons(0);
        cpl->len = htons(len);
        cpl->ctrl1 = cpu_to_be64(cntrl);
        write_sgl(mbuf, q, (struct ulptx_sgl *)(cpl + 1), end, 0, addr);
        txq->stats.pkts++;
        txq->stats.tx_bytes += len;

        sd = &q->sdesc[q->pidx + (idx >> 1)];
        if (!(idx & 1)) {
                if (sd->coalesce.idx) {
                        int i;

                        for (i = 0; i < sd->coalesce.idx; i++) {
                                rte_pktmbuf_free(sd->coalesce.mbuf[i]);
                                sd->coalesce.mbuf[i] = NULL;
                        }
                }
        }

        /* store pointers to the mbuf and the sgl used in free_tx_desc.
         * each tx desc can hold two pointers corresponding to the value
         * of ETH_COALESCE_PKT_PER_DESC
         */
        sd->coalesce.mbuf[idx & 1] = mbuf;
        sd->coalesce.sgl[idx & 1] = (struct ulptx_sgl *)(cpl + 1);
        sd->coalesce.idx = (idx & 1) + 1;

        /* send the coalesced work request if max reached */
        if (++q->coalesce.idx == ETH_COALESCE_PKT_NUM)
                ship_tx_pkt_coalesce_wr(adap, txq);
        return 0;
}
/**
 * t4_eth_xmit - add a packet to an Ethernet Tx queue
 * @txq: the egress queue
 * @mbuf: the packet
 *
 * Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
 */
int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf)
{
        const struct port_info *pi;
        struct cpl_tx_pkt_lso_core *lso;
        struct adapter *adap;
        struct rte_mbuf *m = mbuf;
        struct fw_eth_tx_pkt_wr *wr;
        struct cpl_tx_pkt_core *cpl;
        struct tx_sw_desc *d;
        dma_addr_t addr[m->nb_segs];
        unsigned int flits, ndesc, cflits;
        int l3hdr_len, l4hdr_len, eth_xtra_len;
        int len, last_desc;
        int credits;
        u32 wr_mid;
        u64 cntrl, *end;
        bool v6;

        /* Reject xmit if queue is stopped */
        if (unlikely(txq->flags & EQ_STOPPED))
                return -(EBUSY);

        /*
         * The chip min packet length is 10 octets but play safe and reject
         * anything shorter than an Ethernet header.
         */
        if (unlikely(m->pkt_len < ETHER_HDR_LEN)) {
out_free:
                rte_pktmbuf_free(m);
                return 0;
        }

        pi = (struct port_info *)txq->eth_dev->data->dev_private;
        adap = pi->adapter;

        cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
        /* align the end of coalesce WR to a 512 byte boundary */
        txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;

        if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
                if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
                        if (unlikely(map_mbuf(mbuf, addr) < 0)) {
                                dev_warn(adap, "%s: mapping err for coalesce\n",
                                         __func__);
                                txq->stats.mapping_err++;
                                goto out_free;
                        }
                        rte_prefetch0((volatile void *)addr);
                        return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
                                                     pi, addr);
                } else {
                        return -EBUSY;
                }
        }

        if (txq->q.coalesce.idx)
                ship_tx_pkt_coalesce_wr(adap, txq);

        flits = calc_tx_flits(m);
        ndesc = flits_to_desc(flits);
        credits = txq_avail(&txq->q) - ndesc;

        if (unlikely(credits < 0)) {
                dev_debug(adap, "%s: Tx ring %u full; credits = %d\n",
                          __func__, txq->q.cntxt_id, credits);
                return -EBUSY;
        }

        if (unlikely(map_mbuf(m, addr) < 0)) {
                txq->stats.mapping_err++;
                goto out_free;
        }

        wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
        if (Q_IDXDIFF(&txq->q, equeidx) >= 64) {
                txq->q.equeidx = txq->q.pidx;
                wr_mid |= F_FW_WR_EQUEQ;
        }

        wr = (void *)&txq->q.desc[txq->q.pidx];
        wr->equiq_to_len16 = htonl(wr_mid);
        wr->r3 = rte_cpu_to_be_64(0);
        end = (u64 *)wr + flits;

        len = 0;
        len += sizeof(*cpl);
        lso = (void *)(wr + 1);
        v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
        l3hdr_len = m->l3_len;
        l4hdr_len = m->l4_len;
        eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
        len += sizeof(*lso);
        wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
                               V_FW_WR_IMMDLEN(len));
        lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
                              F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
                              V_LSO_IPV6(v6) |
                              V_LSO_ETHHDR_LEN(eth_xtra_len / 4) |
                              V_LSO_IPHDR_LEN(l3hdr_len / 4) |
                              V_LSO_TCPHDR_LEN(l4hdr_len / 4));
        lso->ipid_ofst = htons(0);
        lso->mss = htons(m->tso_segsz);
        lso->seqno_offset = htonl(0);
        if (is_t4(adap->params.chip))
                lso->len = htonl(m->pkt_len);
        else
                lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len));
        cpl = (void *)(lso + 1);
        cntrl = V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
                V_TXPKT_IPHDR_LEN(l3hdr_len) |
                V_TXPKT_ETHHDR_LEN(eth_xtra_len);
        txq->stats.tso++;
        txq->stats.tx_cso += m->tso_segsz;

        if (m->ol_flags & PKT_TX_VLAN_PKT) {
                txq->stats.vlan_ins++;
                cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci);
        }

        cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
                           V_TXPKT_INTF(pi->tx_chan) |
                           V_TXPKT_PF(adap->pf));
        cpl->pack = htons(0);
        cpl->len = htons(m->pkt_len);
        cpl->ctrl1 = cpu_to_be64(cntrl);

        txq->stats.pkts++;
        txq->stats.tx_bytes += m->pkt_len;
        last_desc = txq->q.pidx + ndesc - 1;
        if (last_desc >= (int)txq->q.size)
                last_desc -= txq->q.size;

        d = &txq->q.sdesc[last_desc];
        if (d->mbuf) {
                rte_pktmbuf_free(d->mbuf);
                d->mbuf = NULL;
        }
        write_sgl(m, &txq->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
                  addr);
        txq->q.sdesc[last_desc].mbuf = m;
        txq->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
        txq_advance(&txq->q, ndesc);
        ring_tx_db(adap, &txq->q);
        return 0;
}
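/*
 * Typical usage -- a minimal sketch, not part of this file.  The PMD's
 * burst routine is assumed to look roughly like this ("pkts"/"nb_pkts"
 * are hypothetical names); it simply feeds mbufs to t4_eth_xmit() under
 * the queue lock and stops on the first -EBUSY:
 *
 *      uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts,
 *                               uint16_t nb_pkts)
 *      {
 *              struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
 *              uint16_t total_sent = 0;
 *
 *              t4_os_lock(&txq->txq_lock);
 *              while (total_sent < nb_pkts) {
 *                      if (t4_eth_xmit(txq, pkts[total_sent]) < 0)
 *                              break;   (ring full; retry on next burst)
 *                      total_sent++;
 *              }
 *              t4_os_unlock(&txq->txq_lock);
 *              return total_sent;
 *      }
 */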
/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @dev: the PCI device's core device
 * @nelem: the number of descriptors
 * @elem_size: the size of each descriptor
 * @sw_size: the size of the SW state associated with each ring element
 * @phys: the physical address of the allocated ring
 * @metadata: address of the array holding the SW state for the ring
 * @stat_size: extra space in HW ring for status information
 * @node: preferred node for memory allocations
 *
 * Allocates resources for an SGE descriptor ring, such as Tx queues,
 * free buffer lists, or response queues.  Each SGE ring requires
 * space for its HW descriptors plus, optionally, space for the SW state
 * associated with each HW entry (the metadata).  The function returns
 * three values: the virtual address for the HW ring (the return value
 * of the function), the bus address of the HW ring, and the address
 * of the SW ring.
 */
static void *alloc_ring(size_t nelem, size_t elem_size,
                        size_t sw_size, dma_addr_t *phys, void *metadata,
                        size_t stat_size, __rte_unused uint16_t queue_id,
                        int socket_id, const char *z_name,
                        const char *z_name_sw)
{
        size_t len = CXGBE_MAX_RING_DESC_SIZE * elem_size + stat_size;
        const struct rte_memzone *tz;
        void *s = NULL;

        dev_debug(adapter, "%s: nelem = %zu; elem_size = %zu; sw_size = %zu; "
                  "stat_size = %zu; queue_id = %u; socket_id = %d; z_name = %s;"
                  " z_name_sw = %s\n", __func__, nelem, elem_size, sw_size,
                  stat_size, queue_id, socket_id, z_name, z_name_sw);

        tz = rte_memzone_lookup(z_name);
        if (tz) {
                dev_debug(adapter, "%s: tz exists...returning existing..\n",
                          __func__);
                goto alloc_sw_ring;
        }

        /*
         * Allocate TX/RX ring hardware descriptors. A memzone large enough to
         * handle the maximum ring size is allocated in order to allow for
         * resizing in later calls to the queue setup function.
         */
        tz = rte_memzone_reserve_aligned(z_name, len, socket_id, 0, 4096);
        if (!tz)
                return NULL;

alloc_sw_ring:
        memset(tz->addr, 0, len);
        if (sw_size) {
                s = rte_zmalloc_socket(z_name_sw, nelem * sw_size,
                                       RTE_CACHE_LINE_SIZE, socket_id);
                if (!s) {
                        dev_err(adapter, "%s: failed to get sw_ring memory\n",
                                __func__);
                        return NULL;
                }
        }
        if (metadata)
                *(void **)metadata = s;

        *phys = (uint64_t)tz->phys_addr;
        return tz->addr;
}
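/*
 * The memzone name passed in as z_name is what makes re-running queue
 * setup idempotent: for example, Tx queue 0 of port 0 ends up with a name
 * of the form "<drivername>_tx_ring_0_0" (see the snprintf() calls in
 * t4_sge_alloc_rxq() and t4_sge_alloc_eth_txq() below), so a second setup
 * call finds the existing zone via rte_memzone_lookup() instead of
 * reserving a new one.
 */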
/**
 * t4_pktgl_to_mbuf_usembufs - build an mbuf from a packet gather list
 * @gl: the gather list
 *
 * Builds an mbuf from the given packet gather list.  Returns the mbuf or
 * %NULL if mbuf allocation failed.
 */
static struct rte_mbuf *t4_pktgl_to_mbuf_usembufs(const struct pkt_gl *gl)
{
        /*
         * If there's only one mbuf fragment, just return that.
         */
        if (likely(gl->nfrags == 1))
                return gl->mbufs[0];

        return NULL;
}

/**
 * t4_pktgl_to_mbuf - build an mbuf from a packet gather list
 * @gl: the gather list
 *
 * Builds an mbuf from the given packet gather list.  Returns the mbuf or
 * %NULL if mbuf allocation failed.
 */
static struct rte_mbuf *t4_pktgl_to_mbuf(const struct pkt_gl *gl)
{
        return t4_pktgl_to_mbuf_usembufs(gl);
}

#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
        ((dma_addr_t)((mb)->buf_physaddr + (mb)->data_off))
/**
 * t4_ethrx_handler - process an ingress ethernet packet
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the RX_PKT message
 * @si: the gather list of packet fragments
 *
 * Process an ingress ethernet packet and deliver it to the stack.
 */
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
                     const struct pkt_gl *si)
{
        struct rte_mbuf *mbuf;
        const struct cpl_rx_pkt *pkt;
        const struct rss_header *rss_hdr;
        bool csum_ok;
        struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);

        rss_hdr = (const void *)rsp;
        pkt = (const void *)&rsp[1];
        csum_ok = pkt->csum_calc && !pkt->err_vec;

        mbuf = t4_pktgl_to_mbuf(si);
        if (unlikely(!mbuf)) {
                rxq->stats.rx_drops++;
                return 0;
        }

        mbuf->port = pkt->iff;
        if (pkt->l2info & htonl(F_RXF_IP)) {
                mbuf->packet_type = RTE_PTYPE_L3_IPV4;
                if (unlikely(!csum_ok))
                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;

                if ((pkt->l2info & htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
                        mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
        } else if (pkt->l2info & htonl(F_RXF_IP6)) {
                mbuf->packet_type = RTE_PTYPE_L3_IPV6;
        }

        mbuf->port = pkt->iff;

        if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
                mbuf->ol_flags |= PKT_RX_RSS_HASH;
                mbuf->hash.rss = ntohl(rss_hdr->hash_val);
        }

        if (pkt->vlan_ex) {
                mbuf->ol_flags |= PKT_RX_VLAN_PKT;
                mbuf->vlan_tci = ntohs(pkt->vlan);
        }
        rxq->stats.pkts++;
        rxq->stats.rx_bytes += mbuf->pkt_len;

        return 0;
}
/**
 * is_new_response - check if a response is newly written
 * @r: the response descriptor
 * @q: the response queue
 *
 * Returns true if a response descriptor contains a yet unprocessed
 * response.
 */
static inline bool is_new_response(const struct rsp_ctrl *r,
                                   const struct sge_rspq *q)
{
        return (r->u.type_gen >> S_RSPD_GEN) == q->gen;
}

#define CXGB4_MSG_AN ((void *)1)

/**
 * rspq_next - advance to the next entry in a response queue
 * @q: the response queue
 *
 * Updates the state of a response queue to advance it to the next entry.
 */
static inline void rspq_next(struct sge_rspq *q)
{
        q->cur_desc = (const __be64 *)((const char *)q->cur_desc + q->iqe_len);
        if (unlikely(++q->cidx == q->size)) {
                q->cidx = 0;
                q->gen ^= 1;
                q->cur_desc = q->desc;
        }
}
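/*
 * The generation bit is what makes is_new_response() above work: q->gen
 * starts out as 1 (see t4_sge_alloc_rxq()), the hardware writes the
 * current generation into each response descriptor, and every time the
 * driver wraps cidx past the end of the ring it flips q->gen.  A stale
 * descriptor left over from the previous pass therefore carries the old
 * generation value and is not treated as new.
 */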
/**
 * process_responses - process responses from an SGE response queue
 * @q: the ingress queue to process
 * @budget: how many responses can be processed in this round
 * @rx_pkts: mbuf to put the pkts
 *
 * Process responses from an SGE response queue up to the supplied budget.
 * Responses include received packets as well as control messages from FW
 * or HW.
 *
 * Additionally choose the interrupt holdoff time for the next interrupt
 * on this queue.  If the system is under memory shortage use a fairly
 * long delay to help recovery.
 */
static int process_responses(struct sge_rspq *q, int budget,
                             struct rte_mbuf **rx_pkts)
{
        int ret = 0, rsp_type;
        int budget_left = budget;
        const struct rsp_ctrl *rc;
        struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);

        while (likely(budget_left)) {
                rc = (const struct rsp_ctrl *)
                     ((const char *)q->cur_desc + (q->iqe_len - sizeof(*rc)));

                if (!is_new_response(rc, q))
                        break;

                /*
                 * Ensure response has been read
                 */
                rte_rmb();
                rsp_type = G_RSPD_TYPE(rc->u.type_gen);

                if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) {
                        const struct rx_sw_desc *rsd =
                                                &rxq->fl.sdesc[rxq->fl.cidx];
                        const struct rss_header *rss_hdr =
                                                (const void *)q->cur_desc;
                        const struct cpl_rx_pkt *cpl =
                                                (const void *)&q->cur_desc[1];
                        bool csum_ok = cpl->csum_calc && !cpl->err_vec;
                        struct rte_mbuf *pkt;
                        u32 len = ntohl(rc->pldbuflen_qid);

                        BUG_ON(!(len & F_RSPD_NEWBUF));
                        pkt = rsd->buf;
                        pkt->data_len = G_RSPD_LEN(len);
                        pkt->pkt_len = pkt->data_len;
                        unmap_rx_buf(&rxq->fl);

                        if (cpl->l2info & htonl(F_RXF_IP)) {
                                pkt->packet_type = RTE_PTYPE_L3_IPV4;
                                if (unlikely(!csum_ok))
                                        pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD;

                                if ((cpl->l2info &
                                     htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
                                        pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD;
                        } else if (cpl->l2info & htonl(F_RXF_IP6)) {
                                pkt->packet_type = RTE_PTYPE_L3_IPV6;
                        }

                        if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
                                pkt->ol_flags |= PKT_RX_RSS_HASH;
                                pkt->hash.rss = ntohl(rss_hdr->hash_val);
                        }

                        if (cpl->vlan_ex) {
                                pkt->ol_flags |= PKT_RX_VLAN_PKT;
                                pkt->vlan_tci = ntohs(cpl->vlan);
                        }
                        rxq->stats.pkts++;
                        rxq->stats.rx_bytes += pkt->pkt_len;
                        rx_pkts[budget - budget_left] = pkt;
                } else if (likely(rsp_type == X_RSPD_TYPE_CPL)) {
                        ret = q->handler(q, q->cur_desc, NULL);
                } else {
                        ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
                }

                if (unlikely(ret)) {
                        /* couldn't process descriptor, back off for recovery */
                        q->next_intr_params = V_QINTR_TIMER_IDX(NOMEM_TMR_IDX);
                        break;
                }

                rspq_next(q);
                budget_left--;

                if (R_IDXDIFF(q, gts_idx) >= 64) {
                        unsigned int cidx_inc = R_IDXDIFF(q, gts_idx);
                        unsigned int params;
                        u32 val;

                        if (fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
                                __refill_fl(q->adapter, &rxq->fl);
                        params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
                        q->next_intr_params = params;
                        val = V_CIDXINC(cidx_inc) | V_SEINTARM(params);

                        if (unlikely(!q->bar2_addr))
                                t4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS),
                                             val |
                                             V_INGRESSQID((u32)q->cntxt_id));
                        else {
                                writel(val | V_INGRESSQID(q->bar2_qid),
                                       (void *)((uintptr_t)q->bar2_addr +
                                                SGE_UDB_GTS));
                                /*
                                 * This Write memory Barrier will force the
                                 * write to the User Doorbell area to be
                                 * flushed.
                                 */
                                rte_wmb();
                        }
                        q->gts_idx = q->cidx;
                }
        }

        /*
         * If this is a Response Queue with an associated Free List and
         * there's room for another chunk of new Free List buffer pointers,
         * refill the Free List.
         */
        if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
                __refill_fl(q->adapter, &rxq->fl);

        return budget - budget_left;
}
int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
               unsigned int budget, unsigned int *work_done)
{
        int err = 0;

        *work_done = process_responses(q, budget, rx_pkts);
        return err;
}
/**
 * bar2_address - return the BAR2 address for an SGE Queue's Registers
 * @adapter: the adapter
 * @qid: the SGE Queue ID
 * @qtype: the SGE Queue Type (Egress or Ingress)
 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 * Returns the BAR2 address for the SGE Queue Registers associated with
 * @qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
 * Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
 */
static void __iomem *bar2_address(struct adapter *adapter, unsigned int qid,
                                  enum t4_bar2_qtype qtype,
                                  unsigned int *pbar2_qid)
{
        u64 bar2_qoffset;
        int ret;

        ret = t4_bar2_sge_qregs(adapter, qid, qtype, &bar2_qoffset, pbar2_qid);
        if (ret)
                return NULL;

        return adapter->bar2 + bar2_qoffset;
}
int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq)
{
        struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
        unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;

        return t4_iq_start_stop(adap, adap->mbox, true, adap->pf, 0,
                                rq->cntxt_id, fl_id, 0xffff);
}

int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq)
{
        struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
        unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;

        return t4_iq_start_stop(adap, adap->mbox, false, adap->pf, 0,
                                rq->cntxt_id, fl_id, 0xffff);
}
/*
 * @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
 */
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                     struct rte_eth_dev *eth_dev, int intr_idx,
                     struct sge_fl *fl, rspq_handler_t hnd, int cong,
                     struct rte_mempool *mp, int queue_id, int socket_id)
{
        int ret, flsz = 0;
        struct fw_iq_cmd c;
        struct sge *s = &adap->sge;
        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
        char z_name[RTE_MEMZONE_NAMESIZE];
        char z_name_sw[RTE_MEMZONE_NAMESIZE];
        unsigned int nb_refill;

        /* Size needs to be multiple of 16, including status entry. */
        iq->size = cxgbe_roundup(iq->size, 16);

        snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
                 eth_dev->driver->pci_drv.name, fwevtq ? "fwq_ring" : "rx_ring",
                 eth_dev->data->port_id, queue_id);
        snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);

        iq->desc = alloc_ring(iq->size, iq->iqe_len, 0, &iq->phys_addr, NULL, 0,
                              queue_id, socket_id, z_name, z_name_sw);
        if (!iq->desc)
                return -ENOMEM;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
                            F_FW_CMD_WRITE | F_FW_CMD_EXEC |
                            V_FW_IQ_CMD_PFN(adap->pf) | V_FW_IQ_CMD_VFN(0));
        c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
                                 (sizeof(c) / 16));
        c.type_to_iqandstindex =
                htonl(V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
                      V_FW_IQ_CMD_IQASYNCH(fwevtq) |
                      V_FW_IQ_CMD_VIID(pi->viid) |
                      V_FW_IQ_CMD_IQANDST(intr_idx < 0) |
                      V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT) |
                      V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
                                               -intr_idx - 1));
        c.iqdroprss_to_iqesize =
                htons(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
                      F_FW_IQ_CMD_IQGTSMODE |
                      V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
                      V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
        c.iqsize = htons(iq->size);
        c.iqaddr = cpu_to_be64(iq->phys_addr);
        if (cong >= 0)
                c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN);

        if (fl) {
                struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,
                                                       fl);
                enum chip_type chip = (enum chip_type)CHELSIO_CHIP_VERSION(
                                                adap->params.chip);

                /*
                 * Allocate the ring for the hardware free list (with space
                 * for its status page) along with the associated software
                 * descriptor ring.  The free list size needs to be a multiple
                 * of the Egress Queue Unit and at least 2 Egress Units larger
                 * than the SGE's Egress Congestion Threshold
                 * (fl_starve_thres - 1).
                 */
                if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
                        fl->size = s->fl_starve_thres - 1 + 2 * 8;
                fl->size = cxgbe_roundup(fl->size, 8);

                snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
                         eth_dev->driver->pci_drv.name,
                         fwevtq ? "fwq_ring" : "fl_ring",
                         eth_dev->data->port_id, queue_id);
                snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);

                fl->desc = alloc_ring(fl->size, sizeof(__be64),
                                      sizeof(struct rx_sw_desc),
                                      &fl->addr, &fl->sdesc, s->stat_len,
                                      queue_id, socket_id, z_name, z_name_sw);
                if (!fl->desc)
                        goto fl_nomem;

                flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
                c.iqns_to_fl0congen |=
                        htonl(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
                              (unlikely(rxq->usembufs) ?
                               0 : F_FW_IQ_CMD_FL0PACKEN) |
                              F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
                              F_FW_IQ_CMD_FL0PADEN);
                if (cong >= 0)
                        c.iqns_to_fl0congen |=
                                htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
                                      F_FW_IQ_CMD_FL0CONGCIF |
                                      F_FW_IQ_CMD_FL0CONGEN);

                /* In T6, for egress queue type FL there is internal overhead
                 * of 16B for header going into FLM module.
                 * Hence maximum allowed burst size will be 448 bytes.
                 */
                c.fl0dcaen_to_fl0cidxfthresh =
                        htons(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_128B) |
                              V_FW_IQ_CMD_FL0FBMAX((chip <= CHELSIO_T5) ?
                              X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
                c.fl0size = htons(flsz);
                c.fl0addr = cpu_to_be64(fl->addr);
        }

        ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
        if (ret)
                goto err;

        iq->cur_desc = iq->desc;
        iq->cidx = 0;
        iq->gts_idx = 0;
        iq->gen = 1;
        iq->next_intr_params = iq->intr_params;
        iq->cntxt_id = ntohs(c.iqid);
        iq->abs_id = ntohs(c.physiqid);
        iq->bar2_addr = bar2_address(adap, iq->cntxt_id, T4_BAR2_QTYPE_INGRESS,
                                     &iq->bar2_qid);
        iq->size--;                           /* subtract status entry */
        iq->eth_dev = eth_dev;
        iq->handler = hnd;
        iq->port_id = pi->port_id;
        iq->mb_pool = mp;

        /* set offset to -1 to distinguish ingress queues without FL */
        iq->offset = fl ? 0 : -1;

        if (fl) {
                fl->cntxt_id = ntohs(c.fl0id);
                fl->avail = 0;
                fl->pend_cred = 0;
                fl->pidx = 0;
                fl->cidx = 0;
                fl->alloc_failed = 0;

                /*
                 * Note, we must initialize the BAR2 Free List User Doorbell
                 * information before refilling the Free List!
                 */
                fl->bar2_addr = bar2_address(adap, fl->cntxt_id,
                                             T4_BAR2_QTYPE_EGRESS,
                                             &fl->bar2_qid);

                nb_refill = refill_fl(adap, fl, fl_cap(fl));
                if (nb_refill != fl_cap(fl)) {
                        ret = -ENOMEM;
                        dev_err(adap, "%s: mbuf alloc failed with error: %d\n",
                                __func__, ret);
                        goto refill_fl_err;
                }
        }

        /*
         * For T5 and later we attempt to set up the Congestion Manager values
         * of the new RX Ethernet Queue.  This should really be handled by
         * firmware because it's more complex than any host driver wants to
         * get involved with and it's different per chip and this is almost
         * certainly wrong.  Firmware would be wrong as well, but it would be
         * a lot easier to fix in one place ...  For now we do something very
         * simple (and hopefully less wrong).
         */
        if (!is_t4(adap->params.chip) && cong >= 0) {
                u32 param, val;
                int i;

                param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
                         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
                         V_FW_PARAMS_PARAM_YZ(iq->cntxt_id));
                if (cong == 0) {
                        val = V_CONMCTXT_CNGTPMODE(X_CONMCTXT_CNGTPMODE_QUEUE);
                } else {
                        val = V_CONMCTXT_CNGTPMODE(
                                        X_CONMCTXT_CNGTPMODE_CHANNEL);
                        for (i = 0; i < 4; i++) {
                                if (cong & (1 << i))
                                        val |= V_CONMCTXT_CNGCHMAP(1 <<
                                                                   (i << 2));
                        }
                }
                ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
                                    &param, &val);
                if (ret)
                        dev_warn(adap->pdev_dev, "Failed to set Congestion Manager Context for Ingress Queue %d: %d\n",
                                 iq->cntxt_id, -ret);
        }

        return 0;

refill_fl_err:
        t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
                   iq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff);
fl_nomem:
        ret = -ENOMEM;
err:
        iq->cntxt_id = 0;
        iq->abs_id = 0;
        if (iq->desc)
                iq->desc = NULL;

        if (fl && fl->desc) {
                rte_free(fl->sdesc);
                fl->cntxt_id = 0;
                fl->sdesc = NULL;
                fl->desc = NULL;
        }
        return ret;
}
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
        q->cntxt_id = id;
        q->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS,
                                    &q->bar2_qid);
        q->cidx = 0;
        q->pidx = 0;
        q->dbidx = 0;
        q->in_use = 0;
        q->equeidx = 0;
        q->coalesce.idx = 0;
        q->coalesce.len = 0;
        q->coalesce.flits = 0;
        q->last_coal_idx = 0;
        q->last_pidx = 0;
        q->stat = (void *)&q->desc[q->size];
}
int t4_sge_eth_txq_start(struct sge_eth_txq *txq)
{
        /*
         * TODO: For flow-control, queue may be stopped waiting to reclaim
         * credits.
         * Ensure queue is in EQ_STOPPED state before starting it.
         */
        if (!(txq->flags & EQ_STOPPED))
                return -(EBUSY);

        txq->flags &= ~EQ_STOPPED;

        return 0;
}

int t4_sge_eth_txq_stop(struct sge_eth_txq *txq)
{
        txq->flags |= EQ_STOPPED;

        return 0;
}
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
                         struct rte_eth_dev *eth_dev, uint16_t queue_id,
                         unsigned int iqid, int socket_id)
{
        int ret, nentries;
        struct fw_eq_eth_cmd c;
        struct sge *s = &adap->sge;
        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
        char z_name[RTE_MEMZONE_NAMESIZE];
        char z_name_sw[RTE_MEMZONE_NAMESIZE];

        /* Add status entries */
        nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);

        snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
                 eth_dev->driver->pci_drv.name, "tx_ring",
                 eth_dev->data->port_id, queue_id);
        snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);

        txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
                                 sizeof(struct tx_sw_desc), &txq->q.phys_addr,
                                 &txq->q.sdesc, s->stat_len, queue_id,
                                 socket_id, z_name, z_name_sw);
        if (!txq->q.desc)
                return -ENOMEM;

        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
                            F_FW_CMD_WRITE | F_FW_CMD_EXEC |
                            V_FW_EQ_ETH_CMD_PFN(adap->pf) |
                            V_FW_EQ_ETH_CMD_VFN(0));
        c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC |
                                 F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16));
        c.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE |
                                     V_FW_EQ_ETH_CMD_VIID(pi->viid));
        c.fetchszm_to_iqid =
                htonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
                      V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
                      F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid));
        c.dcaen_to_eqsize =
                htonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
                      V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
                      V_FW_EQ_ETH_CMD_EQSIZE(nentries));
        c.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr);

        ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
        if (ret) {
                rte_free(txq->q.sdesc);
                txq->q.sdesc = NULL;
                txq->q.desc = NULL;
                return ret;
        }

        init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)));
        txq->stats.tso = 0;
        txq->stats.pkts = 0;
        txq->stats.tx_cso = 0;
        txq->stats.coal_wr = 0;
        txq->stats.vlan_ins = 0;
        txq->stats.tx_bytes = 0;
        txq->stats.coal_pkts = 0;
        txq->stats.mapping_err = 0;
        txq->flags |= EQ_STOPPED;
        txq->eth_dev = eth_dev;
        t4_os_lock_init(&txq->txq_lock);
        return 0;
}
static void free_txq(struct sge_txq *q)
{
        q->cntxt_id = 0;
        q->sdesc = NULL;
        q->desc = NULL;
}

static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
                         struct sge_fl *fl)
{
        unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;

        t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
                   rq->cntxt_id, fl_id, 0xffff);
        rq->cntxt_id = 0;
        rq->abs_id = 0;
        rq->desc = NULL;

        if (fl) {
                free_rx_bufs(fl, fl->avail);
                rte_free(fl->sdesc);
                fl->sdesc = NULL;
                fl->cntxt_id = 0;
                fl->desc = NULL;
        }
}
/*
 * Clear all queues of the port
 *
 * Note:  This function must only be called after rx and tx path
 * of the port have been disabled.
 */
void t4_sge_eth_clear_queues(struct port_info *pi)
{
        int i;
        struct adapter *adap = pi->adapter;
        struct sge_eth_rxq *rxq = &adap->sge.ethrxq[pi->first_qset];
        struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];

        for (i = 0; i < pi->n_rx_qsets; i++, rxq++) {
                if (rxq->rspq.desc)
                        t4_sge_eth_rxq_stop(adap, &rxq->rspq);
        }
        for (i = 0; i < pi->n_tx_qsets; i++, txq++) {
                if (txq->q.desc) {
                        struct sge_txq *q = &txq->q;

                        t4_sge_eth_txq_stop(txq);
                        reclaim_completed_tx(q);
                        free_tx_desc(q, q->size);
                        q->equeidx = q->pidx;
                }
        }
}
void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq)
{
        if (rxq->rspq.desc) {
                t4_sge_eth_rxq_stop(adap, &rxq->rspq);
                free_rspq_fl(adap, &rxq->rspq, rxq->fl.size ? &rxq->fl : NULL);
        }
}

void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq)
{
        if (txq->q.desc) {
                t4_sge_eth_txq_stop(txq);
                reclaim_completed_tx(&txq->q);
                t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, txq->q.cntxt_id);
                free_tx_desc(&txq->q, txq->q.size);
                rte_free(txq->q.sdesc);
                free_txq(&txq->q);
        }
}
void t4_sge_tx_monitor_start(struct adapter *adap)
{
        rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
}

void t4_sge_tx_monitor_stop(struct adapter *adap)
{
        rte_eal_alarm_cancel(tx_timer_cb, (void *)adap);
}
/**
 * t4_free_sge_resources - free SGE resources
 * @adap: the adapter
 *
 * Frees resources used by the SGE queue sets.
 */
void t4_free_sge_resources(struct adapter *adap)
{
        int i;
        struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0];
        struct sge_eth_txq *txq = &adap->sge.ethtxq[0];

        /* clean up Ethernet Tx/Rx queues */
        for (i = 0; i < adap->sge.max_ethqsets; i++, rxq++, txq++) {
                /* Free only the queues allocated */
                if (rxq->rspq.desc) {
                        t4_sge_eth_rxq_release(adap, rxq);
                        rxq->rspq.eth_dev = NULL;
                }
                if (txq->q.desc) {
                        t4_sge_eth_txq_release(adap, txq);
                        txq->eth_dev = NULL;
                }
        }

        if (adap->sge.fw_evtq.desc)
                free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
}
/**
 * t4_sge_init_soft - initialize SGE
 * @adap: the adapter
 *
 * Performs SGE initialization needed every time after a chip reset.
 * We do not initialize any of the queues here, instead the driver
 * top-level must request those individually.
 *
 * Called in two different modes:
 *
 *  1. Perform actual hardware initialization and record hard-coded
 *     parameters which were used.  This gets used when we're the
 *     Master PF and the Firmware Configuration File support didn't
 *     work for some reason.
 *
 *  2. We're not the Master PF or initialization was performed with
 *     a Firmware Configuration File.  In this case we need to grab
 *     any of the SGE operating parameters that we need to have in
 *     order to do our job and make sure we can live with them ...
 */
static int t4_sge_init_soft(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
        u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
        u32 ingress_rx_threshold;

        /*
         * Verify that CPL messages are going to the Ingress Queue for
         * process_responses() and that only packet data is going to the
         * Free Lists.
         */
        if ((t4_read_reg(adap, A_SGE_CONTROL) & F_RXPKTCPLMODE) !=
            V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
                dev_err(adap, "bad SGE CPL MODE\n");
                return -EINVAL;
        }

        /*
         * Validate the Host Buffer Register Array indices that we want to
         * use ...
         *
         * XXX Note that we should really read through the Host Buffer Size
         * XXX register array and find the indices of the Buffer Sizes which
         * XXX meet our needs!
         */
#define READ_FL_BUF(x) \
        t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE0 + (x) * sizeof(u32))

        fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
        fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
        fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
        fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);

        /*
         * We only bother using the Large Page logic if the Large Page Buffer
         * is larger than our Page Size Buffer.
         */
        if (fl_large_pg <= fl_small_pg)
                fl_large_pg = 0;

#undef READ_FL_BUF

        /*
         * The Page Size Buffer must be exactly equal to our Page Size and the
         * Large Page Size Buffer should be 0 (per above) or a power of 2.
         */
        if (fl_small_pg != CXGBE_PAGE_SIZE ||
            (fl_large_pg & (fl_large_pg - 1)) != 0) {
                dev_err(adap, "bad SGE FL page buffer sizes [%d, %d]\n",
                        fl_small_pg, fl_large_pg);
                return -EINVAL;
        }
        if (fl_large_pg)
                s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;

        if (adap->use_unpacked_mode) {
                int err = 0;

                if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap)) {
                        dev_err(adap, "bad SGE FL small MTU %d\n",
                                fl_small_mtu);
                        err = -EINVAL;
                }
                if (fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
                        dev_err(adap, "bad SGE FL large MTU %d\n",
                                fl_large_mtu);
                        err = -EINVAL;
                }
                if (err)
                        return err;
        }

        /*
         * Retrieve our RX interrupt holdoff timer values and counter
         * threshold values from the SGE parameters.
         */
        timer_value_0_and_1 = t4_read_reg(adap, A_SGE_TIMER_VALUE_0_AND_1);
        timer_value_2_and_3 = t4_read_reg(adap, A_SGE_TIMER_VALUE_2_AND_3);
        timer_value_4_and_5 = t4_read_reg(adap, A_SGE_TIMER_VALUE_4_AND_5);
        s->timer_val[0] = core_ticks_to_us(adap,
                                           G_TIMERVALUE0(timer_value_0_and_1));
        s->timer_val[1] = core_ticks_to_us(adap,
                                           G_TIMERVALUE1(timer_value_0_and_1));
        s->timer_val[2] = core_ticks_to_us(adap,
                                           G_TIMERVALUE2(timer_value_2_and_3));
        s->timer_val[3] = core_ticks_to_us(adap,
                                           G_TIMERVALUE3(timer_value_2_and_3));
        s->timer_val[4] = core_ticks_to_us(adap,
                                           G_TIMERVALUE4(timer_value_4_and_5));
        s->timer_val[5] = core_ticks_to_us(adap,
                                           G_TIMERVALUE5(timer_value_4_and_5));

        ingress_rx_threshold = t4_read_reg(adap, A_SGE_INGRESS_RX_THRESHOLD);
        s->counter_val[0] = G_THRESHOLD_0(ingress_rx_threshold);
        s->counter_val[1] = G_THRESHOLD_1(ingress_rx_threshold);
        s->counter_val[2] = G_THRESHOLD_2(ingress_rx_threshold);
        s->counter_val[3] = G_THRESHOLD_3(ingress_rx_threshold);

        return 0;
}
int t4_sge_init(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        u32 sge_control, sge_control2, sge_conm_ctrl;
        unsigned int ingpadboundary, ingpackboundary;
        int ret, egress_threshold;

        /*
         * Ingress Padding Boundary and Egress Status Page Size are set up by
         * t4_fixup_host_params().
         */
        sge_control = t4_read_reg(adap, A_SGE_CONTROL);
        s->pktshift = G_PKTSHIFT(sge_control);
        s->stat_len = (sge_control & F_EGRSTATUSPAGESIZE) ? 128 : 64;

        /*
         * T4 uses a single control field to specify both the PCIe Padding and
         * Packing Boundary.  T5 introduced the ability to specify these
         * separately.  The actual Ingress Packet Data alignment boundary
         * within Packed Buffer Mode is the maximum of these two
         * specifications.
         */
        ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) +
                               X_INGPADBOUNDARY_SHIFT);
        s->fl_align = ingpadboundary;

        if (!is_t4(adap->params.chip) && !adap->use_unpacked_mode) {
                /*
                 * T5 has a weird interpretation of one of the PCIe Packing
                 * Boundary values.  No idea why ...
                 */
                sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
                ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
                if (ingpackboundary == X_INGPACKBOUNDARY_16B)
                        ingpackboundary = 16;
                else
                        ingpackboundary = 1 << (ingpackboundary +
                                                X_INGPACKBOUNDARY_SHIFT);

                s->fl_align = max(ingpadboundary, ingpackboundary);
        }

        ret = t4_sge_init_soft(adap);
        if (ret < 0) {
                dev_err(adap, "%s: t4_sge_init_soft failed, error %d\n",
                        __func__, -ret);
                return ret;
        }

        /*
         * A FL with <= fl_starve_thres buffers is starving and a periodic
         * timer will attempt to refill it.  This needs to be larger than the
         * SGE's Egress Congestion Threshold.  If it isn't, then we can get
         * stuck waiting for new packets while the SGE is waiting for us to
         * give it more Free List entries.  (Note that the SGE's Egress
         * Congestion Threshold is in units of 2 Free List pointers.)  For T4,
         * there was only a single field to control this.  For T5 there's the
         * original field which now only applies to Unpacked Mode Free List
         * buffers and a new field which only applies to Packed Mode Free List
         * buffers.
         */
        sge_conm_ctrl = t4_read_reg(adap, A_SGE_CONM_CTRL);
        if (is_t4(adap->params.chip) || adap->use_unpacked_mode)
                egress_threshold = G_EGRTHRESHOLD(sge_conm_ctrl);
        else
                egress_threshold = G_EGRTHRESHOLDPACKING(sge_conm_ctrl);
        s->fl_starve_thres = 2 * egress_threshold + 1;

        return 0;
}
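/*
 * Example: if the relevant EGRTHRESHOLD field reads back as 16, the SGE
 * considers a Free List congested once it is down to 2 * 16 = 32 free
 * pointers, so fl_starve_thres becomes 33 buffers and fl_starving() (and
 * with it the refill logic in refill_fl_usembufs()) triggers just above
 * the hardware's own congestion point.
 */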