1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Chelsio Communications.
14 #include <netinet/in.h>
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
18 #include <rte_cycles.h>
19 #include <rte_interrupts.h>
21 #include <rte_debug.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_memory.h>
25 #include <rte_memzone.h>
26 #include <rte_tailq.h>
28 #include <rte_alarm.h>
29 #include <rte_ether.h>
30 #include <ethdev_driver.h>
31 #include <rte_malloc.h>
32 #include <rte_random.h>
35 #include "base/common.h"
36 #include "base/t4_regs.h"
37 #include "base/t4_msg.h"
40 static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
41 struct sge_eth_txq *txq);
44 * Max number of Rx buffers we replenish at a time.
46 #define MAX_RX_REFILL 64U
48 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
51 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
54 #define MAX_IMM_TX_PKT_LEN 256
57 * Max size of a WR sent through a control Tx queue.
59 #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
62 * Rx buffer sizes for "usembufs" Free List buffers (one ingress packet
63 * per mbuf buffer). We currently only support two sizes for 1500- and
64 * 9000-byte MTUs. We could easily support more but there doesn't seem to be
65 * much need for that ...
67 #define FL_MTU_SMALL 1500
68 #define FL_MTU_LARGE 9000
70 static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
73 struct sge *s = &adapter->sge;
75 return CXGBE_ALIGN(s->pktshift + RTE_ETHER_HDR_LEN + VLAN_HLEN + mtu,
79 #define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
80 #define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
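/*
 * Worked example (illustrative only; pktshift and fl_align are read from the
 * adapter, the values below are merely typical): with s->pktshift = 2 and a
 * 64-byte fl_align, FL_MTU_SMALL_BUFSIZE(adapter) works out to
 * CXGBE_ALIGN(2 + 14 + 4 + 1500, 64) = 1536 bytes.
 */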
83 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
84 * these to specify the buffer size as an index into the SGE Free List Buffer
85 * Size register array. We also use bit 4, when the buffer has been unmapped
86 * for DMA, but this is of course never sent to the hardware and is only used
87 * to prevent double unmappings. All of the above requires that the Free List
88 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
89 * 32-byte aligned or a power of 2 greater in alignment. Since the SGE's minimal
90 * Free List Buffer alignment is 32 bytes, this works out for us ...
93 RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */
94 RX_BUF_SIZE = 0x0f, /* bottom four bits are for buf sizes */
95 RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */
98 * XXX We shouldn't depend on being able to use these indices.
99 * XXX Especially when some other Master PF has initialized the
100 * XXX adapter or we use the Firmware Configuration File. We
101 * XXX should really search through the Host Buffer Size register
102 * XXX array for the appropriately sized buffer indices.
104 RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */
105 RX_LARGE_PG_BUF = 0x1, /* large page buffer */
107 RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */
108 RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */
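/*
 * Example of how these indices are used: refill_fl_usembufs() below ORs the
 * chosen index into the low bits of the bus address it writes to the Free
 * List (e.g. a 32-byte aligned buffer at 0x12340 stored as a large MTU
 * buffer becomes 0x12340 | RX_LARGE_MTU_BUF), and get_buf_size() later
 * recovers the index with (dma_addr & RX_BUF_SIZE).
 */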
112 * txq_avail - return the number of available slots in a Tx queue
115 * Returns the number of descriptors in a Tx queue available to write new
118 static inline unsigned int txq_avail(const struct sge_txq *q)
120 return q->size - 1 - q->in_use;
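/*
 * Note the "- 1": one descriptor is held back so the queue is never driven
 * completely full.
 */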
123 static int map_mbuf(struct rte_mbuf *mbuf, dma_addr_t *addr)
125 struct rte_mbuf *m = mbuf;
127 for (; m; m = m->next, addr++) {
128 *addr = m->buf_iova + rte_pktmbuf_headroom(m);
139 * free_tx_desc - reclaims Tx descriptors and their buffers
140 * @q: the Tx queue to reclaim descriptors from
141 * @n: the number of descriptors to reclaim
143 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
144 * Tx buffers. Called with the Tx queue lock held.
146 static void free_tx_desc(struct sge_txq *q, unsigned int n)
148 struct tx_sw_desc *d;
149 unsigned int cidx = 0;
153 if (d->mbuf) { /* an SGL is present */
154 rte_pktmbuf_free(d->mbuf);
157 if (d->coalesce.idx) {
160 for (i = 0; i < d->coalesce.idx; i++) {
161 rte_pktmbuf_free(d->coalesce.mbuf[i]);
162 d->coalesce.mbuf[i] = NULL;
167 if (++cidx == q->size) {
171 RTE_MBUF_PREFETCH_TO_FREE(&q->sdesc->mbuf->pool);
175 static void reclaim_tx_desc(struct sge_txq *q, unsigned int n)
177 struct tx_sw_desc *d;
178 unsigned int cidx = q->cidx;
182 if (d->mbuf) { /* an SGL is present */
183 rte_pktmbuf_free(d->mbuf);
187 if (++cidx == q->size) {
196 * fl_cap - return the capacity of a free-buffer list
199 * Returns the capacity of a free-buffer list. The capacity is less than
200 * the size because one descriptor needs to be left unpopulated, otherwise
201 * HW will think the FL is empty.
203 static inline unsigned int fl_cap(const struct sge_fl *fl)
205 return fl->size - 8; /* 1 descriptor = 8 buffers */
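/*
 * Example: a free list created with 1024 buffer slots therefore reports a
 * capacity of 1016; the final descriptor (8 buffer pointers) is the one
 * deliberately left unpopulated.
 */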
209 * fl_starving - return whether a Free List is starving.
210 * @adapter: pointer to the adapter
213 * Tests the specified Free List to see whether the number of buffers
214 * available to the hardware has fallen below our "starvation"
217 static inline bool fl_starving(const struct adapter *adapter,
218 const struct sge_fl *fl)
220 const struct sge *s = &adapter->sge;
222 return fl->avail - fl->pend_cred <= s->fl_starve_thres;
225 static inline unsigned int get_buf_size(struct adapter *adapter,
226 const struct rx_sw_desc *d)
228 unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
229 unsigned int buf_size = 0;
231 switch (rx_buf_size_idx) {
232 case RX_SMALL_MTU_BUF:
233 buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
236 case RX_LARGE_MTU_BUF:
237 buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
249 * free_rx_bufs - free the Rx buffers on an SGE free list
250 * @q: the SGE free list to free buffers from
251 * @n: how many buffers to free
253 * Release the next @n buffers on an SGE free-buffer Rx queue. The
254 * buffers must be made inaccessible to HW before calling this function.
256 static void free_rx_bufs(struct sge_fl *q, int n)
258 unsigned int cidx = q->cidx;
259 struct rx_sw_desc *d;
264 rte_pktmbuf_free(d->buf);
268 if (++cidx == q->size) {
278 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
279 * @q: the SGE free list
281 * Unmap the current buffer on an SGE free-buffer Rx queue. The
282 * buffer must be made inaccessible to HW before calling this function.
284 * This is similar to @free_rx_bufs above but does not free the buffer.
285 * Do note that the FL still loses any further access to the buffer.
287 static void unmap_rx_buf(struct sge_fl *q)
289 if (++q->cidx == q->size)
294 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
296 if (q->pend_cred >= 64) {
297 u32 val = adap->params.arch.sge_fl_db;
299 if (is_t4(adap->params.chip))
300 val |= V_PIDX(q->pend_cred / 8);
302 val |= V_PIDX_T5(q->pend_cred / 8);
305 * Make sure all memory writes to the Free List queue are
306 * committed before we tell the hardware about them.
311 * If we don't have access to the new User Doorbell (T5+), use
312 * the old doorbell mechanism; otherwise use the new BAR2
315 if (unlikely(!q->bar2_addr)) {
316 u32 reg = is_pf4(adap) ? MYPF_REG(A_SGE_PF_KDOORBELL) :
320 t4_write_reg_relaxed(adap, reg,
321 val | V_QID(q->cntxt_id));
323 writel_relaxed(val | V_QID(q->bar2_qid),
324 (void *)((uintptr_t)q->bar2_addr +
328 * This Write Memory Barrier will force the write to
329 * the User Doorbell area to be flushed.
337 static inline void set_rx_sw_desc(struct rx_sw_desc *sd, void *buf,
341 sd->dma_addr = mapping; /* includes size low bits */
345 * refill_fl_usembufs - refill an SGE Rx buffer ring with mbufs
347 * @q: the ring to refill
348 * @n: the number of new buffers to allocate
350 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
351 * allocated from the queue's mempool. The caller must assure that
352 * @n does not exceed the queue's capacity. If afterwards the queue is
353 * found critically low, mark it as starving in the bitmap of starving FLs.
355 * Returns the number of buffers allocated.
357 static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
360 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, fl);
361 unsigned int cred = q->avail;
362 __be64 *d = &q->desc[q->pidx];
363 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
364 unsigned int buf_size_idx = RX_SMALL_MTU_BUF;
365 struct rte_mbuf *buf_bulk[n];
367 struct rte_pktmbuf_pool_private *mbp_priv;
369 /* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
370 mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
371 if ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000)
372 buf_size_idx = RX_LARGE_MTU_BUF;
374 ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk, n);
375 if (unlikely(ret != 0)) {
376 dev_debug(adap, "%s: failed to allocate fl entries in bulk\n",
379 rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
383 for (i = 0; i < n; i++) {
384 struct rte_mbuf *mbuf = buf_bulk[i];
388 dev_debug(adap, "%s: mbuf alloc failed\n", __func__);
390 rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
394 rte_mbuf_refcnt_set(mbuf, 1);
397 RTE_PTR_ALIGN((char *)mbuf->buf_addr +
398 RTE_PKTMBUF_HEADROOM,
399 adap->sge.fl_align) -
400 (char *)mbuf->buf_addr);
403 mbuf->port = rxq->rspq.port_id;
405 mapping = (dma_addr_t)RTE_ALIGN(mbuf->buf_iova +
408 mapping |= buf_size_idx;
409 *d++ = cpu_to_be64(mapping);
410 set_rx_sw_desc(sd, mbuf, mapping);
414 if (++q->pidx == q->size) {
421 out: cred = q->avail - cred;
422 q->pend_cred += cred;
425 if (unlikely(fl_starving(adap, q))) {
427 * Make sure data has been written to free list
437 * refill_fl - refill an SGE Rx buffer ring with mbufs
439 * @q: the ring to refill
440 * @n: the number of new buffers to allocate
442 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
443 * allocated from the queue's mempool. The caller must assure that
444 * @n does not exceed the queue's capacity. Returns the number of buffers
447 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n)
449 return refill_fl_usembufs(adap, q, n);
452 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
454 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail));
458 * Return the number of reclaimable descriptors in a Tx queue.
460 static inline int reclaimable(const struct sge_txq *q)
462 int hw_cidx = ntohs(q->stat->cidx);
466 return hw_cidx + q->size;
471 * reclaim_completed_tx - reclaims completed Tx descriptors
472 * @q: the Tx queue to reclaim completed descriptors from
474 * Reclaims Tx descriptors that the SGE has indicated it has processed.
476 void reclaim_completed_tx(struct sge_txq *q)
478 unsigned int avail = reclaimable(q);
481 /* reclaim as much as possible */
482 reclaim_tx_desc(q, avail);
484 avail = reclaimable(q);
489 * sgl_len - calculates the size of an SGL of the given capacity
490 * @n: the number of SGL entries
492 * Calculates the number of flits needed for a scatter/gather list that
493 * can hold the given number of entries.
495 static inline unsigned int sgl_len(unsigned int n)
498 * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
499 * addresses. The DSGL Work Request starts off with a 32-bit DSGL
500 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
501 * repeated sequences of { Length[i], Length[i+1], Address[i],
502 * Address[i+1] } (this ensures that all addresses are on 64-bit
503 * boundaries). If N is even, then Length[N+1] should be set to 0 and
504 * Address[N+1] is omitted.
506 * The following calculation incorporates all of the above. It's
507 * somewhat hard to follow but, briefly: the "+2" accounts for the
508 * first two flits which include the DSGL header, Length0 and
509 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
510 * flits for every pair of the remaining N-1 entries); and finally the
511 * "+((n-1)&1)" adds the one remaining flit needed if (n-1) is odd.
515 return (3 * n) / 2 + (n & 1) + 2;
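/*
 * Worked example: a 3-fragment packet needs (3*(3-1))/2 + ((3-1)&1) + 2 =
 * 3 + 0 + 2 = 5 flits: one flit for the DSGL header plus Length0, one for
 * Address0, and three for the single {Length1, Length2, Address1, Address2}
 * group.
 */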
519 * flits_to_desc - returns the num of Tx descriptors for the given flits
520 * @n: the number of flits
522 * Returns the number of Tx descriptors needed for the supplied number
525 static inline unsigned int flits_to_desc(unsigned int n)
527 return DIV_ROUND_UP(n, 8);
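/*
 * Continuing the example above: 5 flits fit in DIV_ROUND_UP(5, 8) = 1 Tx
 * descriptor, since each descriptor holds 8 flits of 8 bytes.
 */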
531 * is_eth_imm - can an Ethernet packet be sent as immediate data?
534 * Returns whether an Ethernet packet is small enough to fit as
535 * immediate data. Return value corresponds to the headroom required.
537 static inline int is_eth_imm(const struct rte_mbuf *m)
539 unsigned int hdrlen = (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) ?
540 sizeof(struct cpl_tx_pkt_lso_core) : 0;
542 hdrlen += sizeof(struct cpl_tx_pkt);
543 if (m->pkt_len <= MAX_IMM_TX_PKT_LEN - hdrlen)
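/*
 * For a non-TSO mbuf the required headroom is just sizeof(struct cpl_tx_pkt),
 * so any packet no longer than MAX_IMM_TX_PKT_LEN (256 bytes) minus that
 * header is copied inline instead of being described by an SGL.
 */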
550 * calc_tx_flits - calculate the number of flits for a packet Tx WR
552 * @adap: adapter structure pointer
554 * Returns the number of flits needed for a Tx WR for the given Ethernet
555 * packet, including the needed WR and CPL headers.
557 static inline unsigned int calc_tx_flits(const struct rte_mbuf *m,
558 struct adapter *adap)
560 size_t wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkt_wr) :
561 sizeof(struct fw_eth_tx_pkt_vm_wr);
566 * If the mbuf is small enough, we can pump it out as a work request
567 * with only immediate data. In that case we just have to have the
568 * TX Packet header plus the mbuf data in the Work Request.
571 hdrlen = is_eth_imm(m);
573 return DIV_ROUND_UP(m->pkt_len + hdrlen, sizeof(__be64));
576 * Otherwise, we're going to have to construct a Scatter gather list
577 * of the mbuf body and fragments. We also include the flits necessary
578 * for the TX Packet Work Request and CPL. We always have a firmware
579 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
580 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
581 * message or, if we're doing a Large Send Offload, an LSO CPL message
582 * with an embedded TX Packet Write CPL message.
584 flits = sgl_len(m->nb_segs);
586 flits += (wr_size + sizeof(struct cpl_tx_pkt_lso_core) +
587 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
590 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
595 * write_sgl - populate a scatter/gather list for a packet
597 * @q: the Tx queue we are writing into
598 * @sgl: starting location for writing the SGL
599 * @end: points right after the end of the SGL
600 * @start: start offset into mbuf main-body data to include in the SGL
601 * @addr: address of mapped region
603 * Generates a scatter/gather list for the buffers that make up a packet.
604 * The caller must provide adequate space for the SGL that will be written.
605 * The SGL includes all of the packet's page fragments and the data in its
606 * main body except for the first @start bytes. @sgl must be 16-byte
607 * aligned and within a Tx descriptor with available space. @end points
608 * right after the end of the SGL but does not account for any potential
609 * wrap around, i.e., @end > @sgl.
611 static void write_sgl(struct rte_mbuf *mbuf, struct sge_txq *q,
612 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
613 const dma_addr_t *addr)
616 struct ulptx_sge_pair *to;
617 struct rte_mbuf *m = mbuf;
618 unsigned int nfrags = m->nb_segs;
619 struct ulptx_sge_pair buf[nfrags / 2];
621 len = m->data_len - start;
622 sgl->len0 = htonl(len);
623 sgl->addr0 = rte_cpu_to_be_64(addr[0]);
625 sgl->cmd_nsge = htonl(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
626 V_ULPTX_NSGE(nfrags));
627 if (likely(--nfrags == 0))
630 * Most of the complexity below deals with the possibility we hit the
631 * end of the queue in the middle of writing the SGL. For this case
632 * only we create the SGL in a temporary buffer and then copy it.
634 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
636 for (i = 0; nfrags >= 2; nfrags -= 2, to++) {
638 to->len[0] = rte_cpu_to_be_32(m->data_len);
639 to->addr[0] = rte_cpu_to_be_64(addr[++i]);
641 to->len[1] = rte_cpu_to_be_32(m->data_len);
642 to->addr[1] = rte_cpu_to_be_64(addr[++i]);
646 to->len[0] = rte_cpu_to_be_32(m->data_len);
647 to->len[1] = rte_cpu_to_be_32(0);
648 to->addr[0] = rte_cpu_to_be_64(addr[i + 1]);
650 if (unlikely((u8 *)end > (u8 *)q->stat)) {
651 unsigned int part0 = RTE_PTR_DIFF((u8 *)q->stat,
656 memcpy(sgl->sge, buf, part0);
657 part1 = RTE_PTR_DIFF((u8 *)end, (u8 *)q->stat);
658 rte_memcpy(q->desc, RTE_PTR_ADD((u8 *)buf, part0), part1);
659 end = RTE_PTR_ADD((void *)q->desc, part1);
661 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
665 #define IDXDIFF(head, tail, wrap) \
666 ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
668 #define Q_IDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->size)
669 #define R_IDXDIFF(q, idx) IDXDIFF((q)->cidx, (q)->idx, (q)->size)
671 #define PIDXDIFF(head, tail, wrap) \
672 ((tail) >= (head) ? (tail) - (head) : (wrap) - (head) + (tail))
673 #define P_IDXDIFF(q, idx) PIDXDIFF((q)->cidx, idx, (q)->size)
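/*
 * Worked example for the macros above: with q->size = 1024, pidx = 3 and
 * dbidx = 1020, Q_IDXDIFF(q, dbidx) = 1024 - 1020 + 3 = 7, i.e. seven
 * descriptors have been written since the doorbell was last rung.
 */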
676 * ring_tx_db - ring a Tx queue's doorbell
679 * @n: number of new descriptors to give to HW
681 * Ring the doorbell for a Tx queue.
683 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q)
685 int n = Q_IDXDIFF(q, dbidx);
688 * Make sure that all writes to the TX Descriptors are committed
689 * before we tell the hardware about them.
694 * If we don't have access to the new User Doorbell (T5+), use the old
695 * doorbell mechanism; otherwise use the new BAR2 mechanism.
697 if (unlikely(!q->bar2_addr)) {
701 * For T4 we need to participate in the Doorbell Recovery
705 t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
706 V_QID(q->cntxt_id) | val);
709 q->db_pidx = q->pidx;
711 u32 val = V_PIDX_T5(n);
714 * T4 and later chips share the same PIDX field offset within
715 * the doorbell, but T5 and later shrank the field in order to
716 * gain a bit for Doorbell Priority. The field was absurdly
717 * large in the first place (14 bits) so we just use the T5
718 * and later limits and warn if a Queue ID is too large.
720 WARN_ON(val & F_DBPRIO);
722 writel(val | V_QID(q->bar2_qid),
723 (void *)((uintptr_t)q->bar2_addr + SGE_UDB_KDOORBELL));
726 * This Write Memory Barrier will force the write to the User
727 * Doorbell area to be flushed. This is needed to prevent
728 * writes on different CPUs for the same queue from hitting
729 * the adapter out of order. This is required when some Work
730 * Requests take the Write Combine Gather Buffer path (user
731 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
732 * take the traditional path where we simply increment the
733 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
734 * hardware DMA read the actual Work Request.
742 * Figure out what HW csum a packet wants and return the appropriate control
745 static u64 hwcsum(enum chip_type chip, const struct rte_mbuf *m)
749 if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
750 switch (m->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
751 case RTE_MBUF_F_TX_TCP_CKSUM:
752 csum_type = TX_CSUM_TCPIP;
754 case RTE_MBUF_F_TX_UDP_CKSUM:
755 csum_type = TX_CSUM_UDPIP;
764 if (likely(csum_type >= TX_CSUM_TCPIP)) {
765 u64 hdr_len = V_TXPKT_IPHDR_LEN(m->l3_len);
766 int eth_hdr_len = m->l2_len;
768 if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
769 hdr_len |= V_TXPKT_ETHHDR_LEN(eth_hdr_len);
771 hdr_len |= V_T6_TXPKT_ETHHDR_LEN(eth_hdr_len);
772 return V_TXPKT_CSUM_TYPE(csum_type) | hdr_len;
776 * unknown protocol, disable HW csum
777 * and hope a bad packet is detected
779 return F_TXPKT_L4CSUM_DIS;
782 static inline void txq_advance(struct sge_txq *q, unsigned int n)
786 if (q->pidx >= q->size)
790 #define MAX_COALESCE_LEN 64000
792 static inline int wraps_around(struct sge_txq *q, int ndesc)
794 return (q->pidx + ndesc) > q->size ? 1 : 0;
797 static void tx_timer_cb(void *data)
799 struct adapter *adap = (struct adapter *)data;
800 struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
802 unsigned int coal_idx;
804 /* monitor any pending tx */
805 for (i = 0; i < adap->sge.max_ethqsets; i++, txq++) {
806 if (t4_os_trylock(&txq->txq_lock)) {
807 coal_idx = txq->q.coalesce.idx;
809 if (coal_idx == txq->q.last_coal_idx &&
810 txq->q.pidx == txq->q.last_pidx) {
811 ship_tx_pkt_coalesce_wr(adap, txq);
813 txq->q.last_coal_idx = coal_idx;
814 txq->q.last_pidx = txq->q.pidx;
817 t4_os_unlock(&txq->txq_lock);
820 rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
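/*
 * The 50us alarm re-arms itself above, so once t4_sge_tx_monitor_start()
 * schedules the first callback, a pending coalesce WR that shows no progress
 * (same coalesce idx and same pidx) across one period is shipped even if no
 * further packets arrive on that queue.
 */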
824 * ship_tx_pkt_coalesce_wr - finalizes and ships a coalesce WR
825 * @adap: adapter structure
828 * writes the different fields of the pkts WR and sends it.
830 static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
831 struct sge_eth_txq *txq)
833 struct fw_eth_tx_pkts_vm_wr *vmwr;
834 const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) +
835 sizeof(vmwr->ethmacsrc) +
836 sizeof(vmwr->ethtype) +
837 sizeof(vmwr->vlantci));
838 struct fw_eth_tx_pkts_wr *wr;
839 struct sge_txq *q = &txq->q;
843 /* fill the pkts WR header */
844 wr = (void *)&q->desc[q->pidx];
845 wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
846 vmwr = (void *)&q->desc[q->pidx];
848 wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
849 ndesc = flits_to_desc(q->coalesce.flits);
850 wr->equiq_to_len16 = htonl(wr_mid);
851 wr->plen = cpu_to_be16(q->coalesce.len);
852 wr->npkt = q->coalesce.idx;
855 wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
856 wr->type = q->coalesce.type;
858 wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR));
860 memcpy((void *)vmwr->ethmacdst, (void *)q->coalesce.ethmacdst,
864 /* zero out coalesce structure members */
865 memset((void *)&q->coalesce, 0, sizeof(struct eth_coalesce));
867 txq_advance(q, ndesc);
868 txq->stats.coal_wr++;
869 txq->stats.coal_pkts += wr->npkt;
871 if (Q_IDXDIFF(q, equeidx) >= q->size / 2) {
872 q->equeidx = q->pidx;
873 wr_mid |= F_FW_WR_EQUEQ;
874 wr->equiq_to_len16 = htonl(wr_mid);
880 * should_tx_packet_coalesce - decides whether to coalesce an mbuf or not
881 * @txq: tx queue where the mbuf is sent
882 * @mbuf: mbuf to be sent
883 * @nflits: return value for number of flits needed
884 * @adap: adapter structure
886 * This function decides if a packet should be coalesced or not.
888 static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
889 struct rte_mbuf *mbuf,
890 unsigned int *nflits,
891 struct adapter *adap)
893 struct fw_eth_tx_pkts_vm_wr *wr;
894 const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
895 sizeof(wr->ethmacsrc) +
896 sizeof(wr->ethtype) +
897 sizeof(wr->vlantci));
898 struct sge_txq *q = &txq->q;
899 unsigned int flits, ndesc;
900 unsigned char type = 0;
901 int credits, wr_size;
903 /* use coal WR type 1 when no frags are present */
904 type = (mbuf->nb_segs == 1) ? 1 : 0;
909 if (q->coalesce.idx && memcmp((void *)q->coalesce.ethmacdst,
910 rte_pktmbuf_mtod(mbuf, void *),
912 ship_tx_pkt_coalesce_wr(adap, txq);
915 if (unlikely(type != q->coalesce.type && q->coalesce.idx))
916 ship_tx_pkt_coalesce_wr(adap, txq);
918 /* calculate the number of flits required for coalescing this packet
919 * without the 2 flits of the WR header. These are added further down
920 * if we are just starting in new PKTS WR. sgl_len doesn't account for
921 * the possible 16-byte alignment of ULP TX commands, so we do it here.
923 flits = (sgl_len(mbuf->nb_segs) + 1) & ~1U;
925 flits += (sizeof(struct ulp_txpkt) +
926 sizeof(struct ulptx_idata)) / sizeof(__be64);
927 flits += sizeof(struct cpl_tx_pkt_core) / sizeof(__be64);
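/* To illustrate the split above: type 0 (multi-segment) packets carry a
 * ulp_txpkt + ulptx_idata wrapper in front of the CPL (see
 * tx_do_packet_coalesce()), which the extra flits account for; type 1
 * (single-segment) packets start directly with the CPL.
 */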
930 /* If coalescing is on, the mbuf is added to a pkts WR */
931 if (q->coalesce.idx) {
932 ndesc = DIV_ROUND_UP(q->coalesce.flits + flits, 8);
933 credits = txq_avail(q) - ndesc;
935 /* If we are wrapping or this is the last mbuf, then send the
936 * already coalesced mbufs and let the non-coalesce pass
939 if (unlikely(credits < 0 || wraps_around(q, ndesc))) {
940 ship_tx_pkt_coalesce_wr(adap, txq);
944 /* If the max coalesce len or the max WR len is reached,
945 * ship the WR and keep coalescing on.
947 if (unlikely((q->coalesce.len + mbuf->pkt_len >
949 (q->coalesce.flits + flits >
951 ship_tx_pkt_coalesce_wr(adap, txq);
958 /* start a new pkts WR, the WR header is not filled below */
959 wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkts_wr) :
960 sizeof(struct fw_eth_tx_pkts_vm_wr);
961 flits += wr_size / sizeof(__be64);
962 ndesc = flits_to_desc(q->coalesce.flits + flits);
963 credits = txq_avail(q) - ndesc;
965 if (unlikely(credits < 0 || wraps_around(q, ndesc)))
967 q->coalesce.flits += wr_size / sizeof(__be64);
968 q->coalesce.type = type;
969 q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] +
970 q->coalesce.flits * sizeof(__be64);
972 memcpy((void *)q->coalesce.ethmacdst,
973 rte_pktmbuf_mtod(mbuf, void *), fw_hdr_copy_len);
978 * tx_do_packet_coalesce - add an mbuf to a coalesce WR
979 * @txq: sge_eth_txq used send the mbuf
980 * @mbuf: mbuf to be sent
981 * @flits: flits needed for this mbuf
982 * @adap: adapter structure
983 * @pi: port_info structure
984 * @addr: mapped address of the mbuf
986 * Adds an mbuf to be sent as part of a coalesce WR by filling a
987 * ulp_tx_pkt command, ulp_tx_sc_imm command, cpl message and
988 * ulp_tx_sc_dsgl command.
990 static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
991 struct rte_mbuf *mbuf,
992 int flits, struct adapter *adap,
993 const struct port_info *pi,
994 dma_addr_t *addr, uint16_t nb_pkts)
997 struct sge_txq *q = &txq->q;
998 struct ulp_txpkt *mc;
999 struct ulptx_idata *sc_imm;
1000 struct cpl_tx_pkt_core *cpl;
1001 struct tx_sw_desc *sd;
1002 unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len;
1004 if (q->coalesce.type == 0) {
1005 mc = (struct ulp_txpkt *)q->coalesce.ptr;
1006 mc->cmd_dest = htonl(V_ULPTX_CMD(4) | V_ULP_TXPKT_DEST(0) |
1007 V_ULP_TXPKT_FID(adap->sge.fw_evtq.cntxt_id) |
1009 mc->len = htonl(DIV_ROUND_UP(flits, 2));
1010 sc_imm = (struct ulptx_idata *)(mc + 1);
1011 sc_imm->cmd_more = htonl(V_ULPTX_CMD(ULP_TX_SC_IMM) |
1013 sc_imm->len = htonl(sizeof(*cpl));
1014 end = (u64 *)mc + flits;
1015 cpl = (struct cpl_tx_pkt_core *)(sc_imm + 1);
1017 end = (u64 *)q->coalesce.ptr + flits;
1018 cpl = (struct cpl_tx_pkt_core *)q->coalesce.ptr;
1021 /* update coalesce structure for this txq */
1022 q->coalesce.flits += flits;
1023 q->coalesce.ptr += flits * sizeof(__be64);
1024 q->coalesce.len += mbuf->pkt_len;
1026 /* fill the cpl message; this is the same as in t4_eth_xmit and should be
1027 * kept in sync with it
1029 if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
1030 cntrl = hwcsum(adap->params.chip, mbuf) |
1032 txq->stats.tx_cso++;
1034 cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
1037 if (mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
1038 txq->stats.vlan_ins++;
1039 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci);
1042 cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT));
1044 cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) |
1045 V_TXPKT_PF(adap->pf));
1047 cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id));
1048 cpl->pack = htons(0);
1049 cpl->len = htons(len);
1050 cpl->ctrl1 = cpu_to_be64(cntrl);
1051 write_sgl(mbuf, q, (struct ulptx_sgl *)(cpl + 1), end, 0, addr);
1053 txq->stats.tx_bytes += len;
1055 sd = &q->sdesc[q->pidx + (idx >> 1)];
1057 if (sd->coalesce.idx) {
1060 for (i = 0; i < sd->coalesce.idx; i++) {
1061 rte_pktmbuf_free(sd->coalesce.mbuf[i]);
1062 sd->coalesce.mbuf[i] = NULL;
1067 /* store pointers to the mbuf and the sgl used in free_tx_desc.
1068 * each tx desc can hold two pointers corresponding to the value
1069 * of ETH_COALESCE_PKT_PER_DESC
1071 sd->coalesce.mbuf[idx & 1] = mbuf;
1072 sd->coalesce.sgl[idx & 1] = (struct ulptx_sgl *)(cpl + 1);
1073 sd->coalesce.idx = (idx & 1) + 1;
1075 /* Send the coalesced work request only if the max is reached. However,
1076 * if lower latency is preferred over throughput, then don't wait
1077 * for coalescing the next Tx burst and send the packets now.
1080 if (q->coalesce.idx == adap->params.max_tx_coalesce_num ||
1081 (adap->devargs.tx_mode_latency && q->coalesce.idx >= nb_pkts))
1082 ship_tx_pkt_coalesce_wr(adap, txq);
1088 * t4_eth_xmit - add a packet to an Ethernet Tx queue
1089 * @txq: the egress queue
1092 * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
1094 int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
1097 const struct port_info *pi;
1098 struct cpl_tx_pkt_lso_core *lso;
1099 struct adapter *adap;
1100 struct rte_mbuf *m = mbuf;
1101 struct fw_eth_tx_pkt_wr *wr;
1102 struct fw_eth_tx_pkt_vm_wr *vmwr;
1103 struct cpl_tx_pkt_core *cpl;
1104 struct tx_sw_desc *d;
1105 dma_addr_t addr[m->nb_segs];
1106 unsigned int flits, ndesc, cflits;
1107 int l3hdr_len, l4hdr_len, eth_xtra_len;
1115 /* Reject xmit if queue is stopped */
1116 if (unlikely(txq->flags & EQ_STOPPED))
1120 * The chip min packet length is 10 octets but play safe and reject
1121 * anything shorter than an Ethernet header.
1123 if (unlikely(m->pkt_len < RTE_ETHER_HDR_LEN)) {
1125 rte_pktmbuf_free(m);
1129 max_pkt_len = txq->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1130 if ((!(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) &&
1131 (unlikely(m->pkt_len > max_pkt_len)))
1134 pi = txq->data->dev_private;
1137 cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
1138 /* align the end of coalesce WR to a 512 byte boundary */
1139 txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
1141 if (!((m->ol_flags & RTE_MBUF_F_TX_TCP_SEG) ||
1142 m->pkt_len > RTE_ETHER_MAX_LEN)) {
1143 if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
1144 if (unlikely(map_mbuf(mbuf, addr) < 0)) {
1145 dev_warn(adap, "%s: mapping err for coalesce\n",
1147 txq->stats.mapping_err++;
1150 return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
1157 if (txq->q.coalesce.idx)
1158 ship_tx_pkt_coalesce_wr(adap, txq);
1160 flits = calc_tx_flits(m, adap);
1161 ndesc = flits_to_desc(flits);
1162 credits = txq_avail(&txq->q) - ndesc;
1164 if (unlikely(credits < 0)) {
1165 dev_debug(adap, "%s: Tx ring %u full; credits = %d\n",
1166 __func__, txq->q.cntxt_id, credits);
1170 if (unlikely(map_mbuf(m, addr) < 0)) {
1171 txq->stats.mapping_err++;
1175 wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
1176 if (Q_IDXDIFF(&txq->q, equeidx) >= 64) {
1177 txq->q.equeidx = txq->q.pidx;
1178 wr_mid |= F_FW_WR_EQUEQ;
1181 wr = (void *)&txq->q.desc[txq->q.pidx];
1182 vmwr = (void *)&txq->q.desc[txq->q.pidx];
1183 wr->equiq_to_len16 = htonl(wr_mid);
1185 wr->r3 = rte_cpu_to_be_64(0);
1186 end = (u64 *)wr + flits;
1188 const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) +
1189 sizeof(vmwr->ethmacsrc) +
1190 sizeof(vmwr->ethtype) +
1191 sizeof(vmwr->vlantci));
1193 vmwr->r3[0] = rte_cpu_to_be_32(0);
1194 vmwr->r3[1] = rte_cpu_to_be_32(0);
1195 memcpy((void *)vmwr->ethmacdst, rte_pktmbuf_mtod(m, void *),
1197 end = (u64 *)vmwr + flits;
1201 len += sizeof(*cpl);
1203 /* Coalescing skipped and we send through normal path */
1204 if (!(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
1205 wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
1207 FW_ETH_TX_PKT_VM_WR) |
1208 V_FW_WR_IMMDLEN(len));
1210 cpl = (void *)(wr + 1);
1212 cpl = (void *)(vmwr + 1);
1213 if (m->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
1214 cntrl = hwcsum(adap->params.chip, m) |
1216 txq->stats.tx_cso++;
1220 lso = (void *)(wr + 1);
1222 lso = (void *)(vmwr + 1);
1223 v6 = (m->ol_flags & RTE_MBUF_F_TX_IPV6) != 0;
1224 l3hdr_len = m->l3_len;
1225 l4hdr_len = m->l4_len;
1226 eth_xtra_len = m->l2_len - RTE_ETHER_HDR_LEN;
1227 len += sizeof(*lso);
1228 wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
1230 FW_ETH_TX_PKT_VM_WR) |
1231 V_FW_WR_IMMDLEN(len));
1232 lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
1233 F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
1235 V_LSO_ETHHDR_LEN(eth_xtra_len / 4) |
1236 V_LSO_IPHDR_LEN(l3hdr_len / 4) |
1237 V_LSO_TCPHDR_LEN(l4hdr_len / 4));
1238 lso->ipid_ofst = htons(0);
1239 lso->mss = htons(m->tso_segsz);
1240 lso->seqno_offset = htonl(0);
1241 if (is_t4(adap->params.chip))
1242 lso->len = htonl(m->pkt_len);
1244 lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len));
1245 cpl = (void *)(lso + 1);
1247 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
1248 cntrl = V_TXPKT_ETHHDR_LEN(eth_xtra_len);
1250 cntrl = V_T6_TXPKT_ETHHDR_LEN(eth_xtra_len);
1252 cntrl |= V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 :
1254 V_TXPKT_IPHDR_LEN(l3hdr_len);
1256 txq->stats.tx_cso += m->tso_segsz;
1259 if (m->ol_flags & RTE_MBUF_F_TX_VLAN) {
1260 txq->stats.vlan_ins++;
1261 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci);
1264 cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT));
1266 cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) |
1267 V_TXPKT_PF(adap->pf));
1269 cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id) |
1272 cpl->pack = htons(0);
1273 cpl->len = htons(m->pkt_len);
1274 cpl->ctrl1 = cpu_to_be64(cntrl);
1277 txq->stats.tx_bytes += m->pkt_len;
1278 last_desc = txq->q.pidx + ndesc - 1;
1279 if (last_desc >= (int)txq->q.size)
1280 last_desc -= txq->q.size;
1282 d = &txq->q.sdesc[last_desc];
1283 if (d->coalesce.idx) {
1286 for (i = 0; i < d->coalesce.idx; i++) {
1287 rte_pktmbuf_free(d->coalesce.mbuf[i]);
1288 d->coalesce.mbuf[i] = NULL;
1290 d->coalesce.idx = 0;
1292 write_sgl(m, &txq->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
1294 txq->q.sdesc[last_desc].mbuf = m;
1295 txq->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
1296 txq_advance(&txq->q, ndesc);
1297 ring_tx_db(adap, &txq->q);
1302 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1303 * @q: the SGE control Tx queue
1305 * This is a variant of reclaim_completed_tx() that is used for Tx queues
1306 * that send only immediate data (presently just the control queues) and
1307 * thus do not have any mbufs to release.
1309 static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1311 int hw_cidx = ntohs(q->stat->cidx);
1312 int reclaim = hw_cidx - q->cidx;
1317 q->in_use -= reclaim;
1322 * is_imm - check whether a packet can be sent as immediate data
1325 * Returns true if a packet can be sent as a WR with immediate data.
1327 static inline int is_imm(const struct rte_mbuf *mbuf)
1329 return mbuf->pkt_len <= MAX_CTRL_WR_LEN;
1333 * inline_tx_mbuf: inline a packet's data into TX descriptors
1334 * @q: the TX queue where the packet will be inlined
1335 * @from: pointer to data portion of packet
1336 * @to: pointer after cpl where data has to be inlined
1337 * @len: length of data to inline
1339 * Inline a packet's contents directly to TX descriptors, starting at
1340 * the given position within the TX DMA ring.
1341 * Most of the complexity of this operation is dealing with wrap arounds
1342 * in the middle of the packet we want to inline.
1344 static void inline_tx_mbuf(const struct sge_txq *q, caddr_t from, caddr_t *to,
1347 int left = RTE_PTR_DIFF(q->stat, *to);
1349 if (likely((uintptr_t)*to + len <= (uintptr_t)q->stat)) {
1350 rte_memcpy(*to, from, len);
1351 *to = RTE_PTR_ADD(*to, len);
1353 rte_memcpy(*to, from, left);
1354 from = RTE_PTR_ADD(from, left);
1356 rte_memcpy((void *)q->desc, from, left);
1357 *to = RTE_PTR_ADD((void *)q->desc, left);
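/*
 * Illustrative wrap-around case: if only 16 bytes remain before the status
 * page at q->stat and len is 40, the first 16 bytes are copied to the tail
 * of the ring and the remaining 24 continue at q->desc, leaving *to pointing
 * 24 bytes past the ring base.
 */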
1362 * ctrl_xmit - send a packet through an SGE control Tx queue
1363 * @q: the control queue
1366 * Send a packet through an SGE control Tx queue. Packets sent through
1367 * a control queue must fit entirely as immediate data.
1369 static int ctrl_xmit(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
1372 struct fw_wr_hdr *wr;
1375 if (unlikely(!is_imm(mbuf))) {
1377 rte_pktmbuf_free(mbuf);
1381 reclaim_completed_tx_imm(&q->q);
1382 ndesc = DIV_ROUND_UP(mbuf->pkt_len, sizeof(struct tx_desc));
1383 t4_os_lock(&q->ctrlq_lock);
1385 q->full = txq_avail(&q->q) < ndesc ? 1 : 0;
1386 if (unlikely(q->full)) {
1387 t4_os_unlock(&q->ctrlq_lock);
1391 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1393 inline_tx_mbuf(&q->q, rte_pktmbuf_mtod(mbuf, caddr_t),
1394 &dst, mbuf->data_len);
1396 txq_advance(&q->q, ndesc);
1397 if (unlikely(txq_avail(&q->q) < 64))
1398 wr->lo |= htonl(F_FW_WR_EQUEQ);
1402 ring_tx_db(q->adapter, &q->q);
1403 t4_os_unlock(&q->ctrlq_lock);
1405 rte_pktmbuf_free(mbuf);
1410 * t4_mgmt_tx - send a management message
1411 * @q: the control queue
1412 * @mbuf: the packet containing the management message
1414 * Send a management message through the control queue.
1416 int t4_mgmt_tx(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
1418 return ctrl_xmit(q, mbuf);
1422 * alloc_ring - allocate resources for an SGE descriptor ring
1423 * @dev: the port associated with the queue
1424 * @z_name: memzone's name
1425 * @queue_id: queue index
1426 * @socket_id: preferred socket id for memory allocations
1427 * @nelem: the number of descriptors
1428 * @elem_size: the size of each descriptor
1429 * @stat_size: extra space in HW ring for status information
1430 * @sw_size: the size of the SW state associated with each ring element
1431 * @phys: the physical address of the allocated ring
1432 * @metadata: address of the array holding the SW state for the ring
1434 * Allocates resources for an SGE descriptor ring, such as Tx queues,
1435 * free buffer lists, or response queues. Each SGE ring requires
1436 * space for its HW descriptors plus, optionally, space for the SW state
1437 * associated with each HW entry (the metadata). The function returns
1438 * three values: the virtual address for the HW ring (the return value
1439 * of the function), the bus address of the HW ring, and the address
1442 static void *alloc_ring(struct rte_eth_dev *dev, const char *z_name,
1443 uint16_t queue_id, int socket_id, size_t nelem,
1444 size_t elem_size, size_t stat_size, size_t sw_size,
1445 dma_addr_t *phys, void *metadata)
1447 size_t len = CXGBE_MAX_RING_DESC_SIZE * elem_size + stat_size;
1448 char z_name_sw[RTE_MEMZONE_NAMESIZE];
1449 const struct rte_memzone *tz;
1452 snprintf(z_name_sw, sizeof(z_name_sw), "eth_p%d_q%d_%s_sw_ring",
1453 dev->data->port_id, queue_id, z_name);
1455 dev_debug(adapter, "%s: nelem = %zu; elem_size = %zu; sw_size = %zu; "
1456 "stat_size = %zu; queue_id = %u; socket_id = %d; z_name = %s;"
1457 " z_name_sw = %s\n", __func__, nelem, elem_size, sw_size,
1458 stat_size, queue_id, socket_id, z_name, z_name_sw);
1461 * Allocate TX/RX ring hardware descriptors. A memzone large enough to
1462 * handle the maximum ring size is allocated in order to allow for
1463 * resizing in later calls to the queue setup function.
1465 tz = rte_eth_dma_zone_reserve(dev, z_name, queue_id, len, 4096,
1470 memset(tz->addr, 0, len);
1472 s = rte_zmalloc_socket(z_name_sw, nelem * sw_size,
1473 RTE_CACHE_LINE_SIZE, socket_id);
1476 dev_err(adapter, "%s: failed to get sw_ring memory\n",
1482 *(void **)metadata = s;
1484 *phys = (uint64_t)tz->iova;
1488 #define CXGB4_MSG_AN ((void *)1)
1491 * rspq_next - advance to the next entry in a response queue
1494 * Updates the state of a response queue to advance it to the next entry.
1496 static inline void rspq_next(struct sge_rspq *q)
1498 q->cur_desc = (const __be64 *)((const char *)q->cur_desc + q->iqe_len);
1499 if (unlikely(++q->cidx == q->size)) {
1502 q->cur_desc = q->desc;
1506 static inline void cxgbe_set_mbuf_info(struct rte_mbuf *pkt, uint32_t ptype,
1509 pkt->packet_type |= ptype;
1510 pkt->ol_flags |= ol_flags;
1513 static inline void cxgbe_fill_mbuf_info(struct adapter *adap,
1514 const struct cpl_rx_pkt *cpl,
1515 struct rte_mbuf *pkt)
1520 if (adap->params.tp.rx_pkt_encap)
1521 err_vec = G_T6_COMPR_RXERR_VEC(ntohs(cpl->err_vec));
1523 err_vec = ntohs(cpl->err_vec);
1525 csum_ok = cpl->csum_calc && !err_vec;
1528 cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER_VLAN,
1529 RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
1531 cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER, 0);
1533 if (cpl->l2info & htonl(F_RXF_IP))
1534 cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV4,
1535 csum_ok ? RTE_MBUF_F_RX_IP_CKSUM_GOOD :
1536 RTE_MBUF_F_RX_IP_CKSUM_BAD);
1537 else if (cpl->l2info & htonl(F_RXF_IP6))
1538 cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV6,
1539 csum_ok ? RTE_MBUF_F_RX_IP_CKSUM_GOOD :
1540 RTE_MBUF_F_RX_IP_CKSUM_BAD);
1542 if (cpl->l2info & htonl(F_RXF_TCP))
1543 cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_TCP,
1544 csum_ok ? RTE_MBUF_F_RX_L4_CKSUM_GOOD :
1545 RTE_MBUF_F_RX_L4_CKSUM_BAD);
1546 else if (cpl->l2info & htonl(F_RXF_UDP))
1547 cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_UDP,
1548 csum_ok ? RTE_MBUF_F_RX_L4_CKSUM_GOOD :
1549 RTE_MBUF_F_RX_L4_CKSUM_BAD);
1553 * process_responses - process responses from an SGE response queue
1554 * @q: the ingress queue to process
1555 * @budget: how many responses can be processed in this round
1556 * @rx_pkts: mbuf to put the pkts
1558 * Process responses from an SGE response queue up to the supplied budget.
1559 * Responses include received packets as well as control messages from FW
1562 * Additionally choose the interrupt holdoff time for the next interrupt
1563 * on this queue. If the system is under memory shortage, use a fairly
1564 * long delay to help recovery.
1566 static int process_responses(struct sge_rspq *q, int budget,
1567 struct rte_mbuf **rx_pkts)
1569 int ret = 0, rsp_type;
1570 int budget_left = budget;
1571 const struct rsp_ctrl *rc;
1572 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1574 while (likely(budget_left)) {
1575 if (q->cidx == ntohs(q->stat->pidx))
1578 rc = (const struct rsp_ctrl *)
1579 ((const char *)q->cur_desc + (q->iqe_len - sizeof(*rc)));
1582 * Ensure response has been read
1585 rsp_type = G_RSPD_TYPE(rc->u.type_gen);
1587 if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) {
1588 struct sge *s = &q->adapter->sge;
1589 unsigned int stat_pidx;
1592 stat_pidx = ntohs(q->stat->pidx);
1593 stat_pidx_diff = P_IDXDIFF(q, stat_pidx);
1594 while (stat_pidx_diff && budget_left) {
1595 const struct rx_sw_desc *rsd =
1596 &rxq->fl.sdesc[rxq->fl.cidx];
1597 const struct rss_header *rss_hdr =
1598 (const void *)q->cur_desc;
1599 const struct cpl_rx_pkt *cpl =
1600 (const void *)&q->cur_desc[1];
1601 struct rte_mbuf *pkt, *npkt;
1604 rc = (const struct rsp_ctrl *)
1605 ((const char *)q->cur_desc +
1606 (q->iqe_len - sizeof(*rc)));
1608 rsp_type = G_RSPD_TYPE(rc->u.type_gen);
1609 if (unlikely(rsp_type != X_RSPD_TYPE_FLBUF))
1612 len = ntohl(rc->pldbuflen_qid);
1613 BUG_ON(!(len & F_RSPD_NEWBUF));
1616 len = G_RSPD_LEN(len);
1619 /* Chain additional mbufs until the full packet length (len) is covered */
1621 struct rte_mbuf *new_pkt = rsd->buf;
1623 bufsz = min(get_buf_size(q->adapter,
1625 new_pkt->data_len = bufsz;
1626 unmap_rx_buf(&rxq->fl);
1628 npkt->next = new_pkt;
1631 rsd = &rxq->fl.sdesc[rxq->fl.cidx];
1636 cxgbe_fill_mbuf_info(q->adapter, cpl, pkt);
1638 if (!rss_hdr->filter_tid &&
1639 rss_hdr->hash_type) {
1640 pkt->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
1642 ntohl(rss_hdr->hash_val);
1646 pkt->vlan_tci = ntohs(cpl->vlan);
1648 rte_pktmbuf_adj(pkt, s->pktshift);
1650 rxq->stats.rx_bytes += pkt->pkt_len;
1651 rx_pkts[budget - budget_left] = pkt;
1658 } else if (likely(rsp_type == X_RSPD_TYPE_CPL)) {
1659 ret = q->handler(q, q->cur_desc, NULL);
1661 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
1664 if (unlikely(ret)) {
1665 /* couldn't process descriptor, back off for recovery */
1666 q->next_intr_params = V_QINTR_TIMER_IDX(NOMEM_TMR_IDX);
1675 * If this is a Response Queue with an associated Free List and
1676 * there's room for another chunk of new Free List buffer pointers,
1677 * refill the Free List.
1680 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
1681 __refill_fl(q->adapter, &rxq->fl);
1683 return budget - budget_left;
1686 int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
1687 unsigned int budget, unsigned int *work_done)
1689 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1690 unsigned int cidx_inc;
1691 unsigned int params;
1694 if (unlikely(rxq->flags & IQ_STOPPED)) {
1699 *work_done = process_responses(q, budget, rx_pkts);
1702 cidx_inc = R_IDXDIFF(q, gts_idx);
1704 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
1705 __refill_fl(q->adapter, &rxq->fl);
1707 params = q->intr_params;
1708 q->next_intr_params = params;
1709 val = V_CIDXINC(cidx_inc) | V_SEINTARM(params);
1711 if (unlikely(!q->bar2_addr)) {
1712 u32 reg = is_pf4(q->adapter) ? MYPF_REG(A_SGE_PF_GTS) :
1713 T4VF_SGE_BASE_ADDR +
1716 t4_write_reg(q->adapter, reg,
1717 val | V_INGRESSQID((u32)q->cntxt_id));
1719 writel(val | V_INGRESSQID(q->bar2_qid),
1720 (void *)((uintptr_t)q->bar2_addr + SGE_UDB_GTS));
1721 /* This Write Memory Barrier will force the
1722 * write to the User Doorbell area to be
1727 q->gts_idx = q->cidx;
1733 * bar2_address - return the BAR2 address for an SGE Queue's Registers
1734 * @adapter: the adapter
1735 * @qid: the SGE Queue ID
1736 * @qtype: the SGE Queue Type (Egress or Ingress)
1737 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
1739 * Returns the BAR2 address for the SGE Queue Registers associated with
1740 * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
1741 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
1742 * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
1743 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
1745 static void __iomem *bar2_address(struct adapter *adapter, unsigned int qid,
1746 enum t4_bar2_qtype qtype,
1747 unsigned int *pbar2_qid)
1752 ret = t4_bar2_sge_qregs(adapter, qid, qtype, &bar2_qoffset, pbar2_qid);
1756 return adapter->bar2 + bar2_qoffset;
1759 int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_eth_rxq *rxq)
1761 unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;
1763 rxq->flags &= ~IQ_STOPPED;
1764 return t4_iq_start_stop(adap, adap->mbox, true, adap->pf, 0,
1765 rxq->rspq.cntxt_id, fl_id, 0xffff);
1768 int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_eth_rxq *rxq)
1770 unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;
1772 rxq->flags |= IQ_STOPPED;
1773 return t4_iq_start_stop(adap, adap->mbox, false, adap->pf, 0,
1774 rxq->rspq.cntxt_id, fl_id, 0xffff);
1778 * @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
1779 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
1781 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
1782 struct rte_eth_dev *eth_dev, int intr_idx,
1783 struct sge_fl *fl, rspq_handler_t hnd, int cong,
1784 struct rte_mempool *mp, int queue_id, int socket_id)
1788 struct sge *s = &adap->sge;
1789 struct port_info *pi = eth_dev->data->dev_private;
1790 unsigned int nb_refill;
1793 /* Size needs to be multiple of 16, including status entry. */
1794 iq->size = cxgbe_roundup(iq->size, 16);
1796 iq->desc = alloc_ring(eth_dev, fwevtq ? "fwq_ring" : "rx_ring",
1797 queue_id, socket_id, iq->size, iq->iqe_len,
1798 0, 0, &iq->phys_addr, NULL);
1802 memset(&c, 0, sizeof(c));
1803 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
1804 F_FW_CMD_WRITE | F_FW_CMD_EXEC);
1807 pciechan = pi->tx_chan;
1808 c.op_to_vfn |= htonl(V_FW_IQ_CMD_PFN(adap->pf) |
1809 V_FW_IQ_CMD_VFN(0));
1811 c.iqns_to_fl0congen =
1812 htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
1813 V_FW_IQ_CMD_IQTYPE(cong ?
1815 FW_IQ_IQTYPE_OFLD) |
1818 pciechan = pi->port_id;
1821 c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
1823 c.type_to_iqandstindex =
1824 htonl(V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
1825 V_FW_IQ_CMD_IQASYNCH(fwevtq) |
1826 V_FW_IQ_CMD_VIID(pi->viid) |
1827 V_FW_IQ_CMD_IQANDST(intr_idx < 0) |
1828 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_STATUS_PAGE) |
1829 V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
1831 c.iqdroprss_to_iqesize =
1832 htons(V_FW_IQ_CMD_IQPCIECH(pciechan) |
1833 F_FW_IQ_CMD_IQGTSMODE |
1834 V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
1835 V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
1836 c.iqsize = htons(iq->size);
1837 c.iqaddr = cpu_to_be64(iq->phys_addr);
1840 struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,
1842 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
1845 * Allocate the ring for the hardware free list (with space
1846 * for its status page) along with the associated software
1847 * descriptor ring. The free list size needs to be a multiple
1848 * of the Egress Queue Unit and at least 2 Egress Units larger
1849 * than the SGE's Egress Congestion Threshold
1850 * (fl_starve_thres - 1).
1852 if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
1853 fl->size = s->fl_starve_thres - 1 + 2 * 8;
1854 fl->size = cxgbe_roundup(fl->size, 8);
1856 fl->desc = alloc_ring(eth_dev, "fl_ring", queue_id, socket_id,
1857 fl->size, sizeof(__be64), s->stat_len,
1858 sizeof(struct rx_sw_desc),
1859 &fl->addr, &fl->sdesc);
1865 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
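/*
 * fl->size counts individual buffer pointers; dividing by 8 converts it to
 * hardware Free List descriptors (one descriptor = 8 pointers, see fl_cap()),
 * and the status page length is expressed in the same descriptor-sized units.
 */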
1866 c.iqns_to_fl0congen |=
1867 htonl(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
1868 (unlikely(rxq->usembufs) ?
1869 0 : F_FW_IQ_CMD_FL0PACKEN) |
1870 F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
1871 F_FW_IQ_CMD_FL0PADEN);
1872 if (is_pf4(adap) && cong >= 0)
1873 c.iqns_to_fl0congen |=
1874 htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
1875 F_FW_IQ_CMD_FL0CONGCIF |
1876 F_FW_IQ_CMD_FL0CONGEN);
1878 /* In T6, for egress queue type FL there is internal overhead
1879 * of 16B for header going into FLM module.
1880 * Hence maximum allowed burst size will be 448 bytes.
1882 c.fl0dcaen_to_fl0cidxfthresh =
1883 htons(V_FW_IQ_CMD_FL0FBMIN(chip_ver <= CHELSIO_T5 ?
1884 X_FETCHBURSTMIN_128B :
1885 X_FETCHBURSTMIN_64B) |
1886 V_FW_IQ_CMD_FL0FBMAX(chip_ver <= CHELSIO_T5 ?
1887 X_FETCHBURSTMAX_512B :
1888 X_FETCHBURSTMAX_256B));
1889 c.fl0size = htons(flsz);
1890 c.fl0addr = cpu_to_be64(fl->addr);
1894 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
1896 ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
1900 iq->cur_desc = iq->desc;
1904 iq->next_intr_params = iq->intr_params;
1905 iq->cntxt_id = ntohs(c.iqid);
1906 iq->abs_id = ntohs(c.physiqid);
1907 iq->bar2_addr = bar2_address(adap, iq->cntxt_id, T4_BAR2_QTYPE_INGRESS,
1909 iq->size--; /* subtract status entry */
1910 iq->stat = (void *)&iq->desc[iq->size * 8];
1911 iq->eth_dev = eth_dev;
1913 iq->port_id = pi->pidx;
1916 /* set offset to -1 to distinguish ingress queues without FL */
1917 iq->offset = fl ? 0 : -1;
1920 fl->cntxt_id = ntohs(c.fl0id);
1925 fl->alloc_failed = 0;
1928 * Note, we must initialize the BAR2 Free List User Doorbell
1929 * information before refilling the Free List!
1931 fl->bar2_addr = bar2_address(adap, fl->cntxt_id,
1932 T4_BAR2_QTYPE_EGRESS,
1935 nb_refill = refill_fl(adap, fl, fl_cap(fl));
1936 if (nb_refill != fl_cap(fl)) {
1938 dev_err(adap, "%s: mbuf alloc failed with error: %d\n",
1945 * For T5 and later we attempt to set up the Congestion Manager values
1946 * of the new RX Ethernet Queue. This should really be handled by
1947 * firmware because it's more complex than any host driver wants to
1948 * get involved with and it's different per chip and this is almost
1949 * certainly wrong. Firmware would be wrong as well, but it would be
1950 * a lot easier to fix in one place ... For now we do something very
1951 * simple (and hopefully less wrong).
1953 if (is_pf4(adap) && !is_t4(adap->params.chip) && cong >= 0) {
1954 u8 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;
1955 u32 param, val, ch_map = 0;
1958 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1959 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
1960 V_FW_PARAMS_PARAM_YZ(iq->cntxt_id));
1962 val = V_CONMCTXT_CNGTPMODE(X_CONMCTXT_CNGTPMODE_QUEUE);
1964 val = V_CONMCTXT_CNGTPMODE(
1965 X_CONMCTXT_CNGTPMODE_CHANNEL);
1966 for (i = 0; i < 4; i++) {
1967 if (cong & (1 << i))
1968 ch_map |= 1 << (i << cng_ch_bits_log);
1970 val |= V_CONMCTXT_CNGCHMAP(ch_map);
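/*
 * Worked example (cng_ch_bits_log is chip-specific; assume 2 here for
 * illustration): a congestion map of 0x5 (channels 0 and 2) expands to
 * ch_map = (1 << 0) | (1 << 8) = 0x101 before being packed into the CONM
 * context value.
 */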
1972 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
1975 dev_warn(adap->pdev_dev, "Failed to set Congestion Manager Context for Ingress Queue %d: %d\n",
1976 iq->cntxt_id, -ret);
1982 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
1983 iq->cntxt_id, fl->cntxt_id, 0xffff);
1990 if (fl && fl->desc) {
1991 rte_free(fl->sdesc);
1999 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id,
2000 unsigned int abs_id)
2004 q->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS,
2011 q->coalesce.idx = 0;
2012 q->coalesce.len = 0;
2013 q->coalesce.flits = 0;
2014 q->last_coal_idx = 0;
2016 q->stat = (void *)&q->desc[q->size];
2019 int t4_sge_eth_txq_start(struct sge_eth_txq *txq)
2022 * TODO: For flow-control, queue may be stopped waiting to reclaim
2024 * Ensure queue is in EQ_STOPPED state before starting it.
2026 if (!(txq->flags & EQ_STOPPED))
2029 txq->flags &= ~EQ_STOPPED;
2034 int t4_sge_eth_txq_stop(struct sge_eth_txq *txq)
2036 txq->flags |= EQ_STOPPED;
2041 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2042 struct rte_eth_dev *eth_dev, uint16_t queue_id,
2043 unsigned int iqid, int socket_id)
2046 struct fw_eq_eth_cmd c;
2047 struct sge *s = &adap->sge;
2048 struct port_info *pi = eth_dev->data->dev_private;
2051 /* Add status entries */
2052 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
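/*
 * nentries is the ring size as seen by the hardware: the usable Tx
 * descriptors in txq->q.size plus the extra descriptors occupied by the
 * status page (init_txq() later points q->stat just past q->size).
 */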
2054 txq->q.desc = alloc_ring(eth_dev, "tx_ring", queue_id, socket_id,
2055 txq->q.size, sizeof(struct tx_desc),
2056 s->stat_len, sizeof(struct tx_sw_desc),
2057 &txq->q.phys_addr, &txq->q.sdesc);
2061 memset(&c, 0, sizeof(c));
2062 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
2063 F_FW_CMD_WRITE | F_FW_CMD_EXEC);
2065 pciechan = pi->tx_chan;
2066 c.op_to_vfn |= htonl(V_FW_EQ_ETH_CMD_PFN(adap->pf) |
2067 V_FW_EQ_ETH_CMD_VFN(0));
2069 pciechan = pi->port_id;
2072 c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC |
2073 F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16));
2074 c.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE |
2075 V_FW_EQ_ETH_CMD_VIID(pi->viid));
2076 c.fetchszm_to_iqid =
2077 htonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
2078 V_FW_EQ_ETH_CMD_PCIECHN(pciechan) |
2079 F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid));
2081 htonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
2082 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
2083 V_FW_EQ_ETH_CMD_EQSIZE(nentries));
2084 c.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr);
2087 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2089 ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
2091 rte_free(txq->q.sdesc);
2092 txq->q.sdesc = NULL;
2097 init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)),
2098 G_FW_EQ_ETH_CMD_PHYSEQID(ntohl(c.physeqid_pkd)));
2100 txq->stats.pkts = 0;
2101 txq->stats.tx_cso = 0;
2102 txq->stats.coal_wr = 0;
2103 txq->stats.vlan_ins = 0;
2104 txq->stats.tx_bytes = 0;
2105 txq->stats.coal_pkts = 0;
2106 txq->stats.mapping_err = 0;
2107 txq->flags |= EQ_STOPPED;
2108 txq->eth_dev = eth_dev;
2109 txq->data = eth_dev->data;
2110 t4_os_lock_init(&txq->txq_lock);
2114 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2115 struct rte_eth_dev *eth_dev, uint16_t queue_id,
2116 unsigned int iqid, int socket_id)
2119 struct fw_eq_ctrl_cmd c;
2120 struct sge *s = &adap->sge;
2121 struct port_info *pi = eth_dev->data->dev_private;
2123 /* Add status entries */
2124 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2126 txq->q.desc = alloc_ring(eth_dev, "ctrl_tx_ring", queue_id,
2127 socket_id, txq->q.size, sizeof(struct tx_desc),
2128 0, 0, &txq->q.phys_addr, NULL);
2132 memset(&c, 0, sizeof(c));
2133 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
2134 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
2135 V_FW_EQ_CTRL_CMD_PFN(adap->pf) |
2136 V_FW_EQ_CTRL_CMD_VFN(0));
2137 c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_ALLOC |
2138 F_FW_EQ_CTRL_CMD_EQSTART | (sizeof(c) / 16));
2139 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(0));
2140 c.physeqid_pkd = htonl(0);
2141 c.fetchszm_to_iqid =
2142 htonl(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
2143 V_FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
2144 F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(iqid));
2146 htonl(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
2147 V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
2148 V_FW_EQ_CTRL_CMD_EQSIZE(nentries));
2149 c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2151 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2157 init_txq(adap, &txq->q, G_FW_EQ_CTRL_CMD_EQID(ntohl(c.cmpliqid_eqid)),
2158 G_FW_EQ_CTRL_CMD_EQID(ntohl(c.physeqid_pkd)));
2159 txq->adapter = adap;
2164 static void free_txq(struct sge_txq *q)
2171 static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2174 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
2176 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
2177 rq->cntxt_id, fl_id, 0xffff);
2183 free_rx_bufs(fl, fl->avail);
2184 rte_free(fl->sdesc);
2192 * Clear all queues of the port
2194 * Note: This function must only be called after the Rx and Tx paths
2195 * of the port have been disabled.
2197 void t4_sge_eth_clear_queues(struct port_info *pi)
2199 struct adapter *adap = pi->adapter;
2200 struct sge_eth_rxq *rxq;
2201 struct sge_eth_txq *txq;
2204 rxq = &adap->sge.ethrxq[pi->first_rxqset];
2205 for (i = 0; i < pi->n_rx_qsets; i++, rxq++) {
2207 t4_sge_eth_rxq_stop(adap, rxq);
2210 txq = &adap->sge.ethtxq[pi->first_txqset];
2211 for (i = 0; i < pi->n_tx_qsets; i++, txq++) {
2213 struct sge_txq *q = &txq->q;
2215 t4_sge_eth_txq_stop(txq);
2216 reclaim_completed_tx(q);
2217 free_tx_desc(q, q->size);
2218 q->equeidx = q->pidx;
void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq)
{
	if (rxq->rspq.desc) {
		t4_sge_eth_rxq_stop(adap, rxq);
		free_rspq_fl(adap, &rxq->rspq, rxq->fl.size ? &rxq->fl : NULL);
	}
}
void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq)
{
	if (txq->q.desc) {
		t4_sge_eth_txq_stop(txq);
		reclaim_completed_tx(&txq->q);
		t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, txq->q.cntxt_id);
		free_tx_desc(&txq->q, txq->q.size);
		rte_free(txq->q.sdesc);
	}
}
void t4_sge_eth_release_queues(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	struct sge_eth_rxq *rxq;
	struct sge_eth_txq *txq;
	unsigned int i;

	rxq = &adap->sge.ethrxq[pi->first_rxqset];
	/* clean up Ethernet Tx/Rx queues */
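	/*
	 * The "rx_ring", "fl_ring" and "tx_ring" DMA memzones freed below
	 * correspond to the per-queue descriptor rings created through
	 * alloc_ring() at queue setup time; they are released by ring name
	 * and queue index.
	 */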
	for (i = 0; i < pi->n_rx_qsets; i++, rxq++) {
		/* Free only the queues allocated */
		if (rxq->rspq.desc) {
			t4_sge_eth_rxq_release(adap, rxq);
			rte_eth_dma_zone_free(rxq->rspq.eth_dev, "fl_ring", i);
			rte_eth_dma_zone_free(rxq->rspq.eth_dev, "rx_ring", i);
			rxq->rspq.eth_dev = NULL;
		}
	}

	txq = &adap->sge.ethtxq[pi->first_txqset];
	for (i = 0; i < pi->n_tx_qsets; i++, txq++) {
		/* Free only the queues allocated */
		if (txq->q.desc) {
			t4_sge_eth_txq_release(adap, txq);
			rte_eth_dma_zone_free(txq->eth_dev, "tx_ring", i);
			txq->eth_dev = NULL;
		}
	}
}
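
/*
 * The Tx monitor is built on DPDK's EAL alarm framework: rte_eal_alarm_set()
 * arms a one-shot callback 50 us in the future, and tx_timer_cb is expected
 * to re-arm itself, presumably flushing any coalesced Tx work requests that
 * have gone idle in the meantime (see ship_tx_pkt_coalesce_wr()).
 */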
void t4_sge_tx_monitor_start(struct adapter *adap)
{
	rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
}

void t4_sge_tx_monitor_stop(struct adapter *adap)
{
	rte_eal_alarm_cancel(tx_timer_cb, (void *)adap);
}
/**
 * t4_free_sge_resources - free SGE resources
 * @adap: the adapter
 *
 * Frees resources used by the SGE queue sets.
 */
void t4_free_sge_resources(struct adapter *adap)
{
	unsigned int i;

	/* clean up control Tx queues */
	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];

		if (cq->q.desc) {
			reclaim_completed_tx_imm(&cq->q);
			t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
					cq->q.cntxt_id);
			rte_eth_dma_zone_free(adap->eth_dev, "ctrl_tx_ring", i);
			rte_mempool_free(cq->mb_pool);
			cq->mb_pool = NULL;
		}
	}

	/* clean up firmware event queue */
	if (adap->sge.fw_evtq.desc) {
		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
		rte_eth_dma_zone_free(adap->eth_dev, "fwq_ring", 0);
	}
}
/**
 * t4_sge_init - initialize SGE
 * @adap: the adapter
 *
 * Performs SGE initialization needed every time after a chip reset.
 * We do not initialize any of the queues here, instead the driver
 * top-level must request those individually.
 *
 * Called in two different modes:
 *
 *  1. Perform actual hardware initialization and record hard-coded
 *     parameters which were used. This gets used when we're the
 *     Master PF and the Firmware Configuration File support didn't
 *     work for some reason.
 *
 *  2. We're not the Master PF or initialization was performed with
 *     a Firmware Configuration File. In this case we need to grab
 *     any of the SGE operating parameters that we need to have in
 *     order to do our job and make sure we can live with them ...
 */
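
/*
 * Note: t4_sge_init_soft() below (and t4vf_sge_init() further down) only
 * read back and sanity-check the SGE registers that firmware or the Master
 * PF has already programmed; they do not write the hardware themselves.
 */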
static int t4_sge_init_soft(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
	u32 ingress_rx_threshold;

	/*
	 * Verify that CPL messages are going to the Ingress Queue for
	 * process_responses() and that only packet data is going to the
	 * Free Lists.
	 */
	if ((t4_read_reg(adap, A_SGE_CONTROL) & F_RXPKTCPLMODE) !=
	    V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
		dev_err(adap, "bad SGE CPL MODE\n");
		return -EINVAL;
	}

	/*
	 * Validate the Host Buffer Register Array indices that we want to
	 * use ...
	 *
	 * XXX Note that we should really read through the Host Buffer Size
	 * XXX register array and find the indices of the Buffer Sizes which
	 * XXX meet our needs!
	 */
#define READ_FL_BUF(x) \
	t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE0 + (x) * sizeof(u32))

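	/*
	 * The SGE_FL_BUFFER_SIZE registers are consecutive 32-bit registers,
	 * so READ_FL_BUF(RX_LARGE_MTU_BUF), i.e. index 3, reads the register
	 * at A_SGE_FL_BUFFER_SIZE0 + 12.
	 */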
	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
#undef READ_FL_BUF

	/*
	 * We only bother using the Large Page logic if the Large Page Buffer
	 * is larger than our Page Size Buffer.
	 */
	if (fl_large_pg <= fl_small_pg)
		fl_large_pg = 0;

	/*
	 * The Page Size Buffer must be exactly equal to our Page Size and the
	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
	 */
	if (fl_small_pg != CXGBE_PAGE_SIZE ||
	    (fl_large_pg & (fl_large_pg - 1)) != 0) {
		dev_err(adap, "bad SGE FL page buffer sizes [%d, %d]\n",
			fl_small_pg, fl_large_pg);
		return -EINVAL;
	}
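	/*
	 * fl_pg_order expresses the large buffer size as a power-of-two
	 * multiple of the base page size. For example (illustrative values
	 * only), a 64KB large-page buffer with a 4KB CXGBE_PAGE_SIZE gives
	 * ilog2(65536) - PAGE_SHIFT = 16 - 12 = 4.
	 */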
	if (fl_large_pg)
		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;

	if (adap->use_unpacked_mode) {
		int err = 0;

		if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap)) {
			dev_err(adap, "bad SGE FL small MTU %d\n",
				fl_small_mtu);
			err = -EINVAL;
		}
		if (fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
			dev_err(adap, "bad SGE FL large MTU %d\n",
				fl_large_mtu);
			err = -EINVAL;
		}
		if (err)
			return err;
	}

	/*
	 * Retrieve our RX interrupt holdoff timer values and counter
	 * threshold values from the SGE parameters.
	 */
	timer_value_0_and_1 = t4_read_reg(adap, A_SGE_TIMER_VALUE_0_AND_1);
	timer_value_2_and_3 = t4_read_reg(adap, A_SGE_TIMER_VALUE_2_AND_3);
	timer_value_4_and_5 = t4_read_reg(adap, A_SGE_TIMER_VALUE_4_AND_5);
	s->timer_val[0] = core_ticks_to_us(adap,
					   G_TIMERVALUE0(timer_value_0_and_1));
	s->timer_val[1] = core_ticks_to_us(adap,
					   G_TIMERVALUE1(timer_value_0_and_1));
	s->timer_val[2] = core_ticks_to_us(adap,
					   G_TIMERVALUE2(timer_value_2_and_3));
	s->timer_val[3] = core_ticks_to_us(adap,
					   G_TIMERVALUE3(timer_value_2_and_3));
	s->timer_val[4] = core_ticks_to_us(adap,
					   G_TIMERVALUE4(timer_value_4_and_5));
	s->timer_val[5] = core_ticks_to_us(adap,
					   G_TIMERVALUE5(timer_value_4_and_5));

	ingress_rx_threshold = t4_read_reg(adap, A_SGE_INGRESS_RX_THRESHOLD);
	s->counter_val[0] = G_THRESHOLD_0(ingress_rx_threshold);
	s->counter_val[1] = G_THRESHOLD_1(ingress_rx_threshold);
	s->counter_val[2] = G_THRESHOLD_2(ingress_rx_threshold);
	s->counter_val[3] = G_THRESHOLD_3(ingress_rx_threshold);

	return 0;
}
int t4_sge_init(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	u32 sge_control, sge_conm_ctrl;
	int ret, egress_threshold;

	/*
	 * Ingress Padding Boundary and Egress Status Page Size are set up by
	 * t4_fixup_host_params().
	 */
	sge_control = t4_read_reg(adap, A_SGE_CONTROL);
	s->pktshift = G_PKTSHIFT(sge_control);
	s->stat_len = (sge_control & F_EGRSTATUSPAGESIZE) ? 128 : 64;
	s->fl_align = t4_fl_pkt_align(adap);
	ret = t4_sge_init_soft(adap);
	if (ret < 0) {
		dev_err(adap, "%s: t4_sge_init_soft failed, error %d\n",
			__func__, -ret);
		return ret;
	}

	/*
	 * A FL with <= fl_starve_thres buffers is starving and a periodic
	 * timer will attempt to refill it. This needs to be larger than the
	 * SGE's Egress Congestion Threshold. If it isn't, then we can get
	 * stuck waiting for new packets while the SGE is waiting for us to
	 * give it more Free List entries. (Note that the SGE's Egress
	 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
	 * there was only a single field to control this. For T5 there's the
	 * original field which now only applies to Unpacked Mode Free List
	 * buffers and a new field which only applies to Packed Mode Free List
	 * buffers.
	 */
	sge_conm_ctrl = t4_read_reg(adap, A_SGE_CONM_CTRL);
	if (is_t4(adap->params.chip) || adap->use_unpacked_mode)
		egress_threshold = G_EGRTHRESHOLD(sge_conm_ctrl);
	else
		egress_threshold = G_EGRTHRESHOLDPACKING(sge_conm_ctrl);
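	/*
	 * Example with illustrative numbers: an Egress Congestion Threshold
	 * of 64 corresponds to 128 Free List pointers, so the starvation
	 * threshold below works out to 2 * 64 + 1 = 129 Free List entries.
	 */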
	s->fl_starve_thres = 2 * egress_threshold + 1;

	return 0;
}
int t4vf_sge_init(struct adapter *adap)
{
	struct sge_params *sge_params = &adap->params.sge;
	u32 sge_ingress_queues_per_page;
	u32 sge_egress_queues_per_page;
	u32 sge_control, sge_control2;
	u32 fl_small_pg, fl_large_pg;
	u32 sge_ingress_rx_threshold;
	u32 sge_timer_value_0_and_1;
	u32 sge_timer_value_2_and_3;
	u32 sge_timer_value_4_and_5;
	u32 sge_congestion_control;
	struct sge *s = &adap->sge;
	unsigned int s_hps, s_qpp;
	u32 sge_host_page_size;
	u32 params[7], vals[7];
	int v;

	/* query basic params from fw */
	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
		     V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL));
	params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
		     V_FW_PARAMS_PARAM_XYZ(A_SGE_HOST_PAGE_SIZE));
	params[2] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
		     V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE0));
	params[3] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
		     V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE1));
	params[4] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
		     V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_0_AND_1));
	params[5] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
		     V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_2_AND_3));
	params[6] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
		     V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_4_AND_5));
	v = t4vf_query_params(adap, 7, params, vals);
	if (v != FW_SUCCESS)
		return v;

	sge_control = vals[0];
	sge_host_page_size = vals[1];
	fl_small_pg = vals[2];
	fl_large_pg = vals[3];
	sge_timer_value_0_and_1 = vals[4];
	sge_timer_value_2_and_3 = vals[5];
	sge_timer_value_4_and_5 = vals[6];

	/*
	 * Start by vetting the basic SGE parameters which have been set up by
	 * the Physical Function Driver.
	 */

	/* We only bother using the Large Page logic if the Large Page Buffer
	 * is larger than our Page Size Buffer.
	 */
	if (fl_large_pg <= fl_small_pg)
		fl_large_pg = 0;

	/* The Page Size Buffer must be exactly equal to our Page Size and the
	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
	 */
	if (fl_small_pg != CXGBE_PAGE_SIZE ||
	    (fl_large_pg & (fl_large_pg - 1)) != 0) {
		dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
			fl_small_pg, fl_large_pg);
		return -EINVAL;
	}

	if ((sge_control & F_RXPKTCPLMODE) !=
	    V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
		dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
		return -EINVAL;
	}

	/* Grab ingress packing boundary from SGE_CONTROL2 */
	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
		     V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL2));
	v = t4vf_query_params(adap, 1, params, vals);
	if (v != FW_SUCCESS) {
		dev_err(adapter, "Unable to get SGE Control2; "
			"probably old firmware.\n");
		return v;
	}
	sge_control2 = vals[0];

	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
		     V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_RX_THRESHOLD));
	params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
		     V_FW_PARAMS_PARAM_XYZ(A_SGE_CONM_CTRL));
	v = t4vf_query_params(adap, 2, params, vals);
	if (v != FW_SUCCESS)
		return v;
	sge_ingress_rx_threshold = vals[0];
	sge_congestion_control = vals[1];
	params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
		     V_FW_PARAMS_PARAM_XYZ(A_SGE_EGRESS_QUEUES_PER_PAGE_VF));
	params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
		     V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_QUEUES_PER_PAGE_VF));
	v = t4vf_query_params(adap, 2, params, vals);
	if (v != FW_SUCCESS) {
		dev_warn(adap, "Unable to get VF SGE Queues/Page; "
			 "probably old firmware.\n");
		return v;
	}
	sge_egress_queues_per_page = vals[0];
	sge_ingress_queues_per_page = vals[1];

	/*
	 * We need the Queues/Page for our VF. This is based on the
	 * PF from which we're instantiated and is indexed in the
	 * register we just read.
	 */
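	/*
	 * Each of these registers packs one small field per PF; the shift
	 * (s_hps / s_qpp) selects the field belonging to the PF this VF was
	 * instantiated from, and the mask then extracts its value.
	 */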
	s_hps = (S_HOSTPAGESIZEPF0 +
		 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adap->pf);
	sge_params->hps =
		((sge_host_page_size >> s_hps) & M_HOSTPAGESIZEPF0);

	s_qpp = (S_QUEUESPERPAGEPF0 +
		 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adap->pf);
	sge_params->eq_qpp =
		((sge_egress_queues_per_page >> s_qpp)
		 & M_QUEUESPERPAGEPF0);
	sge_params->iq_qpp =
		((sge_ingress_queues_per_page >> s_qpp)
		 & M_QUEUESPERPAGEPF0);

	/*
	 * Now translate the queried parameters into our internal forms.
	 */
	if (fl_large_pg)
		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
	s->stat_len = ((sge_control & F_EGRSTATUSPAGESIZE)
			? 128 : 64);
	s->pktshift = G_PKTSHIFT(sge_control);
	s->fl_align = t4vf_fl_pkt_align(adap, sge_control, sge_control2);

	/*
	 * A FL with <= fl_starve_thres buffers is starving and a periodic
	 * timer will attempt to refill it. This needs to be larger than the
	 * SGE's Egress Congestion Threshold. If it isn't, then we can get
	 * stuck waiting for new packets while the SGE is waiting for us to
	 * give it more Free List entries. (Note that the SGE's Egress
	 * Congestion Threshold is in units of 2 Free List pointers.)
	 */
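	/*
	 * T5 and T6 report the Packed Mode Egress Congestion Threshold in
	 * different bit-fields of SGE_CONM_CTRL, hence the separate
	 * G_EGRTHRESHOLDPACKING and G_T6_EGRTHRESHOLDPACKING accessors below.
	 */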
	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T5:
		s->fl_starve_thres =
			G_EGRTHRESHOLDPACKING(sge_congestion_control);
		break;
	case CHELSIO_T6:
	default:
		s->fl_starve_thres =
			G_T6_EGRTHRESHOLDPACKING(sge_congestion_control);
		break;
	}
	s->fl_starve_thres = s->fl_starve_thres * 2 + 1;

	/*
	 * Save RX interrupt holdoff timer values and counter
	 * threshold values from the SGE parameters.
	 */
	s->timer_val[0] = core_ticks_to_us(adap,
					   G_TIMERVALUE0(sge_timer_value_0_and_1));
	s->timer_val[1] = core_ticks_to_us(adap,
					   G_TIMERVALUE1(sge_timer_value_0_and_1));
	s->timer_val[2] = core_ticks_to_us(adap,
					   G_TIMERVALUE2(sge_timer_value_2_and_3));
	s->timer_val[3] = core_ticks_to_us(adap,
					   G_TIMERVALUE3(sge_timer_value_2_and_3));
	s->timer_val[4] = core_ticks_to_us(adap,
					   G_TIMERVALUE4(sge_timer_value_4_and_5));
	s->timer_val[5] = core_ticks_to_us(adap,
					   G_TIMERVALUE5(sge_timer_value_4_and_5));
	s->counter_val[0] = G_THRESHOLD_0(sge_ingress_rx_threshold);
	s->counter_val[1] = G_THRESHOLD_1(sge_ingress_rx_threshold);
	s->counter_val[2] = G_THRESHOLD_2(sge_ingress_rx_threshold);
	s->counter_val[3] = G_THRESHOLD_3(sge_ingress_rx_threshold);

	return 0;
}