1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Chelsio Communications.
14 #include <netinet/in.h>
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
18 #include <rte_cycles.h>
19 #include <rte_interrupts.h>
21 #include <rte_debug.h>
23 #include <rte_atomic.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_memory.h>
26 #include <rte_memzone.h>
27 #include <rte_tailq.h>
29 #include <rte_alarm.h>
30 #include <rte_ether.h>
31 #include <rte_ethdev_driver.h>
32 #include <rte_malloc.h>
33 #include <rte_random.h>
41 static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
42 struct sge_eth_txq *txq);
45 * Max number of Rx buffers we replenish at a time.
47 #define MAX_RX_REFILL 64U
49 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
52 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
55 #define MAX_IMM_TX_PKT_LEN 256
58 * Rx buffer sizes for "usembufs" Free List buffers (one ingress packet
59 * per mbuf buffer). We currently only support two sizes for 1500- and
60 * 9000-byte MTUs. We could easily support more but there doesn't seem to be
61 * much need for that ...
63 #define FL_MTU_SMALL 1500
64 #define FL_MTU_LARGE 9000
66 static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
69 struct sge *s = &adapter->sge;
71 return CXGBE_ALIGN(s->pktshift + ETHER_HDR_LEN + VLAN_HLEN + mtu,
75 #define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
76 #define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
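/*
 * Worked example (illustrative only; pktshift and fl_align come from the
 * hardware at init time, typical values of 2 and 64 are assumed here):
 *
 *	FL_MTU_SMALL_BUFSIZE = CXGBE_ALIGN(2 + 14 + 4 + 1500, 64) = 1536
 *	FL_MTU_LARGE_BUFSIZE = CXGBE_ALIGN(2 + 14 + 4 + 9000, 64) = 9024
 */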
79 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning. The hardware uses
80 * these to specify the buffer size as an index into the SGE Free List Buffer
81 * Size register array. We also use bit 4, when the buffer has been unmapped
82 * for DMA, but this is of course never sent to the hardware and is only used
83 * to prevent double unmappings. All of the above requires that the Free List
84 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
85 * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal
86 * Free List Buffer alignment is 32 bytes, this works out for us ...
89 RX_BUF_FLAGS = 0x1f, /* bottom five bits are special */
90 RX_BUF_SIZE = 0x0f, /* bottom four bits are for buf sizes */
91 RX_UNMAPPED_BUF = 0x10, /* buffer is not mapped */
94 * XXX We shouldn't depend on being able to use these indices.
95 * XXX Especially when some other Master PF has initialized the
96 * XXX adapter or we use the Firmware Configuration File. We
97 * XXX should really search through the Host Buffer Size register
98 * XXX array for the appropriately sized buffer indices.
100 RX_SMALL_PG_BUF = 0x0, /* small (PAGE_SIZE) page buffer */
101 RX_LARGE_PG_BUF = 0x1, /* large page buffer */
103 RX_SMALL_MTU_BUF = 0x2, /* small MTU buffer */
104 RX_LARGE_MTU_BUF = 0x3, /* large MTU buffer */
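/*
 * Minimal sketch of how those low bits travel with the DMA address; this
 * is what refill_fl_usembufs() and get_buf_size() below rely on (the exact
 * alignment step is illustrative):
 *
 *	mapping = RTE_ALIGN(mbuf->buf_iova + RTE_PKTMBUF_HEADROOM, fl_align);
 *	mapping |= RX_LARGE_MTU_BUF;            size index in bits 0..3
 *	*d++ = cpu_to_be64(mapping);            what the hardware sees
 *	sd->dma_addr = mapping;                 what the driver keeps
 *	idx = sd->dma_addr & RX_BUF_SIZE;       recover the size index later
 */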
108 * txq_avail - return the number of available slots in a Tx queue
111 * Returns the number of descriptors in a Tx queue available to write new
114 static inline unsigned int txq_avail(const struct sge_txq *q)
116 return q->size - 1 - q->in_use;
119 static int map_mbuf(struct rte_mbuf *mbuf, dma_addr_t *addr)
121 struct rte_mbuf *m = mbuf;
123 for (; m; m = m->next, addr++) {
124 *addr = m->buf_iova + rte_pktmbuf_headroom(m);
135 * free_tx_desc - reclaims Tx descriptors and their buffers
136 * @q: the Tx queue to reclaim descriptors from
137 * @n: the number of descriptors to reclaim
139 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
140 * Tx buffers. Called with the Tx queue lock held.
142 static void free_tx_desc(struct sge_txq *q, unsigned int n)
144 struct tx_sw_desc *d;
145 unsigned int cidx = 0;
149 if (d->mbuf) { /* an SGL is present */
150 rte_pktmbuf_free(d->mbuf);
153 if (d->coalesce.idx) {
156 for (i = 0; i < d->coalesce.idx; i++) {
157 rte_pktmbuf_free(d->coalesce.mbuf[i]);
158 d->coalesce.mbuf[i] = NULL;
163 if (++cidx == q->size) {
167 RTE_MBUF_PREFETCH_TO_FREE(&q->sdesc->mbuf->pool);
171 static void reclaim_tx_desc(struct sge_txq *q, unsigned int n)
173 struct tx_sw_desc *d;
174 unsigned int cidx = q->cidx;
178 if (d->mbuf) { /* an SGL is present */
179 rte_pktmbuf_free(d->mbuf);
183 if (++cidx == q->size) {
192 * fl_cap - return the capacity of a free-buffer list
195 * Returns the capacity of a free-buffer list. The capacity is less than
196 * the size because one descriptor needs to be left unpopulated, otherwise
197 * HW will think the FL is empty.
199 static inline unsigned int fl_cap(const struct sge_fl *fl)
201 return fl->size - 8; /* 1 descriptor = 8 buffers */
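/*
 * E.g. a free list sized at 1024 buffer pointers exposes a usable capacity
 * of 1016: the final hardware descriptor (8 buffer pointers) always stays
 * unpopulated, as described above.
 */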
205 * fl_starving - return whether a Free List is starving.
206 * @adapter: pointer to the adapter
209 * Tests specified Free List to see whether the number of buffers
210 * available to the hardware has fallen below our "starvation"
213 static inline bool fl_starving(const struct adapter *adapter,
214 const struct sge_fl *fl)
216 const struct sge *s = &adapter->sge;
218 return fl->avail - fl->pend_cred <= s->fl_starve_thres;
221 static inline unsigned int get_buf_size(struct adapter *adapter,
222 const struct rx_sw_desc *d)
224 unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
225 unsigned int buf_size = 0;
227 switch (rx_buf_size_idx) {
228 case RX_SMALL_MTU_BUF:
229 buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
232 case RX_LARGE_MTU_BUF:
233 buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
245 * free_rx_bufs - free the Rx buffers on an SGE free list
246 * @q: the SGE free list to free buffers from
247 * @n: how many buffers to free
249 * Release the next @n buffers on an SGE free-buffer Rx queue. The
250 * buffers must be made inaccessible to HW before calling this function.
252 static void free_rx_bufs(struct sge_fl *q, int n)
254 unsigned int cidx = q->cidx;
255 struct rx_sw_desc *d;
260 rte_pktmbuf_free(d->buf);
264 if (++cidx == q->size) {
274 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
275 * @q: the SGE free list
277 * Unmap the current buffer on an SGE free-buffer Rx queue. The
278 * buffer must be made inaccessible to HW before calling this function.
280 * This is similar to @free_rx_bufs above but does not free the buffer.
281 * Do note that the FL still loses any further access to the buffer.
283 static void unmap_rx_buf(struct sge_fl *q)
285 if (++q->cidx == q->size)
290 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
292 if (q->pend_cred >= 64) {
293 u32 val = adap->params.arch.sge_fl_db;
295 if (is_t4(adap->params.chip))
296 val |= V_PIDX(q->pend_cred / 8);
298 val |= V_PIDX_T5(q->pend_cred / 8);
301 * Make sure all memory writes to the Free List queue are
302 * committed before we tell the hardware about them.
307 * If we don't have access to the new User Doorbell (T5+), use
308 * the old doorbell mechanism; otherwise use the new BAR2
311 if (unlikely(!q->bar2_addr)) {
312 u32 reg = is_pf4(adap) ? MYPF_REG(A_SGE_PF_KDOORBELL) :
316 t4_write_reg_relaxed(adap, reg,
317 val | V_QID(q->cntxt_id));
319 writel_relaxed(val | V_QID(q->bar2_qid),
320 (void *)((uintptr_t)q->bar2_addr +
324 * This Write Memory Barrier will force the write to
325 * the User Doorbell area to be flushed.
333 static inline void set_rx_sw_desc(struct rx_sw_desc *sd, void *buf,
337 sd->dma_addr = mapping; /* includes size low bits */
341 * refill_fl_usembufs - refill an SGE Rx buffer ring with mbufs
343 * @q: the ring to refill
344 * @n: the number of new buffers to allocate
346 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
347 * allocated with the supplied gfp flags. The caller must assure that
348 * @n does not exceed the queue's capacity. If afterwards the queue is
349 * found critically low, mark it as starving in the bitmap of starving FLs.
351 * Returns the number of buffers allocated.
353 static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
356 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, fl);
357 unsigned int cred = q->avail;
358 __be64 *d = &q->desc[q->pidx];
359 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
360 unsigned int buf_size_idx = RX_SMALL_MTU_BUF;
361 struct rte_mbuf *buf_bulk[n];
363 struct rte_pktmbuf_pool_private *mbp_priv;
364 u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.jumbo_frame;
366 /* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
367 mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
369 ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000))
370 buf_size_idx = RX_LARGE_MTU_BUF;
372 ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk, n);
373 if (unlikely(ret != 0)) {
374 dev_debug(adap, "%s: failed to allocate fl entries in bulk ..\n",
377 rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
381 for (i = 0; i < n; i++) {
382 struct rte_mbuf *mbuf = buf_bulk[i];
386 dev_debug(adap, "%s: mbuf alloc failed\n", __func__);
388 rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
392 rte_mbuf_refcnt_set(mbuf, 1);
394 (uint16_t)(RTE_PTR_ALIGN((char *)mbuf->buf_addr +
395 RTE_PKTMBUF_HEADROOM,
396 adap->sge.fl_align) -
397 (char *)mbuf->buf_addr);
400 mbuf->port = rxq->rspq.port_id;
402 mapping = (dma_addr_t)RTE_ALIGN(mbuf->buf_iova +
405 mapping |= buf_size_idx;
406 *d++ = cpu_to_be64(mapping);
407 set_rx_sw_desc(sd, mbuf, mapping);
411 if (++q->pidx == q->size) {
418 out: cred = q->avail - cred;
419 q->pend_cred += cred;
422 if (unlikely(fl_starving(adap, q))) {
424 * Make sure data has been written to free list
434 * refill_fl - refill an SGE Rx buffer ring with mbufs
436 * @q: the ring to refill
437 * @n: the number of new buffers to allocate
439 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
440 * allocated with the supplied gfp flags. The caller must assure that
441 * @n does not exceed the queue's capacity. Returns the number of buffers
444 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n)
446 return refill_fl_usembufs(adap, q, n);
449 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
451 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail));
455 * Return the number of reclaimable descriptors in a Tx queue.
457 static inline int reclaimable(const struct sge_txq *q)
459 int hw_cidx = ntohs(q->stat->cidx);
463 return hw_cidx + q->size;
468 * reclaim_completed_tx - reclaims completed Tx descriptors
469 * @q: the Tx queue to reclaim completed descriptors from
471 * Reclaims Tx descriptors that the SGE has indicated it has processed.
473 void reclaim_completed_tx(struct sge_txq *q)
475 unsigned int avail = reclaimable(q);
478 /* reclaim as much as possible */
479 reclaim_tx_desc(q, avail);
481 avail = reclaimable(q);
486 * sgl_len - calculates the size of an SGL of the given capacity
487 * @n: the number of SGL entries
489 * Calculates the number of flits needed for a scatter/gather list that
490 * can hold the given number of entries.
492 static inline unsigned int sgl_len(unsigned int n)
495 * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
496 * addresses. The DSGL Work Request starts off with a 32-bit DSGL
497 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
498 * repeated sequences of { Length[i], Length[i+1], Address[i],
499 * Address[i+1] } (this ensures that all addresses are on 64-bit
500 * boundaries). If N is even, then Length[N+1] should be set to 0 and
501 * Address[N+1] is omitted.
503 * The following calculation incorporates all of the above. It's
504 * somewhat hard to follow but, briefly: the "+2" accounts for the
505 * first two flits which include the DSGL header, Length0 and
506 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
507 * flits for every pair of the remaining N) +1 if (n-1) is odd; and
508 * finally the "+((n-1)&1)" adds the one remaining flit needed if
n--; /* the first SGL entry is accounted for by the "+2" above */
512 return (3 * n) / 2 + (n & 1) + 2;
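/*
 * Worked example, counting as in the comment above: a 3-entry SGL takes
 * 2 flits for the DSGL header plus Length0/Address0, and 3 flits for the
 * remaining pair {Length1, Length2, Address1, Address2}, i.e. 5 flits in
 * total. A 2-entry SGL takes 2 + 2 = 4 flits, the lone trailing entry
 * still needing one Length flit (padded with a zero length) and one
 * Address flit.
 */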
516 * flits_to_desc - returns the num of Tx descriptors for the given flits
517 * @n: the number of flits
519 * Returns the number of Tx descriptors needed for the supplied number
522 static inline unsigned int flits_to_desc(unsigned int n)
524 return DIV_ROUND_UP(n, 8);
528 * is_eth_imm - can an Ethernet packet be sent as immediate data?
531 * Returns whether an Ethernet packet is small enough to fit as
532 * immediate data. Return value corresponds to the headroom required.
534 static inline int is_eth_imm(const struct rte_mbuf *m)
536 unsigned int hdrlen = (m->ol_flags & PKT_TX_TCP_SEG) ?
537 sizeof(struct cpl_tx_pkt_lso_core) : 0;
539 hdrlen += sizeof(struct cpl_tx_pkt);
540 if (m->pkt_len <= MAX_IMM_TX_PKT_LEN - hdrlen)
547 * calc_tx_flits - calculate the number of flits for a packet Tx WR
549 * @adap: adapter structure pointer
551 * Returns the number of flits needed for a Tx WR for the given Ethernet
552 * packet, including the needed WR and CPL headers.
554 static inline unsigned int calc_tx_flits(const struct rte_mbuf *m,
555 struct adapter *adap)
557 size_t wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkt_wr) :
558 sizeof(struct fw_eth_tx_pkt_vm_wr);
563 * If the mbuf is small enough, we can pump it out as a work request
564 * with only immediate data. In that case we just have to have the
565 * TX Packet header plus the mbuf data in the Work Request.
568 hdrlen = is_eth_imm(m);
570 return DIV_ROUND_UP(m->pkt_len + hdrlen, sizeof(__be64));
573 * Otherwise, we're going to have to construct a Scatter gather list
574 * of the mbuf body and fragments. We also include the flits necessary
575 * for the TX Packet Work Request and CPL. We always have a firmware
576 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
577 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
578 * message or, if we're doing a Large Send Offload, an LSO CPL message
579 * with an embedded TX Packet Write CPL message.
581 flits = sgl_len(m->nb_segs);
583 flits += (wr_size + sizeof(struct cpl_tx_pkt_lso_core) +
584 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
587 sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
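/*
 * Worked example (assuming the usual 16-byte fw_eth_tx_pkt_wr and 16-byte
 * cpl_tx_pkt_core): a 3-segment, non-TSO packet sent from a PF needs
 * sgl_len(3) = 5 flits for the SGL plus (16 + 16) / 8 = 4 flits for the
 * WR and CPL headers, i.e. 9 flits, which flits_to_desc() rounds up to
 * DIV_ROUND_UP(9, 8) = 2 Tx descriptors.
 */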
592 * write_sgl - populate a scatter/gather list for a packet
594 * @q: the Tx queue we are writing into
595 * @sgl: starting location for writing the SGL
596 * @end: points right after the end of the SGL
597 * @start: start offset into mbuf main-body data to include in the SGL
598 * @addr: address of mapped region
600 * Generates a scatter/gather list for the buffers that make up a packet.
601 * The caller must provide adequate space for the SGL that will be written.
602 * The SGL includes all of the packet's page fragments and the data in its
603 * main body except for the first @start bytes. @sgl must be 16-byte
604 * aligned and within a Tx descriptor with available space. @end points
605 * right after the end of the SGL but does not account for any potential
606 * wrap around, i.e., @end > @sgl.
608 static void write_sgl(struct rte_mbuf *mbuf, struct sge_txq *q,
609 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
610 const dma_addr_t *addr)
613 struct ulptx_sge_pair *to;
614 struct rte_mbuf *m = mbuf;
615 unsigned int nfrags = m->nb_segs;
616 struct ulptx_sge_pair buf[nfrags / 2];
618 len = m->data_len - start;
619 sgl->len0 = htonl(len);
620 sgl->addr0 = rte_cpu_to_be_64(addr[0]);
622 sgl->cmd_nsge = htonl(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
623 V_ULPTX_NSGE(nfrags));
624 if (likely(--nfrags == 0))
627 * Most of the complexity below deals with the possibility we hit the
628 * end of the queue in the middle of writing the SGL. For this case
629 * only we create the SGL in a temporary buffer and then copy it.
631 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
633 for (i = 0; nfrags >= 2; nfrags -= 2, to++) {
635 to->len[0] = rte_cpu_to_be_32(m->data_len);
636 to->addr[0] = rte_cpu_to_be_64(addr[++i]);
638 to->len[1] = rte_cpu_to_be_32(m->data_len);
639 to->addr[1] = rte_cpu_to_be_64(addr[++i]);
643 to->len[0] = rte_cpu_to_be_32(m->data_len);
644 to->len[1] = rte_cpu_to_be_32(0);
645 to->addr[0] = rte_cpu_to_be_64(addr[i + 1]);
647 if (unlikely((u8 *)end > (u8 *)q->stat)) {
648 unsigned int part0 = RTE_PTR_DIFF((u8 *)q->stat,
653 memcpy(sgl->sge, buf, part0);
654 part1 = RTE_PTR_DIFF((u8 *)end, (u8 *)q->stat);
655 rte_memcpy(q->desc, RTE_PTR_ADD((u8 *)buf, part0), part1);
656 end = RTE_PTR_ADD((void *)q->desc, part1);
658 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
662 #define IDXDIFF(head, tail, wrap) \
663 ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
665 #define Q_IDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->size)
666 #define R_IDXDIFF(q, idx) IDXDIFF((q)->cidx, (q)->idx, (q)->size)
668 #define PIDXDIFF(head, tail, wrap) \
669 ((tail) >= (head) ? (tail) - (head) : (wrap) - (head) + (tail))
670 #define P_IDXDIFF(q, idx) PIDXDIFF((q)->cidx, idx, (q)->size)
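/*
 * Example of the wrap-around handling above: with a ring of size 1024,
 * pidx = 5 and dbidx = 1020, Q_IDXDIFF(q, dbidx) evaluates to
 * 1024 - 1020 + 5 = 9, i.e. nine new descriptors since the last doorbell
 * even though the producer index has wrapped past the end of the ring.
 */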
673 * ring_tx_db - ring a Tx queue's doorbell
676 * @n: number of new descriptors to give to HW
678 * Ring the doorbell for a Tx queue.
680 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q)
682 int n = Q_IDXDIFF(q, dbidx);
685 * Make sure that all writes to the TX Descriptors are committed
686 * before we tell the hardware about them.
691 * If we don't have access to the new User Doorbell (T5+), use the old
692 * doorbell mechanism; otherwise use the new BAR2 mechanism.
694 if (unlikely(!q->bar2_addr)) {
698 * For T4 we need to participate in the Doorbell Recovery
702 t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
703 V_QID(q->cntxt_id) | val);
706 q->db_pidx = q->pidx;
708 u32 val = V_PIDX_T5(n);
711 * T4 and later chips share the same PIDX field offset within
712 * the doorbell, but T5 and later shrank the field in order to
713 * gain a bit for Doorbell Priority. The field was absurdly
714 * large in the first place (14 bits) so we just use the T5
715 * and later limits and warn if a Queue ID is too large.
717 WARN_ON(val & F_DBPRIO);
719 writel(val | V_QID(q->bar2_qid),
720 (void *)((uintptr_t)q->bar2_addr + SGE_UDB_KDOORBELL));
723 * This Write Memory Barrier will force the write to the User
724 * Doorbell area to be flushed. This is needed to prevent
725 * writes on different CPUs for the same queue from hitting
726 * the adapter out of order. This is required when some Work
727 * Requests take the Write Combine Gather Buffer path (user
728 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
729 * take the traditional path where we simply increment the
730 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
731 * hardware DMA read the actual Work Request.
739 * Figure out what HW csum a packet wants and return the appropriate control
742 static u64 hwcsum(enum chip_type chip, const struct rte_mbuf *m)
746 if (m->ol_flags & PKT_TX_IP_CKSUM) {
747 switch (m->ol_flags & PKT_TX_L4_MASK) {
748 case PKT_TX_TCP_CKSUM:
749 csum_type = TX_CSUM_TCPIP;
751 case PKT_TX_UDP_CKSUM:
752 csum_type = TX_CSUM_UDPIP;
761 if (likely(csum_type >= TX_CSUM_TCPIP)) {
762 u64 hdr_len = V_TXPKT_IPHDR_LEN(m->l3_len);
763 int eth_hdr_len = m->l2_len;
765 if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
766 hdr_len |= V_TXPKT_ETHHDR_LEN(eth_hdr_len);
768 hdr_len |= V_T6_TXPKT_ETHHDR_LEN(eth_hdr_len);
769 return V_TXPKT_CSUM_TYPE(csum_type) | hdr_len;
773 * unknown protocol, disable HW csum
774 * and hope a bad packet is detected
776 return F_TXPKT_L4CSUM_DIS;
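/*
 * A minimal sketch of the mbuf state an application is expected to set up
 * so that hwcsum() selects TX_CSUM_TCPIP (illustrative only, not part of
 * the driver):
 *
 *	m->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 *	m->l2_len = sizeof(struct ether_hdr);     14 bytes for plain Ethernet
 *	m->l3_len = 20;                           IPv4 header without options
 *
 * hwcsum() then encodes the Ethernet and IP header lengths so the hardware
 * can locate and insert the checksums.
 */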
779 static inline void txq_advance(struct sge_txq *q, unsigned int n)
783 if (q->pidx >= q->size)
787 #define MAX_COALESCE_LEN 64000
789 static inline int wraps_around(struct sge_txq *q, int ndesc)
791 return (q->pidx + ndesc) > q->size ? 1 : 0;
794 static void tx_timer_cb(void *data)
796 struct adapter *adap = (struct adapter *)data;
797 struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
799 unsigned int coal_idx;
801 /* monitor any pending tx */
802 for (i = 0; i < adap->sge.max_ethqsets; i++, txq++) {
803 if (t4_os_trylock(&txq->txq_lock)) {
804 coal_idx = txq->q.coalesce.idx;
806 if (coal_idx == txq->q.last_coal_idx &&
807 txq->q.pidx == txq->q.last_pidx) {
808 ship_tx_pkt_coalesce_wr(adap, txq);
810 txq->q.last_coal_idx = coal_idx;
811 txq->q.last_pidx = txq->q.pidx;
814 t4_os_unlock(&txq->txq_lock);
817 rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
821 * ship_tx_pkt_coalesce_wr - finalizes and ships a coalesce WR
822 * @adap: adapter structure
825 * Writes the different fields of the pkts WR and sends it.
827 static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
828 struct sge_eth_txq *txq)
830 struct fw_eth_tx_pkts_vm_wr *vmwr;
831 const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) +
832 sizeof(vmwr->ethmacsrc) +
833 sizeof(vmwr->ethtype) +
834 sizeof(vmwr->vlantci));
835 struct fw_eth_tx_pkts_wr *wr;
836 struct sge_txq *q = &txq->q;
840 /* fill the pkts WR header */
841 wr = (void *)&q->desc[q->pidx];
842 wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
843 vmwr = (void *)&q->desc[q->pidx];
845 wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
846 ndesc = flits_to_desc(q->coalesce.flits);
847 wr->equiq_to_len16 = htonl(wr_mid);
848 wr->plen = cpu_to_be16(q->coalesce.len);
849 wr->npkt = q->coalesce.idx;
852 wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
853 wr->type = q->coalesce.type;
855 wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR));
857 memcpy((void *)vmwr->ethmacdst, (void *)q->coalesce.ethmacdst,
861 /* zero out coalesce structure members */
862 memset((void *)&q->coalesce, 0, sizeof(struct eth_coalesce));
864 txq_advance(q, ndesc);
865 txq->stats.coal_wr++;
866 txq->stats.coal_pkts += wr->npkt;
868 if (Q_IDXDIFF(q, equeidx) >= q->size / 2) {
869 q->equeidx = q->pidx;
870 wr_mid |= F_FW_WR_EQUEQ;
871 wr->equiq_to_len16 = htonl(wr_mid);
877 * should_tx_packet_coalesce - decides whether to coalesce an mbuf or not
878 * @txq: tx queue where the mbuf is sent
879 * @mbuf: mbuf to be sent
880 * @nflits: return value for number of flits needed
881 * @adap: adapter structure
883 * This function decides if a packet should be coalesced or not.
885 static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
886 struct rte_mbuf *mbuf,
887 unsigned int *nflits,
888 struct adapter *adap)
890 struct fw_eth_tx_pkts_vm_wr *wr;
891 const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
892 sizeof(wr->ethmacsrc) +
893 sizeof(wr->ethtype) +
894 sizeof(wr->vlantci));
895 struct sge_txq *q = &txq->q;
896 unsigned int flits, ndesc;
897 unsigned char type = 0;
898 int credits, wr_size;
900 /* use coal WR type 1 when no frags are present */
901 type = (mbuf->nb_segs == 1) ? 1 : 0;
906 if (q->coalesce.idx && memcmp((void *)q->coalesce.ethmacdst,
907 rte_pktmbuf_mtod(mbuf, void *),
909 ship_tx_pkt_coalesce_wr(adap, txq);
912 if (unlikely(type != q->coalesce.type && q->coalesce.idx))
913 ship_tx_pkt_coalesce_wr(adap, txq);
915 /* calculate the number of flits required for coalescing this packet
916 * without the 2 flits of the WR header. These are added further down
917 * if we are just starting a new PKTS WR. sgl_len doesn't account for
918 * the possible 16-byte alignment of ULP TX commands, so we do it here.
920 flits = (sgl_len(mbuf->nb_segs) + 1) & ~1U;
922 flits += (sizeof(struct ulp_txpkt) +
923 sizeof(struct ulptx_idata)) / sizeof(__be64);
924 flits += sizeof(struct cpl_tx_pkt_core) / sizeof(__be64);
927 /* If coalescing is on, the mbuf is added to a pkts WR */
928 if (q->coalesce.idx) {
929 ndesc = DIV_ROUND_UP(q->coalesce.flits + flits, 8);
930 credits = txq_avail(q) - ndesc;
932 /* If we are wrapping or this is the last mbuf, then send the
933 * already coalesced mbufs and let the non-coalesce pass
936 if (unlikely(credits < 0 || wraps_around(q, ndesc))) {
937 ship_tx_pkt_coalesce_wr(adap, txq);
941 /* If the max coalesce len or the max WR len is reached
942 * ship the WR and keep coalescing on.
944 if (unlikely((q->coalesce.len + mbuf->pkt_len >
946 (q->coalesce.flits + flits >
948 ship_tx_pkt_coalesce_wr(adap, txq);
955 /* start a new pkts WR, the WR header is not filled below */
956 wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkts_wr) :
957 sizeof(struct fw_eth_tx_pkts_vm_wr);
958 flits += wr_size / sizeof(__be64);
959 ndesc = flits_to_desc(q->coalesce.flits + flits);
960 credits = txq_avail(q) - ndesc;
962 if (unlikely(credits < 0 || wraps_around(q, ndesc)))
964 q->coalesce.flits += wr_size / sizeof(__be64);
965 q->coalesce.type = type;
966 q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] +
967 q->coalesce.flits * sizeof(__be64);
969 memcpy((void *)q->coalesce.ethmacdst,
970 rte_pktmbuf_mtod(mbuf, void *), fw_hdr_copy_len);
975 * tx_do_packet_coalesce - add an mbuf to a coalesce WR
976 * @txq: sge_eth_txq used to send the mbuf
977 * @mbuf: mbuf to be sent
978 * @flits: flits needed for this mbuf
979 * @adap: adapter structure
980 * @pi: port_info structure
981 * @addr: mapped address of the mbuf
983 * Adds an mbuf to be sent as part of a coalesce WR by filling a
984 * ulp_tx_pkt command, ulp_tx_sc_imm command, cpl message and
985 * ulp_tx_sc_dsgl command.
987 static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
988 struct rte_mbuf *mbuf,
989 int flits, struct adapter *adap,
990 const struct port_info *pi,
991 dma_addr_t *addr, uint16_t nb_pkts)
994 struct sge_txq *q = &txq->q;
995 struct ulp_txpkt *mc;
996 struct ulptx_idata *sc_imm;
997 struct cpl_tx_pkt_core *cpl;
998 struct tx_sw_desc *sd;
999 unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len;
1000 unsigned int max_coal_pkt_num = is_pf4(adap) ? ETH_COALESCE_PKT_NUM :
1001 ETH_COALESCE_VF_PKT_NUM;
1003 #ifdef RTE_LIBRTE_CXGBE_TPUT
1004 RTE_SET_USED(nb_pkts);
1007 if (q->coalesce.type == 0) {
1008 mc = (struct ulp_txpkt *)q->coalesce.ptr;
1009 mc->cmd_dest = htonl(V_ULPTX_CMD(4) | V_ULP_TXPKT_DEST(0) |
1010 V_ULP_TXPKT_FID(adap->sge.fw_evtq.cntxt_id) |
1012 mc->len = htonl(DIV_ROUND_UP(flits, 2));
1013 sc_imm = (struct ulptx_idata *)(mc + 1);
1014 sc_imm->cmd_more = htonl(V_ULPTX_CMD(ULP_TX_SC_IMM) |
1016 sc_imm->len = htonl(sizeof(*cpl));
1017 end = (u64 *)mc + flits;
1018 cpl = (struct cpl_tx_pkt_core *)(sc_imm + 1);
1020 end = (u64 *)q->coalesce.ptr + flits;
1021 cpl = (struct cpl_tx_pkt_core *)q->coalesce.ptr;
1024 /* update coalesce structure for this txq */
1025 q->coalesce.flits += flits;
1026 q->coalesce.ptr += flits * sizeof(__be64);
1027 q->coalesce.len += mbuf->pkt_len;
1029 /* fill the cpl message, same as in t4_eth_xmit, this should be kept
1030 * similar to t4_eth_xmit
1032 if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
1033 cntrl = hwcsum(adap->params.chip, mbuf) |
1035 txq->stats.tx_cso++;
1037 cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
1040 if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
1041 txq->stats.vlan_ins++;
1042 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci);
1045 cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT));
1047 cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) |
1048 V_TXPKT_PF(adap->pf));
1050 cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id));
1051 cpl->pack = htons(0);
1052 cpl->len = htons(len);
1053 cpl->ctrl1 = cpu_to_be64(cntrl);
1054 write_sgl(mbuf, q, (struct ulptx_sgl *)(cpl + 1), end, 0, addr);
1056 txq->stats.tx_bytes += len;
1058 sd = &q->sdesc[q->pidx + (idx >> 1)];
1060 if (sd->coalesce.idx) {
1063 for (i = 0; i < sd->coalesce.idx; i++) {
1064 rte_pktmbuf_free(sd->coalesce.mbuf[i]);
1065 sd->coalesce.mbuf[i] = NULL;
1070 /* store pointers to the mbuf and the sgl used in free_tx_desc.
1071 * each tx desc can hold two pointers corresponding to the value
1072 * of ETH_COALESCE_PKT_PER_DESC
1074 sd->coalesce.mbuf[idx & 1] = mbuf;
1075 sd->coalesce.sgl[idx & 1] = (struct ulptx_sgl *)(cpl + 1);
1076 sd->coalesce.idx = (idx & 1) + 1;
1078 /* send the coalesced work request if max reached */
1079 if (++q->coalesce.idx == max_coal_pkt_num
1080 #ifndef RTE_LIBRTE_CXGBE_TPUT
1081 || q->coalesce.idx >= nb_pkts
1084 ship_tx_pkt_coalesce_wr(adap, txq);
1089 * t4_eth_xmit - add a packet to an Ethernet Tx queue
1090 * @txq: the egress queue
1093 * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
1095 int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
1098 const struct port_info *pi;
1099 struct cpl_tx_pkt_lso_core *lso;
1100 struct adapter *adap;
1101 struct rte_mbuf *m = mbuf;
1102 struct fw_eth_tx_pkt_wr *wr;
1103 struct fw_eth_tx_pkt_vm_wr *vmwr;
1104 struct cpl_tx_pkt_core *cpl;
1105 struct tx_sw_desc *d;
1106 dma_addr_t addr[m->nb_segs];
1107 unsigned int flits, ndesc, cflits;
1108 int l3hdr_len, l4hdr_len, eth_xtra_len;
1114 u32 max_pkt_len = txq->data->dev_conf.rxmode.max_rx_pkt_len;
1116 /* Reject xmit if queue is stopped */
1117 if (unlikely(txq->flags & EQ_STOPPED))
1121 * The chip min packet length is 10 octets but play safe and reject
1122 * anything shorter than an Ethernet header.
1124 if (unlikely(m->pkt_len < ETHER_HDR_LEN)) {
1126 rte_pktmbuf_free(m);
1130 if ((!(m->ol_flags & PKT_TX_TCP_SEG)) &&
1131 (unlikely(m->pkt_len > max_pkt_len)))
1134 pi = (struct port_info *)txq->data->dev_private;
1137 cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
1138 /* align the end of coalesce WR to a 512 byte boundary */
1139 txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
1141 if (!((m->ol_flags & PKT_TX_TCP_SEG) || (m->pkt_len > ETHER_MAX_LEN))) {
1142 if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
1143 if (unlikely(map_mbuf(mbuf, addr) < 0)) {
1144 dev_warn(adap, "%s: mapping err for coalesce\n",
1146 txq->stats.mapping_err++;
1149 rte_prefetch0((volatile void *)addr);
1150 return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
1157 if (txq->q.coalesce.idx)
1158 ship_tx_pkt_coalesce_wr(adap, txq);
1160 flits = calc_tx_flits(m, adap);
1161 ndesc = flits_to_desc(flits);
1162 credits = txq_avail(&txq->q) - ndesc;
1164 if (unlikely(credits < 0)) {
1165 dev_debug(adap, "%s: Tx ring %u full; credits = %d\n",
1166 __func__, txq->q.cntxt_id, credits);
1170 if (unlikely(map_mbuf(m, addr) < 0)) {
1171 txq->stats.mapping_err++;
1175 wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
1176 if (Q_IDXDIFF(&txq->q, equeidx) >= 64) {
1177 txq->q.equeidx = txq->q.pidx;
1178 wr_mid |= F_FW_WR_EQUEQ;
1181 wr = (void *)&txq->q.desc[txq->q.pidx];
1182 vmwr = (void *)&txq->q.desc[txq->q.pidx];
1183 wr->equiq_to_len16 = htonl(wr_mid);
1185 wr->r3 = rte_cpu_to_be_64(0);
1186 end = (u64 *)wr + flits;
1188 const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) +
1189 sizeof(vmwr->ethmacsrc) +
1190 sizeof(vmwr->ethtype) +
1191 sizeof(vmwr->vlantci));
1193 vmwr->r3[0] = rte_cpu_to_be_32(0);
1194 vmwr->r3[1] = rte_cpu_to_be_32(0);
1195 memcpy((void *)vmwr->ethmacdst, rte_pktmbuf_mtod(m, void *),
1197 end = (u64 *)vmwr + flits;
1201 len += sizeof(*cpl);
1203 /* Coalescing skipped and we send through normal path */
1204 if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
1205 wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
1207 FW_ETH_TX_PKT_VM_WR) |
1208 V_FW_WR_IMMDLEN(len));
1210 cpl = (void *)(wr + 1);
1212 cpl = (void *)(vmwr + 1);
1213 if (m->ol_flags & PKT_TX_IP_CKSUM) {
1214 cntrl = hwcsum(adap->params.chip, m) |
1216 txq->stats.tx_cso++;
1220 lso = (void *)(wr + 1);
1222 lso = (void *)(vmwr + 1);
1223 v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
1224 l3hdr_len = m->l3_len;
1225 l4hdr_len = m->l4_len;
1226 eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
1227 len += sizeof(*lso);
1228 wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
1230 FW_ETH_TX_PKT_VM_WR) |
1231 V_FW_WR_IMMDLEN(len));
1232 lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
1233 F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
1235 V_LSO_ETHHDR_LEN(eth_xtra_len / 4) |
1236 V_LSO_IPHDR_LEN(l3hdr_len / 4) |
1237 V_LSO_TCPHDR_LEN(l4hdr_len / 4));
1238 lso->ipid_ofst = htons(0);
1239 lso->mss = htons(m->tso_segsz);
1240 lso->seqno_offset = htonl(0);
1241 if (is_t4(adap->params.chip))
1242 lso->len = htonl(m->pkt_len);
1244 lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len));
1245 cpl = (void *)(lso + 1);
1247 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
1248 cntrl = V_TXPKT_ETHHDR_LEN(eth_xtra_len);
1250 cntrl = V_T6_TXPKT_ETHHDR_LEN(eth_xtra_len);
1252 cntrl |= V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 :
1254 V_TXPKT_IPHDR_LEN(l3hdr_len);
1256 txq->stats.tx_cso += m->tso_segsz;
1259 if (m->ol_flags & PKT_TX_VLAN_PKT) {
1260 txq->stats.vlan_ins++;
1261 cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci);
1264 cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT));
1266 cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) |
1267 V_TXPKT_PF(adap->pf));
1269 cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id) |
1272 cpl->pack = htons(0);
1273 cpl->len = htons(m->pkt_len);
1274 cpl->ctrl1 = cpu_to_be64(cntrl);
1277 txq->stats.tx_bytes += m->pkt_len;
1278 last_desc = txq->q.pidx + ndesc - 1;
1279 if (last_desc >= (int)txq->q.size)
1280 last_desc -= txq->q.size;
1282 d = &txq->q.sdesc[last_desc];
1283 if (d->coalesce.idx) {
1286 for (i = 0; i < d->coalesce.idx; i++) {
1287 rte_pktmbuf_free(d->coalesce.mbuf[i]);
1288 d->coalesce.mbuf[i] = NULL;
1290 d->coalesce.idx = 0;
1292 write_sgl(m, &txq->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
1294 txq->q.sdesc[last_desc].mbuf = m;
1295 txq->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
1296 txq_advance(&txq->q, ndesc);
1297 ring_tx_db(adap, &txq->q);
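/*
 * A condensed sketch of how the burst transmit path is expected to drive
 * t4_eth_xmit(); the real entry point lives outside this file, so names
 * and locking below are illustrative only:
 *
 *	t4_os_lock(&txq->txq_lock);
 *	reclaim_completed_tx(&txq->q);          reclaim finished work first
 *	for (sent = 0; sent < nb_pkts; sent++)
 *		if (t4_eth_xmit(txq, tx_pkts[sent], nb_pkts) < 0)
 *			break;                  ring full or mapping error
 *	t4_os_unlock(&txq->txq_lock);
 *	return sent;
 */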
1302 * alloc_ring - allocate resources for an SGE descriptor ring
1303 * @dev: the PCI device's core device
1304 * @nelem: the number of descriptors
1305 * @elem_size: the size of each descriptor
1306 * @sw_size: the size of the SW state associated with each ring element
1307 * @phys: the physical address of the allocated ring
1308 * @metadata: address of the array holding the SW state for the ring
1309 * @stat_size: extra space in HW ring for status information
1310 * @node: preferred node for memory allocations
1312 * Allocates resources for an SGE descriptor ring, such as Tx queues,
1313 * free buffer lists, or response queues. Each SGE ring requires
1314 * space for its HW descriptors plus, optionally, space for the SW state
1315 * associated with each HW entry (the metadata). The function returns
1316 * three values: the virtual address for the HW ring (the return value
1317 * of the function), the bus address of the HW ring, and the address
1320 static void *alloc_ring(size_t nelem, size_t elem_size,
1321 size_t sw_size, dma_addr_t *phys, void *metadata,
1322 size_t stat_size, __rte_unused uint16_t queue_id,
1323 int socket_id, const char *z_name,
1324 const char *z_name_sw)
1326 size_t len = CXGBE_MAX_RING_DESC_SIZE * elem_size + stat_size;
1327 const struct rte_memzone *tz;
1330 dev_debug(adapter, "%s: nelem = %zu; elem_size = %zu; sw_size = %zu; "
1331 "stat_size = %zu; queue_id = %u; socket_id = %d; z_name = %s;"
1332 " z_name_sw = %s\n", __func__, nelem, elem_size, sw_size,
1333 stat_size, queue_id, socket_id, z_name, z_name_sw);
1335 tz = rte_memzone_lookup(z_name);
1337 dev_debug(adapter, "%s: tz exists...returning existing..\n",
1343 * Allocate TX/RX ring hardware descriptors. A memzone large enough to
1344 * handle the maximum ring size is allocated in order to allow for
1345 * resizing in later calls to the queue setup function.
1347 tz = rte_memzone_reserve_aligned(z_name, len, socket_id, 0, 4096);
1352 memset(tz->addr, 0, len);
1354 s = rte_zmalloc_socket(z_name_sw, nelem * sw_size,
1355 RTE_CACHE_LINE_SIZE, socket_id);
1358 dev_err(adapter, "%s: failed to get sw_ring memory\n",
1364 *(void **)metadata = s;
1366 *phys = (uint64_t)tz->iova;
1371 * t4_pktgl_to_mbuf_usembufs - build an mbuf from a packet gather list
1372 * @gl: the gather list
1374 * Builds an mbuf from the given packet gather list. Returns the mbuf or
1375 * %NULL if mbuf allocation failed.
1377 static struct rte_mbuf *t4_pktgl_to_mbuf_usembufs(const struct pkt_gl *gl)
1380 * If there's only one mbuf fragment, just return that.
1382 if (likely(gl->nfrags == 1))
1383 return gl->mbufs[0];
1389 * t4_pktgl_to_mbuf - build an mbuf from a packet gather list
1390 * @gl: the gather list
1392 * Builds an mbuf from the given packet gather list. Returns the mbuf or
1393 * %NULL if mbuf allocation failed.
1395 static struct rte_mbuf *t4_pktgl_to_mbuf(const struct pkt_gl *gl)
1397 return t4_pktgl_to_mbuf_usembufs(gl);
1401 * t4_ethrx_handler - process an ingress ethernet packet
1402 * @q: the response queue that received the packet
1403 * @rsp: the response queue descriptor holding the RX_PKT message
1404 * @si: the gather list of packet fragments
1406 * Process an ingress ethernet packet and deliver it to the stack.
1408 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1409 const struct pkt_gl *si)
1411 struct rte_mbuf *mbuf;
1412 const struct cpl_rx_pkt *pkt;
1413 const struct rss_header *rss_hdr;
1415 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1418 rss_hdr = (const void *)rsp;
1419 pkt = (const void *)&rsp[1];
1420 /* Compressed error vector is enabled for T6 only */
1421 if (q->adapter->params.tp.rx_pkt_encap)
1422 err_vec = G_T6_COMPR_RXERR_VEC(ntohs(pkt->err_vec));
1424 err_vec = ntohs(pkt->err_vec);
1425 csum_ok = pkt->csum_calc && !err_vec;
1427 mbuf = t4_pktgl_to_mbuf(si);
1428 if (unlikely(!mbuf)) {
1429 rxq->stats.rx_drops++;
1433 mbuf->port = pkt->iff;
1434 if (pkt->l2info & htonl(F_RXF_IP)) {
1435 mbuf->packet_type = RTE_PTYPE_L3_IPV4;
1436 if (unlikely(!csum_ok))
1437 mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
1439 if ((pkt->l2info & htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
1440 mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
1441 } else if (pkt->l2info & htonl(F_RXF_IP6)) {
1442 mbuf->packet_type = RTE_PTYPE_L3_IPV6;
1445 mbuf->port = pkt->iff;
1447 if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
1448 mbuf->ol_flags |= PKT_RX_RSS_HASH;
1449 mbuf->hash.rss = ntohl(rss_hdr->hash_val);
1453 mbuf->ol_flags |= PKT_RX_VLAN;
1454 mbuf->vlan_tci = ntohs(pkt->vlan);
1457 rxq->stats.rx_bytes += mbuf->pkt_len;
1462 #define CXGB4_MSG_AN ((void *)1)
1465 * rspq_next - advance to the next entry in a response queue
1468 * Updates the state of a response queue to advance it to the next entry.
1470 static inline void rspq_next(struct sge_rspq *q)
1472 q->cur_desc = (const __be64 *)((const char *)q->cur_desc + q->iqe_len);
1473 if (unlikely(++q->cidx == q->size)) {
1476 q->cur_desc = q->desc;
1481 * process_responses - process responses from an SGE response queue
1482 * @q: the ingress queue to process
1483 * @budget: how many responses can be processed in this round
1484 * @rx_pkts: mbuf to put the pkts
1486 * Process responses from an SGE response queue up to the supplied budget.
1487 * Responses include received packets as well as control messages from FW
1490 * Additionally choose the interrupt holdoff time for the next interrupt
1491 * on this queue. If the system is under memory shortage use a fairly
1492 * long delay to help recovery.
1494 static int process_responses(struct sge_rspq *q, int budget,
1495 struct rte_mbuf **rx_pkts)
1497 int ret = 0, rsp_type;
1498 int budget_left = budget;
1499 const struct rsp_ctrl *rc;
1500 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1502 while (likely(budget_left)) {
1503 if (q->cidx == ntohs(q->stat->pidx))
1506 rc = (const struct rsp_ctrl *)
1507 ((const char *)q->cur_desc + (q->iqe_len - sizeof(*rc)));
1510 * Ensure response has been read
1513 rsp_type = G_RSPD_TYPE(rc->u.type_gen);
1515 if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) {
1516 struct sge *s = &q->adapter->sge;
1517 unsigned int stat_pidx;
1520 stat_pidx = ntohs(q->stat->pidx);
1521 stat_pidx_diff = P_IDXDIFF(q, stat_pidx);
1522 while (stat_pidx_diff && budget_left) {
1523 const struct rx_sw_desc *rsd =
1524 &rxq->fl.sdesc[rxq->fl.cidx];
1525 const struct rss_header *rss_hdr =
1526 (const void *)q->cur_desc;
1527 const struct cpl_rx_pkt *cpl =
1528 (const void *)&q->cur_desc[1];
1529 struct rte_mbuf *pkt, *npkt;
1534 rc = (const struct rsp_ctrl *)
1535 ((const char *)q->cur_desc +
1536 (q->iqe_len - sizeof(*rc)));
1538 rsp_type = G_RSPD_TYPE(rc->u.type_gen);
1539 if (unlikely(rsp_type != X_RSPD_TYPE_FLBUF))
1542 len = ntohl(rc->pldbuflen_qid);
1543 BUG_ON(!(len & F_RSPD_NEWBUF));
1546 len = G_RSPD_LEN(len);
1549 /* Compressed error vector is enabled for
1552 if (q->adapter->params.tp.rx_pkt_encap)
1553 err_vec = G_T6_COMPR_RXERR_VEC(
1554 ntohs(cpl->err_vec));
1556 err_vec = ntohs(cpl->err_vec);
1557 csum_ok = cpl->csum_calc && !err_vec;
1559 /* Chain mbufs into len if necessary */
1561 struct rte_mbuf *new_pkt = rsd->buf;
1563 bufsz = min(get_buf_size(q->adapter,
1565 new_pkt->data_len = bufsz;
1566 unmap_rx_buf(&rxq->fl);
1568 npkt->next = new_pkt;
1571 rsd = &rxq->fl.sdesc[rxq->fl.cidx];
1576 if (cpl->l2info & htonl(F_RXF_IP)) {
1577 pkt->packet_type = RTE_PTYPE_L3_IPV4;
1578 if (unlikely(!csum_ok))
1580 PKT_RX_IP_CKSUM_BAD;
1583 htonl(F_RXF_UDP | F_RXF_TCP)) &&
1586 PKT_RX_L4_CKSUM_BAD;
1587 } else if (cpl->l2info & htonl(F_RXF_IP6)) {
1588 pkt->packet_type = RTE_PTYPE_L3_IPV6;
1591 if (!rss_hdr->filter_tid &&
1592 rss_hdr->hash_type) {
1593 pkt->ol_flags |= PKT_RX_RSS_HASH;
1595 ntohl(rss_hdr->hash_val);
1599 pkt->ol_flags |= PKT_RX_VLAN;
1600 pkt->vlan_tci = ntohs(cpl->vlan);
1603 rte_pktmbuf_adj(pkt, s->pktshift);
1605 rxq->stats.rx_bytes += pkt->pkt_len;
1606 rx_pkts[budget - budget_left] = pkt;
1613 } else if (likely(rsp_type == X_RSPD_TYPE_CPL)) {
1614 ret = q->handler(q, q->cur_desc, NULL);
1616 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
1619 if (unlikely(ret)) {
1620 /* couldn't process descriptor, back off for recovery */
1621 q->next_intr_params = V_QINTR_TIMER_IDX(NOMEM_TMR_IDX);
1630 * If this is a Response Queue with an associated Free List and
1631 * there's room for another chunk of new Free List buffer pointers,
1632 * refill the Free List.
1635 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
1636 __refill_fl(q->adapter, &rxq->fl);
1638 return budget - budget_left;
1641 int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
1642 unsigned int budget, unsigned int *work_done)
1644 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1645 unsigned int cidx_inc;
1646 unsigned int params;
1649 *work_done = process_responses(q, budget, rx_pkts);
1652 cidx_inc = R_IDXDIFF(q, gts_idx);
1654 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
1655 __refill_fl(q->adapter, &rxq->fl);
1657 params = q->intr_params;
1658 q->next_intr_params = params;
1659 val = V_CIDXINC(cidx_inc) | V_SEINTARM(params);
1661 if (unlikely(!q->bar2_addr)) {
1662 u32 reg = is_pf4(q->adapter) ? MYPF_REG(A_SGE_PF_GTS) :
1663 T4VF_SGE_BASE_ADDR +
1666 t4_write_reg(q->adapter, reg,
1667 val | V_INGRESSQID((u32)q->cntxt_id));
1669 writel(val | V_INGRESSQID(q->bar2_qid),
1670 (void *)((uintptr_t)q->bar2_addr + SGE_UDB_GTS));
1671 /* This Write Memory Barrier will force the
1672 * write to the User Doorbell area to be
1677 q->gts_idx = q->cidx;
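/*
 * A minimal sketch of how a receive burst function is expected to call
 * cxgbe_poll(); the actual rx_pkt_burst callback is registered elsewhere,
 * so this is illustrative only:
 *
 *	struct sge_eth_rxq *rxq = rx_queue;
 *	unsigned int work_done;
 *
 *	cxgbe_poll(&rxq->rspq, rx_pkts, nb_pkts, &work_done);
 *	return work_done;
 */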
1683 * bar2_address - return the BAR2 address for an SGE Queue's Registers
1684 * @adapter: the adapter
1685 * @qid: the SGE Queue ID
1686 * @qtype: the SGE Queue Type (Egress or Ingress)
1687 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
1689 * Returns the BAR2 address for the SGE Queue Registers associated with
1690 * @qid. If BAR2 SGE Registers aren't available, returns NULL. Also
1691 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
1692 * Queue Registers. If the BAR2 Queue ID is 0, then "Inferred Queue ID"
1693 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
1695 static void __iomem *bar2_address(struct adapter *adapter, unsigned int qid,
1696 enum t4_bar2_qtype qtype,
1697 unsigned int *pbar2_qid)
1702 ret = t4_bar2_sge_qregs(adapter, qid, qtype, &bar2_qoffset, pbar2_qid);
1706 return adapter->bar2 + bar2_qoffset;
1709 int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq)
1711 struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
1712 unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;
1714 return t4_iq_start_stop(adap, adap->mbox, true, adap->pf, 0,
1715 rq->cntxt_id, fl_id, 0xffff);
1718 int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq)
1720 struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
1721 unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;
1723 return t4_iq_start_stop(adap, adap->mbox, false, adap->pf, 0,
1724 rq->cntxt_id, fl_id, 0xffff);
1728 * @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
1729 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
1731 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
1732 struct rte_eth_dev *eth_dev, int intr_idx,
1733 struct sge_fl *fl, rspq_handler_t hnd, int cong,
1734 struct rte_mempool *mp, int queue_id, int socket_id)
1738 struct sge *s = &adap->sge;
1739 struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
1740 char z_name[RTE_MEMZONE_NAMESIZE];
1741 char z_name_sw[RTE_MEMZONE_NAMESIZE];
1742 unsigned int nb_refill;
1745 /* Size needs to be multiple of 16, including status entry. */
1746 iq->size = cxgbe_roundup(iq->size, 16);
1748 snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1749 eth_dev->device->driver->name,
1750 fwevtq ? "fwq_ring" : "rx_ring",
1751 eth_dev->data->port_id, queue_id);
1752 snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
1754 iq->desc = alloc_ring(iq->size, iq->iqe_len, 0, &iq->phys_addr, NULL, 0,
1755 queue_id, socket_id, z_name, z_name_sw);
1759 memset(&c, 0, sizeof(c));
1760 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
1761 F_FW_CMD_WRITE | F_FW_CMD_EXEC);
1764 pciechan = cong > 0 ? cxgbe_ffs(cong) - 1 : pi->tx_chan;
1765 c.op_to_vfn |= htonl(V_FW_IQ_CMD_PFN(adap->pf) |
1766 V_FW_IQ_CMD_VFN(0));
1768 c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
1771 pciechan = pi->port_id;
1774 c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
1776 c.type_to_iqandstindex =
1777 htonl(V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
1778 V_FW_IQ_CMD_IQASYNCH(fwevtq) |
1779 V_FW_IQ_CMD_VIID(pi->viid) |
1780 V_FW_IQ_CMD_IQANDST(intr_idx < 0) |
1781 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_STATUS_PAGE) |
1782 V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
1784 c.iqdroprss_to_iqesize =
1785 htons(V_FW_IQ_CMD_IQPCIECH(pciechan) |
1786 F_FW_IQ_CMD_IQGTSMODE |
1787 V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
1788 V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
1789 c.iqsize = htons(iq->size);
1790 c.iqaddr = cpu_to_be64(iq->phys_addr);
1793 struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,
1795 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
1798 * Allocate the ring for the hardware free list (with space
1799 * for its status page) along with the associated software
1800 * descriptor ring. The free list size needs to be a multiple
1801 * of the Egress Queue Unit and at least 2 Egress Units larger
1802 * than the SGE's Egress Congestion Threshold
1803 * (fl_starve_thres - 1).
1805 if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
1806 fl->size = s->fl_starve_thres - 1 + 2 * 8;
1807 fl->size = cxgbe_roundup(fl->size, 8);
1809 snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1810 eth_dev->device->driver->name,
1811 fwevtq ? "fwq_ring" : "fl_ring",
1812 eth_dev->data->port_id, queue_id);
1813 snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
1815 fl->desc = alloc_ring(fl->size, sizeof(__be64),
1816 sizeof(struct rx_sw_desc),
1817 &fl->addr, &fl->sdesc, s->stat_len,
1818 queue_id, socket_id, z_name, z_name_sw);
1823 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
1824 c.iqns_to_fl0congen |=
1825 htonl(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
1826 (unlikely(rxq->usembufs) ?
1827 0 : F_FW_IQ_CMD_FL0PACKEN) |
1828 F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
1829 F_FW_IQ_CMD_FL0PADEN);
1830 if (is_pf4(adap) && cong >= 0)
1831 c.iqns_to_fl0congen |=
1832 htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
1833 F_FW_IQ_CMD_FL0CONGCIF |
1834 F_FW_IQ_CMD_FL0CONGEN);
1836 /* In T6, for egress queue type FL there is internal overhead
1837 * of 16B for header going into FLM module.
1838 * Hence maximum allowed burst size will be 448 bytes.
1840 c.fl0dcaen_to_fl0cidxfthresh =
1841 htons(V_FW_IQ_CMD_FL0FBMIN(chip_ver <= CHELSIO_T5 ?
1842 X_FETCHBURSTMIN_128B :
1843 X_FETCHBURSTMIN_64B) |
1844 V_FW_IQ_CMD_FL0FBMAX(chip_ver <= CHELSIO_T5 ?
1845 X_FETCHBURSTMAX_512B :
1846 X_FETCHBURSTMAX_256B));
1847 c.fl0size = htons(flsz);
1848 c.fl0addr = cpu_to_be64(fl->addr);
1852 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
1854 ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
1858 iq->cur_desc = iq->desc;
1862 iq->next_intr_params = iq->intr_params;
1863 iq->cntxt_id = ntohs(c.iqid);
1864 iq->abs_id = ntohs(c.physiqid);
1865 iq->bar2_addr = bar2_address(adap, iq->cntxt_id, T4_BAR2_QTYPE_INGRESS,
1867 iq->size--; /* subtract status entry */
1868 iq->stat = (void *)&iq->desc[iq->size * 8];
1869 iq->eth_dev = eth_dev;
1871 iq->port_id = pi->pidx;
1874 /* set offset to -1 to distinguish ingress queues without FL */
1875 iq->offset = fl ? 0 : -1;
1878 fl->cntxt_id = ntohs(c.fl0id);
1883 fl->alloc_failed = 0;
1886 * Note, we must initialize the BAR2 Free List User Doorbell
1887 * information before refilling the Free List!
1889 fl->bar2_addr = bar2_address(adap, fl->cntxt_id,
1890 T4_BAR2_QTYPE_EGRESS,
1893 nb_refill = refill_fl(adap, fl, fl_cap(fl));
1894 if (nb_refill != fl_cap(fl)) {
1896 dev_err(adap, "%s: mbuf alloc failed with error: %d\n",
1903 * For T5 and later we attempt to set up the Congestion Manager values
1904 * of the new RX Ethernet Queue. This should really be handled by
1905 * firmware because it's more complex than any host driver wants to
1906 * get involved with and it's different per chip and this is almost
1907 * certainly wrong. Firmware would be wrong as well, but it would be
1908 * a lot easier to fix in one place ... For now we do something very
1909 * simple (and hopefully less wrong).
1911 if (is_pf4(adap) && !is_t4(adap->params.chip) && cong >= 0) {
1915 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1916 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
1917 V_FW_PARAMS_PARAM_YZ(iq->cntxt_id));
1919 val = V_CONMCTXT_CNGTPMODE(X_CONMCTXT_CNGTPMODE_QUEUE);
1921 val = V_CONMCTXT_CNGTPMODE(
1922 X_CONMCTXT_CNGTPMODE_CHANNEL);
1923 for (i = 0; i < 4; i++) {
1924 if (cong & (1 << i))
1925 val |= V_CONMCTXT_CNGCHMAP(1 <<
1929 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
1932 dev_warn(adap->pdev_dev, "Failed to set Congestion Manager Context for Ingress Queue %d: %d\n",
1933 iq->cntxt_id, -ret);
1939 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
1940 iq->cntxt_id, fl->cntxt_id, 0xffff);
1949 if (fl && fl->desc) {
1950 rte_free(fl->sdesc);
1958 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id,
1959 unsigned int abs_id)
1963 q->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS,
1970 q->coalesce.idx = 0;
1971 q->coalesce.len = 0;
1972 q->coalesce.flits = 0;
1973 q->last_coal_idx = 0;
1975 q->stat = (void *)&q->desc[q->size];
1978 int t4_sge_eth_txq_start(struct sge_eth_txq *txq)
1981 * TODO: For flow-control, queue may be stopped waiting to reclaim
1983 * Ensure queue is in EQ_STOPPED state before starting it.
1985 if (!(txq->flags & EQ_STOPPED))
1988 txq->flags &= ~EQ_STOPPED;
1993 int t4_sge_eth_txq_stop(struct sge_eth_txq *txq)
1995 txq->flags |= EQ_STOPPED;
2000 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2001 struct rte_eth_dev *eth_dev, uint16_t queue_id,
2002 unsigned int iqid, int socket_id)
2005 struct fw_eq_eth_cmd c;
2006 struct sge *s = &adap->sge;
2007 struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
2008 char z_name[RTE_MEMZONE_NAMESIZE];
2009 char z_name_sw[RTE_MEMZONE_NAMESIZE];
2012 /* Add status entries */
2013 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2015 snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2016 eth_dev->device->driver->name, "tx_ring",
2017 eth_dev->data->port_id, queue_id);
2018 snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
2020 txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
2021 sizeof(struct tx_sw_desc), &txq->q.phys_addr,
2022 &txq->q.sdesc, s->stat_len, queue_id,
2023 socket_id, z_name, z_name_sw);
2027 memset(&c, 0, sizeof(c));
2028 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
2029 F_FW_CMD_WRITE | F_FW_CMD_EXEC);
2031 pciechan = pi->tx_chan;
2032 c.op_to_vfn |= htonl(V_FW_EQ_ETH_CMD_PFN(adap->pf) |
2033 V_FW_EQ_ETH_CMD_VFN(0));
2035 pciechan = pi->port_id;
2038 c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC |
2039 F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16));
2040 c.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE |
2041 V_FW_EQ_ETH_CMD_VIID(pi->viid));
2042 c.fetchszm_to_iqid =
2043 htonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
2044 V_FW_EQ_ETH_CMD_PCIECHN(pciechan) |
2045 F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid));
2047 htonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
2048 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
2049 V_FW_EQ_ETH_CMD_EQSIZE(nentries));
2050 c.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr);
2053 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2055 ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
2057 rte_free(txq->q.sdesc);
2058 txq->q.sdesc = NULL;
2063 init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)),
2064 G_FW_EQ_ETH_CMD_PHYSEQID(ntohl(c.physeqid_pkd)));
2066 txq->stats.pkts = 0;
2067 txq->stats.tx_cso = 0;
2068 txq->stats.coal_wr = 0;
2069 txq->stats.vlan_ins = 0;
2070 txq->stats.tx_bytes = 0;
2071 txq->stats.coal_pkts = 0;
2072 txq->stats.mapping_err = 0;
2073 txq->flags |= EQ_STOPPED;
2074 txq->eth_dev = eth_dev;
2075 txq->data = eth_dev->data;
2076 t4_os_lock_init(&txq->txq_lock);
2080 static void free_txq(struct sge_txq *q)
2087 static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2090 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
2092 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
2093 rq->cntxt_id, fl_id, 0xffff);
2099 free_rx_bufs(fl, fl->avail);
2100 rte_free(fl->sdesc);
2108 * Clear all queues of the port
2110 * Note: This function must only be called after the Rx and Tx paths
2111 * of the port have been disabled.
2113 void t4_sge_eth_clear_queues(struct port_info *pi)
2116 struct adapter *adap = pi->adapter;
2117 struct sge_eth_rxq *rxq = &adap->sge.ethrxq[pi->first_qset];
2118 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
2120 for (i = 0; i < pi->n_rx_qsets; i++, rxq++) {
2122 t4_sge_eth_rxq_stop(adap, &rxq->rspq);
2124 for (i = 0; i < pi->n_tx_qsets; i++, txq++) {
2126 struct sge_txq *q = &txq->q;
2128 t4_sge_eth_txq_stop(txq);
2129 reclaim_completed_tx(q);
2130 free_tx_desc(q, q->size);
2131 q->equeidx = q->pidx;
2136 void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq)
2138 if (rxq->rspq.desc) {
2139 t4_sge_eth_rxq_stop(adap, &rxq->rspq);
2140 free_rspq_fl(adap, &rxq->rspq, rxq->fl.size ? &rxq->fl : NULL);
2144 void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq)
2147 t4_sge_eth_txq_stop(txq);
2148 reclaim_completed_tx(&txq->q);
2149 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, txq->q.cntxt_id);
2150 free_tx_desc(&txq->q, txq->q.size);
2151 rte_free(txq->q.sdesc);
2156 void t4_sge_tx_monitor_start(struct adapter *adap)
2158 rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
2161 void t4_sge_tx_monitor_stop(struct adapter *adap)
2163 rte_eal_alarm_cancel(tx_timer_cb, (void *)adap);
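/*
 * The Tx monitor is expected to bracket the data path: start it once the
 * port has been started and its Tx queues enabled, and stop it before the
 * queues are torn down. Roughly (the actual call sites are in the device
 * start/stop handlers, shown here only for illustration):
 *
 *	t4_sge_tx_monitor_start(adap);      after dev_start enables the queues
 *	...
 *	t4_sge_tx_monitor_stop(adap);       before dev_stop / queue release
 */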
2167 * t4_free_sge_resources - free SGE resources
2168 * @adap: the adapter
2170 * Frees resources used by the SGE queue sets.
2172 void t4_free_sge_resources(struct adapter *adap)
2175 struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0];
2176 struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
2178 /* clean up Ethernet Tx/Rx queues */
2179 for (i = 0; i < adap->sge.max_ethqsets; i++, rxq++, txq++) {
2180 /* Free only the queues allocated */
2181 if (rxq->rspq.desc) {
2182 t4_sge_eth_rxq_release(adap, rxq);
2183 rxq->rspq.eth_dev = NULL;
2186 t4_sge_eth_txq_release(adap, txq);
2187 txq->eth_dev = NULL;
2191 if (adap->sge.fw_evtq.desc)
2192 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
2196 * t4_sge_init - initialize SGE
2197 * @adap: the adapter
2199 * Performs SGE initialization needed every time after a chip reset.
2200 * We do not initialize any of the queues here; instead, the driver
2201 * top-level must request those individually.
2203 * Called in two different modes:
2205 * 1. Perform actual hardware initialization and record hard-coded
2206 * parameters which were used. This gets used when we're the
2207 * Master PF and the Firmware Configuration File support didn't
2208 * work for some reason.
2210 * 2. We're not the Master PF or initialization was performed with
2211 * a Firmware Configuration File. In this case we need to grab
2212 * any of the SGE operating parameters that we need to have in
2213 * order to do our job and make sure we can live with them ...
static int t4_sge_init_soft(struct adapter *adap)
        struct sge *s = &adap->sge;
        u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
        u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
        u32 ingress_rx_threshold;
        /*
         * Verify that CPL messages are going to the Ingress Queue for
         * process_responses() and that only packet data is going to the
         * Free Lists.
         */
        if ((t4_read_reg(adap, A_SGE_CONTROL) & F_RXPKTCPLMODE) !=
            V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
                dev_err(adap, "bad SGE CPL MODE\n");
        /*
         * Validate the Host Buffer Register Array indices that we want to
         * use.
         *
         * XXX Note that we should really read through the Host Buffer Size
         * XXX register array and find the indices of the Buffer Sizes which
         * XXX meet our needs!
         */
#define READ_FL_BUF(x) \
        t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE0 + (x) * sizeof(u32))

        fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
        fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
        fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
        fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
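        /*
         * For example, READ_FL_BUF(RX_SMALL_PG_BUF) reads
         * A_SGE_FL_BUFFER_SIZE0 and READ_FL_BUF(RX_LARGE_MTU_BUF) reads the
         * entry 3 * sizeof(u32) bytes above it, i.e. the four buffer-size
         * indices the driver programs into its Free Lists map to the first
         * four entries of the SGE_FL_BUFFER_SIZE register array.
         */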
        /*
         * We only bother using the Large Page logic if the Large Page Buffer
         * is larger than our Page Size Buffer.
         */
        if (fl_large_pg <= fl_small_pg)
                fl_large_pg = 0;

        /*
         * The Page Size Buffer must be exactly equal to our Page Size and the
         * Large Page Size Buffer should be 0 (per above) or a power of 2.
         */
        if (fl_small_pg != CXGBE_PAGE_SIZE ||
            (fl_large_pg & (fl_large_pg - 1)) != 0) {
                dev_err(adap, "bad SGE FL page buffer sizes [%d, %d]\n",
                        fl_small_pg, fl_large_pg);

        s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
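        /*
         * Worked example: with a 4KB host page (PAGE_SHIFT == 12) and a 64KB
         * large-page FL buffer, fl_pg_order = ilog2(65536) - 12 = 4, i.e.
         * each large Free List buffer spans 2^4 = 16 host pages.
         */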
        if (adap->use_unpacked_mode) {
                if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap)) {
                        dev_err(adap, "bad SGE FL small MTU %d\n",
                                fl_small_mtu);

                if (fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
                        dev_err(adap, "bad SGE FL large MTU %d\n",
                                fl_large_mtu);
        /*
         * Retrieve our RX interrupt holdoff timer values and counter
         * threshold values from the SGE parameters.
         */
        timer_value_0_and_1 = t4_read_reg(adap, A_SGE_TIMER_VALUE_0_AND_1);
        timer_value_2_and_3 = t4_read_reg(adap, A_SGE_TIMER_VALUE_2_AND_3);
        timer_value_4_and_5 = t4_read_reg(adap, A_SGE_TIMER_VALUE_4_AND_5);
        s->timer_val[0] = core_ticks_to_us(adap,
                                           G_TIMERVALUE0(timer_value_0_and_1));
        s->timer_val[1] = core_ticks_to_us(adap,
                                           G_TIMERVALUE1(timer_value_0_and_1));
        s->timer_val[2] = core_ticks_to_us(adap,
                                           G_TIMERVALUE2(timer_value_2_and_3));
        s->timer_val[3] = core_ticks_to_us(adap,
                                           G_TIMERVALUE3(timer_value_2_and_3));
        s->timer_val[4] = core_ticks_to_us(adap,
                                           G_TIMERVALUE4(timer_value_4_and_5));
        s->timer_val[5] = core_ticks_to_us(adap,
                                           G_TIMERVALUE5(timer_value_4_and_5));

        ingress_rx_threshold = t4_read_reg(adap, A_SGE_INGRESS_RX_THRESHOLD);
        s->counter_val[0] = G_THRESHOLD_0(ingress_rx_threshold);
        s->counter_val[1] = G_THRESHOLD_1(ingress_rx_threshold);
        s->counter_val[2] = G_THRESHOLD_2(ingress_rx_threshold);
        s->counter_val[3] = G_THRESHOLD_3(ingress_rx_threshold);
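        /*
         * After core_ticks_to_us() the timer_val[] entries hold the six SGE
         * interrupt holdoff timers in microseconds, and counter_val[] holds
         * the four packet-count thresholds; Rx queue setup presumably picks
         * indices into these arrays to express a requested interrupt
         * coalescing configuration.
         */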
int t4_sge_init(struct adapter *adap)
        struct sge *s = &adap->sge;
        u32 sge_control, sge_conm_ctrl;
        int ret, egress_threshold;

        /*
         * Ingress Padding Boundary and Egress Status Page Size are set up by
         * t4_fixup_host_params().
         */
        sge_control = t4_read_reg(adap, A_SGE_CONTROL);
        s->pktshift = G_PKTSHIFT(sge_control);
        s->stat_len = (sge_control & F_EGRSTATUSPAGESIZE) ? 128 : 64;
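        /*
         * The SGE appends a status page to each egress queue;
         * F_EGRSTATUSPAGESIZE selects whether that page is 128 or 64 bytes,
         * and stat_len is presumably the amount of ring memory reserved for
         * it when queues are sized.
         */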
        s->fl_align = t4_fl_pkt_align(adap);
        ret = t4_sge_init_soft(adap);
                dev_err(adap, "%s: t4_sge_init_soft failed, error %d\n",
        /*
         * A FL with <= fl_starve_thres buffers is starving and a periodic
         * timer will attempt to refill it. This needs to be larger than the
         * SGE's Egress Congestion Threshold. If it isn't, then we can get
         * stuck waiting for new packets while the SGE is waiting for us to
         * give it more Free List entries. (Note that the SGE's Egress
         * Congestion Threshold is in units of 2 Free List pointers.) For T4,
         * there was only a single field to control this. For T5 there's the
         * original field which now only applies to Unpacked Mode Free List
         * buffers and a new field which only applies to Packed Mode Free List
         * buffers.
         */
        sge_conm_ctrl = t4_read_reg(adap, A_SGE_CONM_CTRL);
        if (is_t4(adap->params.chip) || adap->use_unpacked_mode)
                egress_threshold = G_EGRTHRESHOLD(sge_conm_ctrl);
        else
                egress_threshold = G_EGRTHRESHOLDPACKING(sge_conm_ctrl);
        s->fl_starve_thres = 2 * egress_threshold + 1;
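        /*
         * Worked example: if the selected EGRTHRESHOLD field reads back 32,
         * the hardware threshold corresponds to 32 * 2 = 64 Free List
         * pointers, so fl_starve_thres becomes 2 * 32 + 1 = 65 and the
         * refill logic kicks in just above the SGE's own congestion
         * threshold.
         */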
int t4vf_sge_init(struct adapter *adap)
        struct sge_params *sge_params = &adap->params.sge;
        u32 sge_ingress_queues_per_page;
        u32 sge_egress_queues_per_page;
        u32 sge_control, sge_control2;
        u32 fl_small_pg, fl_large_pg;
        u32 sge_ingress_rx_threshold;
        u32 sge_timer_value_0_and_1;
        u32 sge_timer_value_2_and_3;
        u32 sge_timer_value_4_and_5;
        u32 sge_congestion_control;
        struct sge *s = &adap->sge;
        unsigned int s_hps, s_qpp;
        u32 sge_host_page_size;
        u32 params[7], vals[7];
        /* query basic params from fw */
        params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL));
        params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_HOST_PAGE_SIZE));
        params[2] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE0));
        params[3] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE1));
        params[4] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_0_AND_1));
        params[5] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_2_AND_3));
        params[6] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_4_AND_5));
        v = t4vf_query_params(adap, 7, params, vals);
        if (v != FW_SUCCESS)
                return v;

        sge_control = vals[0];
        sge_host_page_size = vals[1];
        fl_small_pg = vals[2];
        fl_large_pg = vals[3];
        sge_timer_value_0_and_1 = vals[4];
        sge_timer_value_2_and_3 = vals[5];
        sge_timer_value_4_and_5 = vals[6];
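        /*
         * The VF presumably cannot read these global SGE registers directly
         * through its own register window, so it asks the firmware for their
         * contents via FW_PARAMS_MNEM_REG "register" parameters and works
         * from the returned values instead.
         */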
        /*
         * Start by vetting the basic SGE parameters which have been set up by
         * the Physical Function Driver.
         */

        /* We only bother using the Large Page logic if the Large Page Buffer
         * is larger than our Page Size Buffer.
         */
        if (fl_large_pg <= fl_small_pg)
                fl_large_pg = 0;

        /* The Page Size Buffer must be exactly equal to our Page Size and the
         * Large Page Size Buffer should be 0 (per above) or a power of 2.
         */
        if (fl_small_pg != CXGBE_PAGE_SIZE ||
            (fl_large_pg & (fl_large_pg - 1)) != 0) {
                dev_err(adap, "bad SGE FL buffer sizes [%d, %d]\n",
                        fl_small_pg, fl_large_pg);
        if ((sge_control & F_RXPKTCPLMODE) !=
            V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
                dev_err(adap, "bad SGE CPL MODE\n");

        /* Grab the ingress packing boundary from SGE_CONTROL2. */
        params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL2));
        v = t4vf_query_params(adap, 1, params, vals);
        if (v != FW_SUCCESS) {
                dev_err(adap, "Unable to get SGE Control2; "
                        "probably old firmware.\n");

        sge_control2 = vals[0];
        params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_RX_THRESHOLD));
        params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_CONM_CTRL));
        v = t4vf_query_params(adap, 2, params, vals);
        if (v != FW_SUCCESS)
                return v;
        sge_ingress_rx_threshold = vals[0];
        sge_congestion_control = vals[1];

        params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_EGRESS_QUEUES_PER_PAGE_VF));
        params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                     V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_QUEUES_PER_PAGE_VF));
        v = t4vf_query_params(adap, 2, params, vals);
        if (v != FW_SUCCESS) {
                dev_warn(adap, "Unable to get VF SGE Queues/Page; "
                         "probably old firmware.\n");

        sge_egress_queues_per_page = vals[0];
        sge_ingress_queues_per_page = vals[1];
        /*
         * We need the Queues/Page for our VF. This is based on the
         * PF from which we're instantiated and is indexed in the
         * register we just read.
         */
        s_hps = (S_HOSTPAGESIZEPF0 +
                 (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adap->pf);
                ((sge_host_page_size >> s_hps) & M_HOSTPAGESIZEPF0);

        s_qpp = (S_QUEUESPERPAGEPF0 +
                 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adap->pf);
        sge_params->eq_qpp =
                ((sge_egress_queues_per_page >> s_qpp)
                 & M_QUEUESPERPAGEPF0);
        sge_params->iq_qpp =
                ((sge_ingress_queues_per_page >> s_qpp)
                 & M_QUEUESPERPAGEPF0);
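        /*
         * Worked example (assuming 4-bit per-PF fields): these registers
         * pack one small field per PF, so the field for our parent PF sits
         * (S_...PF1 - S_...PF0) * adap->pf bits above the PF0 field; with
         * adap->pf == 2 that makes s_qpp = S_QUEUESPERPAGEPF0 + 8, and
         * masking with M_QUEUESPERPAGEPF0 isolates our queues-per-page
         * value.
         */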
        /*
         * Now translate the queried parameters into our internal forms.
         */
        s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
        s->stat_len = ((sge_control & F_EGRSTATUSPAGESIZE)
                       ? 128 : 64);
        s->pktshift = G_PKTSHIFT(sge_control);
        s->fl_align = t4vf_fl_pkt_align(adap, sge_control, sge_control2);
        /*
         * A FL with <= fl_starve_thres buffers is starving and a periodic
         * timer will attempt to refill it. This needs to be larger than the
         * SGE's Egress Congestion Threshold. If it isn't, then we can get
         * stuck waiting for new packets while the SGE is waiting for us to
         * give it more Free List entries. (Note that the SGE's Egress
         * Congestion Threshold is in units of 2 Free List pointers.)
         */
        switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
        case CHELSIO_T5:
                s->fl_starve_thres =
                        G_EGRTHRESHOLDPACKING(sge_congestion_control);
                break;
        case CHELSIO_T6:
        default:
                s->fl_starve_thres =
                        G_T6_EGRTHRESHOLDPACKING(sge_congestion_control);
                break;
        }
        s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
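        /*
         * T5 and T6 presumably place the packed-mode egress congestion
         * threshold in different bit fields of SGE_CONM_CTRL, hence the
         * chip-version switch and the separate G_EGRTHRESHOLDPACKING()/
         * G_T6_EGRTHRESHOLDPACKING() extractors; as on the PF path, the
         * threshold is in units of 2 Free List pointers, so it is doubled
         * and incremented by one.
         */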
        /*
         * Save RX interrupt holdoff timer values and counter
         * threshold values from the SGE parameters.
         */
        s->timer_val[0] = core_ticks_to_us(adap,
                                           G_TIMERVALUE0(sge_timer_value_0_and_1));
        s->timer_val[1] = core_ticks_to_us(adap,
                                           G_TIMERVALUE1(sge_timer_value_0_and_1));
        s->timer_val[2] = core_ticks_to_us(adap,
                                           G_TIMERVALUE2(sge_timer_value_2_and_3));
        s->timer_val[3] = core_ticks_to_us(adap,
                                           G_TIMERVALUE3(sge_timer_value_2_and_3));
        s->timer_val[4] = core_ticks_to_us(adap,
                                           G_TIMERVALUE4(sge_timer_value_4_and_5));
        s->timer_val[5] = core_ticks_to_us(adap,
                                           G_TIMERVALUE5(sge_timer_value_4_and_5));
        s->counter_val[0] = G_THRESHOLD_0(sge_ingress_rx_threshold);
        s->counter_val[1] = G_THRESHOLD_1(sge_ingress_rx_threshold);
        s->counter_val[2] = G_THRESHOLD_2(sge_ingress_rx_threshold);
        s->counter_val[3] = G_THRESHOLD_3(sge_ingress_rx_threshold);