/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2014-2015 Chelsio Communications.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Chelsio Communications nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <linux/if_ether.h>
#include <sys/queue.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>

#include "common.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_interface.h"
/*
 * Max number of Rx buffers we replenish at a time.
 */
#define MAX_RX_REFILL 16U

#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
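
/*
 * NOMEM_TMR_IDX picks the last of the SGE's holdoff timers.  It is used by
 * process_responses() below to arm the next interrupt with a long delay
 * when a descriptor couldn't be processed (e.g. under memory shortage),
 * giving the host time to recover before it is interrupted again.
 */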
/*
 * Rx buffer sizes for "usembufs" Free List buffers (one ingress packet
 * per mbuf buffer).  We currently only support two sizes for 1500- and
 * 9000-byte MTUs.  We could easily support more but there doesn't seem to
 * be much need for that ...
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000

static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
					  unsigned int mtu)
{
	struct sge *s = &adapter->sge;

	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
}

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
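
/*
 * Worked example of fl_mtu_bufsize() (values are illustrative, not read from
 * hardware): with a 2-byte pktshift, 14-byte Ethernet header, 4-byte VLAN tag
 * and a 1500-byte MTU, the raw requirement is 2 + 14 + 4 + 1500 = 1520 bytes,
 * which ALIGN() rounds up to 1536 bytes when fl_align is 64.
 */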
/*
 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array.  We also use bit 4, when the buffer has been unmapped
 * for DMA, but this is of course never sent to the hardware and is only used
 * to prevent double unmappings.  All of the above requires that the Free List
 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte or a power of 2 greater in alignment.  Since the SGE's minimal
 * Free List Buffer alignment is 32 bytes, this works out for us ...
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File.  We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (fl_pg_order) page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};
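
/*
 * The size index is OR'ed into the low bits of the DMA address written to
 * the Free List descriptor, e.g. a buffer at bus address 0x7f2a11000 posted
 * as a "small MTU" buffer is stored as (0x7f2a11000 | RX_SMALL_MTU_BUF);
 * get_buf_size() below recovers the index with (dma_addr & RX_BUF_SIZE).
 * The address is illustrative only.
 */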
/**
 * fl_cap - return the capacity of a free-buffer list
 * @fl: the Free List
 *
 * Returns the capacity of a free-buffer list.  The capacity is less than
 * the size because one descriptor needs to be left unpopulated, otherwise
 * HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}
/**
 * fl_starving - return whether a Free List is starving.
 * @adapter: pointer to the adapter
 * @fl: the Free List
 *
 * Tests specified Free List to see whether the number of buffers
 * available to the hardware has fallen below our "starvation"
 * threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
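
/*
 * Only fl->avail - fl->pend_cred counts here: pend_cred buffers have been
 * written into the ring but not yet credited to the hardware through the
 * doorbell in ring_fl_db(), so the SGE cannot use them yet.
 */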
static inline unsigned int get_buf_size(struct adapter *adapter,
					const struct rx_sw_desc *d)
{
	struct sge *s = &adapter->sge;
	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
	unsigned int buf_size;

	switch (rx_buf_size_idx) {
	case RX_SMALL_PG_BUF:
		buf_size = PAGE_SIZE;
		break;
	case RX_LARGE_PG_BUF:
		buf_size = PAGE_SIZE << s->fl_pg_order;
		break;
	case RX_SMALL_MTU_BUF:
		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
		break;
	case RX_LARGE_MTU_BUF:
		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
		break;
	default:
		BUG_ON(1);
		buf_size = 0; /* deal with bogus compiler warnings */
		/* NOTREACHED */
	}

	return buf_size;
}
/**
 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @q: the SGE free list to free buffers from
 * @n: how many buffers to free
 *
 * Release the next @n buffers on an SGE free-buffer Rx queue.  The
 * buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct sge_fl *q, int n)
{
	unsigned int cidx = q->cidx;
	struct rx_sw_desc *d;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->buf) {
			rte_pktmbuf_free(d->buf);
			d->buf = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}
/**
 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 * @q: the SGE free list
 *
 * Unmap the current buffer on an SGE free-buffer Rx queue.  The
 * buffer must be made inaccessible to HW before calling this function.
 *
 * This is similar to @free_rx_bufs above but does not free the buffer.
 * Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct sge_fl *q)
{
	if (++q->cidx == q->size)
		q->cidx = 0;
	q->avail--;
}
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= 8) {
		u32 val = adap->params.arch.sge_fl_db;

		if (is_t4(adap->params.chip))
			val |= V_PIDX(q->pend_cred / 8);
		else
			val |= V_PIDX_T5(q->pend_cred / 8);

		/*
		 * Make sure all memory writes to the Free List queue are
		 * committed before we tell the hardware about them.
		 */
		wmb();

		/*
		 * If we don't have access to the new User Doorbell (T5+), use
		 * the old doorbell mechanism; otherwise use the new BAR2
		 * mechanism.
		 */
		if (unlikely(!q->bar2_addr)) {
			t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
				     val | V_QID(q->cntxt_id));
		} else {
			writel(val | V_QID(q->bar2_qid),
			       (void *)((uintptr_t)q->bar2_addr +
					SGE_UDB_KDOORBELL));
			/*
			 * This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
		q->pend_cred &= 7;
	}
}
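
/*
 * The doorbell value above carries the pending credit in units of 8 buffer
 * pointers (one Egress Queue Unit), matching the "1 descriptor = 8 buffers"
 * packing noted in fl_cap(); any remainder below 8 stays in pend_cred until
 * enough buffers accumulate.
 */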
static inline struct rte_mbuf *cxgbe_rxmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	m = __rte_mbuf_raw_alloc(mp);
	__rte_mbuf_sanity_check_raw(m, 0);
	return m;
}

static inline void set_rx_sw_desc(struct rx_sw_desc *sd, void *buf,
				  dma_addr_t mapping)
{
	sd->buf = buf;
	sd->dma_addr = mapping;      /* includes size low bits */
}
/**
 * refill_fl_usembufs - refill an SGE Rx buffer ring with mbufs
 * @adap: the adapter
 * @q: the ring to refill
 * @n: the number of new buffers to allocate
 *
 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers.
 * The caller must assure that @n does not exceed the queue's capacity.
 * If afterwards the queue is found critically low, mark it as starving.
 *
 * Returns the number of buffers allocated.
 */
static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
				       int n)
{
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, fl);
	unsigned int cred = q->avail;
	__be64 *d = &q->desc[q->pidx];
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	unsigned int buf_size_idx = RX_SMALL_MTU_BUF;

	while (n--) {
		struct rte_mbuf *mbuf = cxgbe_rxmbuf_alloc(rxq->rspq.mb_pool);
		dma_addr_t mapping;

		if (!mbuf) {
			dev_debug(adap, "%s: mbuf alloc failed\n", __func__);
			rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
			goto out;
		}

		mbuf->data_off = RTE_PKTMBUF_HEADROOM;

		mapping = (dma_addr_t)(mbuf->buf_physaddr + mbuf->data_off);
		mapping |= buf_size_idx;
		*d++ = cpu_to_be64(mapping);
		set_rx_sw_desc(sd, mbuf, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
	}

out:	cred = q->avail - cred;
	q->pend_cred += cred;
	ring_fl_db(adap, q);

	if (unlikely(fl_starving(adap, q))) {
		/*
		 * Make sure data has been written to free list
		 */
		wmb();
	}

	return cred;
}
/**
 * refill_fl - refill an SGE Rx buffer ring with mbufs
 * @adap: the adapter
 * @q: the ring to refill
 * @n: the number of new buffers to allocate
 *
 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers.
 * The caller must assure that @n does not exceed the queue's capacity.
 * Returns the number of buffers allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n)
{
	return refill_fl_usembufs(adap, q, n);
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail));
}
/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @nelem: the number of descriptors
 * @elem_size: the size of each descriptor
 * @sw_size: the size of the SW state associated with each ring element
 * @phys: the physical address of the allocated ring
 * @metadata: address of the array holding the SW state for the ring
 * @stat_size: extra space in HW ring for status information
 * @queue_id: the queue index
 * @socket_id: preferred socket for memory allocations
 * @z_name: memzone name for the HW ring
 * @z_name_sw: memzone name for the SW ring
 *
 * Allocates resources for an SGE descriptor ring, such as Tx queues,
 * free buffer lists, or response queues.  Each SGE ring requires
 * space for its HW descriptors plus, optionally, space for the SW state
 * associated with each HW entry (the metadata).  The function returns
 * three values: the virtual address for the HW ring (the return value
 * of the function), the bus address of the HW ring (in *@phys), and the
 * address of the SW ring (in *@metadata).
 */
static void *alloc_ring(size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size, __rte_unused uint16_t queue_id,
			int socket_id, const char *z_name,
			const char *z_name_sw)
{
	size_t len = CXGBE_MAX_RING_DESC_SIZE * elem_size + stat_size;
	const struct rte_memzone *tz;
	void *s = NULL;

	dev_debug(adapter, "%s: nelem = %zu; elem_size = %zu; sw_size = %zu; "
		  "stat_size = %zu; queue_id = %u; socket_id = %d; z_name = %s;"
		  " z_name_sw = %s\n", __func__, nelem, elem_size, sw_size,
		  stat_size, queue_id, socket_id, z_name, z_name_sw);

	tz = rte_memzone_lookup(z_name);
	if (tz) {
		dev_debug(adapter, "%s: tz exists...returning existing..\n",
			  __func__);
		goto alloc_sw_ring;
	}

	/*
	 * Allocate TX/RX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	tz = rte_memzone_reserve_aligned(z_name, len, socket_id, 0, 4096);
	if (!tz)
		return NULL;

alloc_sw_ring:
	memset(tz->addr, 0, len);
	if (sw_size) {
		s = rte_zmalloc_socket(z_name_sw, nelem * sw_size,
				       RTE_CACHE_LINE_SIZE, socket_id);
		if (!s) {
			dev_err(adapter, "%s: failed to get sw_ring memory\n",
				__func__);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;

	if (phys)
		*phys = (uint64_t)tz->phys_addr;
	return tz->addr;
}
/**
 * t4_pktgl_to_mbuf_usembufs - build an mbuf from a packet gather list
 * @gl: the gather list
 *
 * Builds an mbuf from the given packet gather list.  Returns the mbuf or
 * %NULL if mbuf allocation failed.
 */
static struct rte_mbuf *t4_pktgl_to_mbuf_usembufs(const struct pkt_gl *gl)
{
	/*
	 * If there's only one mbuf fragment, just return that.
	 */
	if (likely(gl->nfrags == 1))
		return gl->mbufs[0];

	return NULL;
}

/**
 * t4_pktgl_to_mbuf - build an mbuf from a packet gather list
 * @gl: the gather list
 *
 * Builds an mbuf from the given packet gather list.  Returns the mbuf or
 * %NULL if mbuf allocation failed.
 */
static struct rte_mbuf *t4_pktgl_to_mbuf(const struct pkt_gl *gl)
{
	return t4_pktgl_to_mbuf_usembufs(gl);
}

#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
	((dma_addr_t) ((mb)->buf_physaddr + (mb)->data_off))
/**
 * t4_ethrx_handler - process an ingress ethernet packet
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the RX_PKT message
 * @si: the gather list of packet fragments
 *
 * Process an ingress ethernet packet and deliver it to the stack.
 */
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
		     const struct pkt_gl *si)
{
	struct rte_mbuf *mbuf;
	const struct cpl_rx_pkt *pkt;
	const struct rss_header *rss_hdr;
	bool csum_ok;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);

	rss_hdr = (const void *)rsp;
	pkt = (const void *)&rsp[1];
	csum_ok = pkt->csum_calc && !pkt->err_vec;

	mbuf = t4_pktgl_to_mbuf(si);
	if (unlikely(!mbuf)) {
		rxq->stats.rx_drops++;
		return 0;
	}

	mbuf->port = pkt->iff;
	if (pkt->l2info & htonl(F_RXF_IP)) {
		mbuf->ol_flags |= PKT_RX_IPV4_HDR;
		if (unlikely(!csum_ok))
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;

		if ((pkt->l2info & htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
	} else if (pkt->l2info & htonl(F_RXF_IP6)) {
		mbuf->ol_flags |= PKT_RX_IPV6_HDR;
	}

	if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
		mbuf->ol_flags |= PKT_RX_RSS_HASH;
		mbuf->hash.rss = ntohl(rss_hdr->hash_val);
	}

	if (pkt->vlan_ex) {
		mbuf->ol_flags |= PKT_RX_VLAN_PKT;
		mbuf->vlan_tci = ntohs(pkt->vlan);
	}
	rxq->stats.pkts++;
	rxq->stats.rx_bytes += mbuf->pkt_len;

	return 0;
}
/**
 * restore_rx_bufs - put back a packet's Rx buffers
 * @q: the SGE free list
 * @frags: number of FL buffers to restore
 *
 * Puts back on an FL the Rx buffers.  The buffers have already been
 * unmapped and are left unmapped, we mark them so to prevent further
 * unmapping attempts.
 *
 * This function undoes a series of @unmap_rx_buf calls when we find out
 * that the current packet can't be processed right away after all and we
 * need to come back to it later.  This is a very rare event and there's
 * no effort to make this particularly efficient.
 */
static void restore_rx_bufs(struct sge_fl *q, int frags)
{
	while (frags--) {
		if (q->cidx == 0)
			q->cidx = q->size - 1;
		else
			q->cidx--;
		q->avail++;
	}
}
/**
 * is_new_response - check if a response is newly written
 * @r: the response descriptor
 * @q: the response queue
 *
 * Returns true if a response descriptor contains a yet unprocessed
 * response.
 */
static inline bool is_new_response(const struct rsp_ctrl *r,
				   const struct sge_rspq *q)
{
	return (r->u.type_gen >> S_RSPD_GEN) == q->gen;
}

#define CXGB4_MSG_AN ((void *)1)
/**
 * rspq_next - advance to the next entry in a response queue
 * @q: the response queue
 *
 * Updates the state of a response queue to advance it to the next entry.
 */
static inline void rspq_next(struct sge_rspq *q)
{
	q->cur_desc = (const __be64 *)((const char *)q->cur_desc + q->iqe_len);
	if (unlikely(++q->cidx == q->size)) {
		q->cidx = 0;
		q->gen ^= 1;
		q->cur_desc = q->desc;
	}
}
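
/*
 * The generation bit is what lets is_new_response() tell freshly written
 * descriptors from stale ones: the hardware stamps each response with the
 * current generation, and the driver flips q->gen on every wrap of the
 * queue, so leftovers from the previous pass no longer match.
 */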
/**
 * process_responses - process responses from an SGE response queue
 * @q: the ingress queue to process
 * @budget: how many responses can be processed in this round
 * @rx_pkts: mbuf array to put the received packets in
 *
 * Process responses from an SGE response queue up to the supplied budget.
 * Responses include received packets as well as control messages from FW
 * or HW.
 *
 * Additionally choose the interrupt holdoff time for the next interrupt
 * on this queue.  If the system is under memory shortage use a fairly
 * long delay to help recovery.
 */
static int process_responses(struct sge_rspq *q, int budget,
			     struct rte_mbuf **rx_pkts)
{
	int ret = 0, rsp_type;
	int budget_left = budget;
	const struct rsp_ctrl *rc;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
	struct adapter *adapter = q->adapter;

	while (likely(budget_left)) {
		rc = (const struct rsp_ctrl *)
		     ((const char *)q->cur_desc + (q->iqe_len - sizeof(*rc)));

		if (!is_new_response(rc, q))
			break;

		/*
		 * Ensure response has been read
		 */
		rmb();
		rsp_type = G_RSPD_TYPE(rc->u.type_gen);

		if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) {
			struct pkt_gl si;
			const struct rx_sw_desc *rsd;
			struct rte_mbuf *pkt = NULL;
			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;

			si.usembufs = rxq->usembufs;
			/*
			 * In "use mbufs" mode, we don't pack multiple
			 * ingress packets per buffer (mbuf) so we
			 * should _always_ get a "New Buffer" flag
			 * from the SGE.  Also, since we hand the
			 * mbufs up to the host stack for it to
			 * eventually free, we don't release the mbufs
			 * in the driver (in contrast to the "packed
			 * page" mode where the driver needs to
			 * release its reference on the page buffers).
			 */
			BUG_ON(!(len & F_RSPD_NEWBUF));
			len = G_RSPD_LEN(len);
			si.tot_len = len;

			/* gather packet fragments */
			for (frags = 0; len; frags++) {
				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
				bufsz = min(get_buf_size(adapter, rsd), len);
				pkt = rsd->buf;
				pkt->data_len = bufsz;
				pkt->pkt_len = bufsz;
				si.mbufs[frags] = pkt;
				len -= bufsz;
				unmap_rx_buf(&rxq->fl);
			}

			si.va = RTE_PTR_ADD(si.mbufs[0]->buf_addr,
					    si.mbufs[0]->data_off);
			rte_prefetch1(si.va);

			/*
			 * For the "use mbuf" case here, we can end up
			 * chewing through our Free List very rapidly
			 * with one entry per Ingress packet getting
			 * consumed.  So if the handler() successfully
			 * consumed the mbuf, check to see if we can
			 * refill the Free List incrementally in the
			 * loop ...
			 */
			si.nfrags = frags;
			ret = q->handler(q, q->cur_desc, &si);

			if (unlikely(ret != 0)) {
				restore_rx_bufs(&rxq->fl, frags);
			} else {
				rx_pkts[budget - budget_left] = pkt;
				if (fl_cap(&rxq->fl) - rxq->fl.avail >= 8)
					__refill_fl(q->adapter, &rxq->fl);
			}
		} else if (likely(rsp_type == X_RSPD_TYPE_CPL)) {
			ret = q->handler(q, q->cur_desc, NULL);
		} else {
			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
		}

		if (unlikely(ret)) {
			/* couldn't process descriptor, back off for recovery */
			q->next_intr_params = V_QINTR_TIMER_IDX(NOMEM_TMR_IDX);
			break;
		}

		rspq_next(q);
		budget_left--;
	}

	/*
	 * If this is a Response Queue with an associated Free List and
	 * there's room for another chunk of new Free List buffer pointers,
	 * refill the Free List.
	 */
	if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 8)
		__refill_fl(q->adapter, &rxq->fl);

	return budget - budget_left;
}
int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
	       unsigned int budget, unsigned int *work_done)
{
	unsigned int params;
	u32 val;

	*work_done = process_responses(q, budget, rx_pkts);
	params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
	q->next_intr_params = params;
	val = V_CIDXINC(*work_done) | V_SEINTARM(params);

	if (*work_done) {
		/*
		 * If we don't have access to the new User GTS (T5+),
		 * use the old doorbell mechanism; otherwise use the new
		 * BAR2 mechanism.
		 */
		if (unlikely(!q->bar2_addr))
			t4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS),
				     val | V_INGRESSQID((u32)q->cntxt_id));
		else {
			writel(val | V_INGRESSQID(q->bar2_qid),
			       (void *)((uintptr_t)q->bar2_addr +
					SGE_UDB_GTS));
			/*
			 * This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
	}

	return 0;
}
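
/*
 * cxgbe_poll() is the building block of the PMD's receive path: the caller
 * supplies an rx_pkts array and a budget, gets the number of packets written
 * into the array back through *work_done, and the GTS doorbell write above
 * tells the hardware how far the driver has consumed the response queue.
 * A minimal sketch of a caller (names are hypothetical, not part of this
 * file):
 *
 *	unsigned int work_done = 0;
 *
 *	cxgbe_poll(&rxq->rspq, rx_pkts, nb_pkts, &work_done);
 *	return work_done;
 */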
/**
 * bar2_address - return the BAR2 address for an SGE Queue's Registers
 * @adapter: the adapter
 * @qid: the SGE Queue ID
 * @qtype: the SGE Queue Type (Egress or Ingress)
 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 * Returns the BAR2 address for the SGE Queue Registers associated with
 * @qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
 * Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
 */
static void __iomem *bar2_address(struct adapter *adapter, unsigned int qid,
				  enum t4_bar2_qtype qtype,
				  unsigned int *pbar2_qid)
{
	u64 bar2_qoffset;
	int ret;

	ret = t4_bar2_sge_qregs(adapter, qid, qtype, &bar2_qoffset, pbar2_qid);
	if (ret)
		return NULL;

	return adapter->bar2 + bar2_qoffset;
}
int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq)
{
	struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
	unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;

	return t4_iq_start_stop(adap, adap->mbox, true, adap->pf, 0,
				rq->cntxt_id, fl_id, 0xffff);
}

int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq)
{
	struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
	unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;

	return t4_iq_start_stop(adap, adap->mbox, false, adap->pf, 0,
				rq->cntxt_id, fl_id, 0xffff);
}
/*
 * @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
 */
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
		     struct rte_eth_dev *eth_dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t hnd, int cong,
		     struct rte_mempool *mp, int queue_id, int socket_id)
{
	int ret, flsz = 0;
	struct fw_iq_cmd c;
	struct sge *s = &adap->sge;
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	char z_name[RTE_MEMZONE_NAMESIZE];
	char z_name_sw[RTE_MEMZONE_NAMESIZE];
	unsigned int nb_refill;

	/* Size needs to be multiple of 16, including status entry. */
	iq->size = roundup(iq->size, 16);

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		 eth_dev->driver->pci_drv.name, fwevtq ? "fwq_ring" : "rx_ring",
		 eth_dev->data->port_id, queue_id);
	snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);

	iq->desc = alloc_ring(iq->size, iq->iqe_len, 0, &iq->phys_addr, NULL, 0,
			      queue_id, socket_id, z_name, z_name_sw);
	if (!iq->desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
			    V_FW_IQ_CMD_PFN(adap->pf) | V_FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
				 (sizeof(c) / 16));
	c.type_to_iqandstindex =
		htonl(V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
		      V_FW_IQ_CMD_IQASYNCH(fwevtq) |
		      V_FW_IQ_CMD_VIID(pi->viid) |
		      V_FW_IQ_CMD_IQANDST(intr_idx < 0) |
		      V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT) |
		      V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
					       -intr_idx - 1));
	c.iqdroprss_to_iqesize =
		htons(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
		      F_FW_IQ_CMD_IQGTSMODE |
		      V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
		      V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
	c.iqsize = htons(iq->size);
	c.iqaddr = cpu_to_be64(iq->phys_addr);
	if (cong >= 0)
		c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN);

	if (fl) {
		struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,
						       fl);
		enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);

		/*
		 * Allocate the ring for the hardware free list (with space
		 * for its status page) along with the associated software
		 * descriptor ring.  The free list size needs to be a multiple
		 * of the Egress Queue Unit and at least 2 Egress Units larger
		 * than the SGE's Egress Congestion Threshold
		 * (fl_starve_thres - 1).
		 */
		if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
			fl->size = s->fl_starve_thres - 1 + 2 * 8;
		fl->size = roundup(fl->size, 8);

		snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
			 eth_dev->driver->pci_drv.name,
			 fwevtq ? "fwq_ring" : "fl_ring",
			 eth_dev->data->port_id, queue_id);
		snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);

		fl->desc = alloc_ring(fl->size, sizeof(__be64),
				      sizeof(struct rx_sw_desc),
				      &fl->addr, &fl->sdesc, s->stat_len,
				      queue_id, socket_id, z_name, z_name_sw);
		if (!fl->desc)
			goto fl_nomem;

		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
		c.iqns_to_fl0congen |=
			htonl(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
			      (unlikely(rxq->usembufs) ?
			       0 : F_FW_IQ_CMD_FL0PACKEN) |
			      F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
			      F_FW_IQ_CMD_FL0PADEN);
		if (cong >= 0)
			c.iqns_to_fl0congen |=
				htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
				      F_FW_IQ_CMD_FL0CONGCIF |
				      F_FW_IQ_CMD_FL0CONGEN);

		/* In T6, for egress queue type FL there is internal overhead
		 * of 16B for header going into FLM module.
		 * Hence maximum allowed burst size will be 448 bytes.
		 */
		c.fl0dcaen_to_fl0cidxfthresh =
			htons(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) |
			      V_FW_IQ_CMD_FL0FBMAX((chip <= CHELSIO_T5) ?
			      X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
		c.fl0size = htons(flsz);
		c.fl0addr = cpu_to_be64(fl->addr);
	}

	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
	if (ret)
		goto err;

	iq->cur_desc = iq->desc;
	iq->cidx = 0;
	iq->gen = 1;
	iq->next_intr_params = iq->intr_params;
	iq->cntxt_id = ntohs(c.iqid);
	iq->abs_id = ntohs(c.physiqid);
	iq->bar2_addr = bar2_address(adap, iq->cntxt_id, T4_BAR2_QTYPE_INGRESS,
				     &iq->bar2_qid);
	iq->size--;			/* subtract status entry */
	iq->eth_dev = eth_dev;
	iq->handler = hnd;
	iq->mb_pool = mp;

	/* set offset to -1 to distinguish ingress queues without FL */
	iq->offset = fl ? 0 : -1;

	if (fl) {
		fl->cntxt_id = ntohs(c.fl0id);
		fl->avail = 0;
		fl->pend_cred = 0;
		fl->pidx = 0;
		fl->cidx = 0;
		fl->alloc_failed = 0;

		/*
		 * Note, we must initialize the BAR2 Free List User Doorbell
		 * information before refilling the Free List!
		 */
		fl->bar2_addr = bar2_address(adap, fl->cntxt_id,
					     T4_BAR2_QTYPE_EGRESS,
					     &fl->bar2_qid);

		nb_refill = refill_fl(adap, fl, fl_cap(fl));
		if (nb_refill != fl_cap(fl)) {
			ret = -ENOMEM;
			dev_err(adap, "%s: mbuf alloc failed with error: %d\n",
				__func__, ret);
			goto refill_fl_err;
		}
	}

	/*
	 * For T5 and later we attempt to set up the Congestion Manager values
	 * of the new RX Ethernet Queue.  This should really be handled by
	 * firmware because it's more complex than any host driver wants to
	 * get involved with and it's different per chip and this is almost
	 * certainly wrong.  Firmware would be wrong as well, but it would be
	 * a lot easier to fix in one place ...  For now we do something very
	 * simple (and hopefully less wrong).
	 */
	if (!is_t4(adap->params.chip) && cong >= 0) {
		u32 param, val;
		int i;

		param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
			 V_FW_PARAMS_PARAM_YZ(iq->cntxt_id));
		if (cong == 0) {
			val = V_CONMCTXT_CNGTPMODE(X_CONMCTXT_CNGTPMODE_QUEUE);
		} else {
			val = V_CONMCTXT_CNGTPMODE(
					X_CONMCTXT_CNGTPMODE_CHANNEL);
			for (i = 0; i < 4; i++) {
				if (cong & (1 << i))
					val |= V_CONMCTXT_CNGCHMAP(1 <<
								   (i << 2));
			}
		}
		ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
				    &param, &val);
		if (ret)
			dev_warn(adap->pdev_dev, "Failed to set Congestion Manager Context for Ingress Queue %d: %d\n",
				 iq->cntxt_id, -ret);
	}

	return 0;

refill_fl_err:
	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
		   iq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff);
fl_nomem:
	ret = -ENOMEM;
err:
	iq->cntxt_id = 0;
	iq->abs_id = 0;
	if (iq->desc)
		iq->desc = NULL;

	if (fl && fl->desc) {
		rte_free(fl->sdesc);
		fl->cntxt_id = 0;
		fl->sdesc = NULL;
		fl->desc = NULL;
	}

	return ret;
}
static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
			 struct sge_fl *fl)
{
	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;

	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
		   rq->cntxt_id, fl_id, 0xffff);
	rq->cntxt_id = 0;
	rq->desc = NULL;

	if (fl) {
		free_rx_bufs(fl, fl->avail);
		rte_free(fl->sdesc);
		fl->sdesc = NULL;
		fl->cntxt_id = 0;
		fl->desc = NULL;
	}
}

void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq)
{
	if (rxq->rspq.desc) {
		t4_sge_eth_rxq_stop(adap, &rxq->rspq);
		free_rspq_fl(adap, &rxq->rspq, rxq->fl.size ? &rxq->fl : NULL);
	}
}
/**
 * t4_sge_init - initialize SGE
 * @adap: the adapter
 *
 * Performs SGE initialization needed every time after a chip reset.
 * We do not initialize any of the queues here, instead the driver
 * top-level must request those individually.
 *
 * Called in two different modes:
 *
 *  1. Perform actual hardware initialization and record hard-coded
 *     parameters which were used.  This gets used when we're the
 *     Master PF and the Firmware Configuration File support didn't
 *     work for some reason.
 *
 *  2. We're not the Master PF or initialization was performed with
 *     a Firmware Configuration File.  In this case we need to grab
 *     any of the SGE operating parameters that we need to have in
 *     order to do our job and make sure we can live with them ...
 */
static int t4_sge_init_soft(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
	u32 ingress_rx_threshold;

	/*
	 * Verify that CPL messages are going to the Ingress Queue for
	 * process_responses() and that only packet data is going to the
	 * Free Lists.
	 */
	if ((t4_read_reg(adap, A_SGE_CONTROL) & F_RXPKTCPLMODE) !=
	    V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
		dev_err(adap, "bad SGE CPL MODE\n");
		return -EINVAL;
	}

	/*
	 * Validate the Host Buffer Register Array indices that we want to
	 * use ...
	 *
	 * XXX Note that we should really read through the Host Buffer Size
	 * XXX register array and find the indices of the Buffer Sizes which
	 * XXX meet our needs!
	 */
#define READ_FL_BUF(x) \
	t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE0 + (x) * sizeof(u32))

	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);

	/*
	 * We only bother using the Large Page logic if the Large Page Buffer
	 * is larger than our Page Size Buffer.
	 */
	if (fl_large_pg <= fl_small_pg)
		fl_large_pg = 0;

#undef READ_FL_BUF

	/*
	 * The Page Size Buffer must be exactly equal to our Page Size and the
	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
	 */
	if (fl_small_pg != PAGE_SIZE ||
	    (fl_large_pg & (fl_large_pg - 1)) != 0) {
		dev_err(adap, "bad SGE FL page buffer sizes [%d, %d]\n",
			fl_small_pg, fl_large_pg);
		return -EINVAL;
	}

	if (fl_large_pg)
		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;

	if (adap->use_unpacked_mode) {
		int err = 0;

		if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap)) {
			dev_err(adap, "bad SGE FL small MTU %d\n",
				fl_small_mtu);
			err = -EINVAL;
		}
		if (fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
			dev_err(adap, "bad SGE FL large MTU %d\n",
				fl_large_mtu);
			err = -EINVAL;
		}
		if (err)
			return err;
	}

	/*
	 * Retrieve our RX interrupt holdoff timer values and counter
	 * threshold values from the SGE parameters.
	 */
	timer_value_0_and_1 = t4_read_reg(adap, A_SGE_TIMER_VALUE_0_AND_1);
	timer_value_2_and_3 = t4_read_reg(adap, A_SGE_TIMER_VALUE_2_AND_3);
	timer_value_4_and_5 = t4_read_reg(adap, A_SGE_TIMER_VALUE_4_AND_5);
	s->timer_val[0] = core_ticks_to_us(adap,
					   G_TIMERVALUE0(timer_value_0_and_1));
	s->timer_val[1] = core_ticks_to_us(adap,
					   G_TIMERVALUE1(timer_value_0_and_1));
	s->timer_val[2] = core_ticks_to_us(adap,
					   G_TIMERVALUE2(timer_value_2_and_3));
	s->timer_val[3] = core_ticks_to_us(adap,
					   G_TIMERVALUE3(timer_value_2_and_3));
	s->timer_val[4] = core_ticks_to_us(adap,
					   G_TIMERVALUE4(timer_value_4_and_5));
	s->timer_val[5] = core_ticks_to_us(adap,
					   G_TIMERVALUE5(timer_value_4_and_5));

	ingress_rx_threshold = t4_read_reg(adap, A_SGE_INGRESS_RX_THRESHOLD);
	s->counter_val[0] = G_THRESHOLD_0(ingress_rx_threshold);
	s->counter_val[1] = G_THRESHOLD_1(ingress_rx_threshold);
	s->counter_val[2] = G_THRESHOLD_2(ingress_rx_threshold);
	s->counter_val[3] = G_THRESHOLD_3(ingress_rx_threshold);

	return 0;
}
int t4_sge_init(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	u32 sge_control, sge_control2, sge_conm_ctrl;
	unsigned int ingpadboundary, ingpackboundary;
	int ret, egress_threshold;

	/*
	 * Ingress Padding Boundary and Egress Status Page Size are set up by
	 * t4_fixup_host_params().
	 */
	sge_control = t4_read_reg(adap, A_SGE_CONTROL);
	s->pktshift = G_PKTSHIFT(sge_control);
	s->stat_len = (sge_control & F_EGRSTATUSPAGESIZE) ? 128 : 64;

	/*
	 * T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately.  The actual Ingress Packet Data alignment boundary
	 * within Packed Buffer Mode is the maximum of these two
	 * specifications.
	 */
	ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) +
			       X_INGPADBOUNDARY_SHIFT);
	s->fl_align = ingpadboundary;

	if (!is_t4(adap->params.chip) && !adap->use_unpacked_mode) {
		/*
		 * T5 has a weird interpretation of one of the PCIe Packing
		 * Boundary values.  No idea why ...
		 */
		sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
		ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
		if (ingpackboundary == X_INGPACKBOUNDARY_16B)
			ingpackboundary = 16;
		else
			ingpackboundary = 1 << (ingpackboundary +
						X_INGPACKBOUNDARY_SHIFT);

		s->fl_align = max(ingpadboundary, ingpackboundary);
	}

	ret = t4_sge_init_soft(adap);
	if (ret < 0) {
		dev_err(adap, "%s: t4_sge_init_soft failed, error %d\n",
			__func__, -ret);
		return ret;
	}

	/*
	 * A FL with <= fl_starve_thres buffers is starving and a periodic
	 * timer will attempt to refill it.  This needs to be larger than the
	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
	 * stuck waiting for new packets while the SGE is waiting for us to
	 * give it more Free List entries.  (Note that the SGE's Egress
	 * Congestion Threshold is in units of 2 Free List pointers.)  For T4,
	 * there was only a single field to control this.  For T5 there's the
	 * original field which now only applies to Unpacked Mode Free List
	 * buffers and a new field which only applies to Packed Mode Free List
	 * buffers.
	 */
	sge_conm_ctrl = t4_read_reg(adap, A_SGE_CONM_CTRL);
	if (is_t4(adap->params.chip) || adap->use_unpacked_mode)
		egress_threshold = G_EGRTHRESHOLD(sge_conm_ctrl);
	else
		egress_threshold = G_EGRTHRESHOLDPACKING(sge_conm_ctrl);
	s->fl_starve_thres = 2 * egress_threshold + 1;

	return 0;
}
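
/*
 * Worked example of the starvation threshold above (the register value is
 * illustrative): if G_EGRTHRESHOLD() reports 16, the SGE's Egress Congestion
 * Threshold corresponds to 2 * 16 = 32 Free List pointers, so fl_starve_thres
 * becomes 33 and fl_starving() flags the Free List one entry before the SGE
 * itself would consider it congested.
 */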