/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2014-2015 Chelsio Communications.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Chelsio Communications nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* This file should not be included directly.  Include common.h instead. */

#ifndef __T4_ADAPTER_H__
#define __T4_ADAPTER_H__

#include "cxgbe_compat.h"
#include "t4_regs_values.h"
enum {
	MAX_ETH_QSETS = 64,            /* # of Ethernet Tx/Rx queue sets */
};

struct adapter;
struct sge_rspq;

enum {                                 /* port flags */
	PORT_RSS_DONE = (1 << 0),
};
struct port_info {
	struct adapter *adapter;       /* adapter that this port belongs to */
	struct rte_eth_dev *eth_dev;   /* associated rte eth device */
	struct port_stats stats_base;  /* port statistics base */
	struct link_config link_cfg;   /* link configuration info */

	unsigned long flags;           /* port related flags */
	short int xact_addr_filt;      /* index of exact MAC address filter */

	u16 viid;                      /* associated virtual interface id */
	s8 mdio_addr;                  /* address of the PHY */
	u8 port_type;                  /* firmware port type */
	u8 mod_type;                   /* firmware module type */
	u8 port_id;                    /* physical port ID */
	u8 tx_chan;                    /* associated channel */

	u8 n_rx_qsets;                 /* # of rx qsets */
	u8 n_tx_qsets;                 /* # of tx qsets */
	u8 first_qset;                 /* index of first qset */

	u16 *rss;                      /* rss table */
	u8 rss_mode;                   /* rss mode */
	u16 rss_size;                  /* size of VI's RSS table slice */
};
/* Enable or disable autonegotiation.  If this is set to enable,
 * the forced link modes above are completely ignored.
 */
#define AUTONEG_DISABLE	0x00
#define AUTONEG_ENABLE	0x01
enum {                                 /* adapter flags */
	FULL_INIT_DONE = (1 << 0),
	USING_MSI      = (1 << 1),
	USING_MSIX     = (1 << 2),
	FW_QUEUE_BOUND = (1 << 3),
	FW_OK          = (1 << 4),
	CFG_QUEUES     = (1 << 5),
};
struct rx_sw_desc {                    /* SW state per Rx descriptor */
	void *buf;                     /* struct page or mbuf */
	dma_addr_t dma_addr;           /* bus address of page/mbuf */
};
struct sge_fl {                        /* SGE free-buffer queue state */
	struct rx_sw_desc *sdesc;      /* address of SW Rx descriptor ring */

	dma_addr_t addr;               /* bus address of HW ring start */
	__be64 *desc;                  /* address of HW Rx descriptor ring */

	void __iomem *bar2_addr;       /* address of BAR2 Queue registers */
	unsigned int bar2_qid;         /* Queue ID for BAR2 Queue registers */

	unsigned int cntxt_id;         /* SGE relative QID for the free list */
	unsigned int size;             /* capacity of free list */

	unsigned int avail;            /* # of available Rx buffers */
	unsigned int pend_cred;        /* new buffers since last FL DB ring */
	unsigned int cidx;             /* consumer index */
	unsigned int pidx;             /* producer index */

	unsigned long alloc_failed;    /* # of times buffer allocation failed */
	unsigned long low;             /* # of times momentarily starving */
};
#define MAX_MBUF_FRAGS (16384 / 512 + 2)
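/*
 * The arithmetic above, spelled out (an interpretation of the constants,
 * not documented here): a 16KB frame split across 512-byte Free List
 * buffers occupies 16384 / 512 = 32 fragments, plus 2 slack entries for a
 * payload that does not start and end on buffer boundaries.
 */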
/*
 * A packet gather list
 */
struct pkt_gl {
	struct rte_mbuf *mbufs[MAX_MBUF_FRAGS];
	void *va;                      /* virtual address of first byte */
	unsigned int nfrags;           /* # of fragments */
	unsigned int tot_len;          /* total length of fragments */
	bool usembufs;                 /* use mbufs for fragments */
};
typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
			      const struct pkt_gl *gl);
struct sge_rspq {                      /* state for an SGE response queue */
	struct adapter *adapter;       /* adapter that this queue belongs to */
	struct rte_eth_dev *eth_dev;   /* associated rte eth device */
	struct rte_mempool *mb_pool;   /* associated mempool */

	dma_addr_t phys_addr;          /* physical address of the ring */
	__be64 *desc;                  /* address of HW response ring */
	const __be64 *cur_desc;        /* current descriptor in queue */

	void __iomem *bar2_addr;       /* address of BAR2 Queue registers */
	unsigned int bar2_qid;         /* Queue ID for BAR2 Queue registers */

	unsigned int cidx;             /* consumer index */
	unsigned int gts_idx;          /* last gts write sent */
	unsigned int iqe_len;          /* entry size */
	unsigned int size;             /* capacity of response queue */
	int offset;                    /* offset into current Rx buffer */

	u8 gen;                        /* current generation bit */
	u8 intr_params;                /* interrupt holdoff parameters */
	u8 next_intr_params;           /* holdoff params for next interrupt */
	u8 pktcnt_idx;                 /* interrupt packet threshold */
	u8 port_id;                    /* associated port-id */
	u8 idx;                        /* queue index within its group */
	u16 cntxt_id;                  /* SGE relative QID for the response Q */
	u16 abs_id;                    /* absolute SGE id for the response q */

	rspq_handler_t handler;        /* associated handler for this response q */
};
struct sge_eth_rx_stats {              /* Ethernet rx queue statistics */
	u64 pkts;                      /* # of ethernet packets */
	u64 rx_bytes;                  /* # of ethernet bytes */
	u64 rx_cso;                    /* # of Rx checksum offloads */
	u64 vlan_ex;                   /* # of Rx VLAN extractions */
	u64 rx_drops;                  /* # of packets dropped due to no mem */
};
struct sge_eth_rxq {                   /* a SW Ethernet Rx queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_eth_rx_stats stats;
	bool usembufs;                 /* one ingress packet per mbuf FL buffer */
} __rte_cache_aligned;
/*
 * Currently there are two types of coalesce WR.  Type 0 needs 48 bytes per
 * packet (if one sgl is present) and type 1 needs 32 bytes.  This means
 * that type 0 can fit a maximum of 10 packets per WR and type 1 can fit
 * 15 packets.  We need to keep track of the mbuf pointers in a coalesce WR
 * to be able to free those mbufs when we get completions back from the FW.
 * Allocating the maximum number of pointers in every tx desc is a waste
 * of memory resources, so we only store 2 pointers per tx desc, which should
 * be enough since a tx desc can fit only 2 packets in the best-case
 * scenario, where a packet needs 32 bytes.
 */
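/*
 * The limits above, spelled out (a sketch; the ~512-byte WR budget is
 * inferred from these numbers, not stated here):
 *
 *	type 0: 10 packets * 48 bytes = 480 bytes per WR
 *	type 1: 15 packets * 32 bytes = 480 bytes per WR
 *
 * A single 64-byte tx desc therefore holds at most 64 / 32 = 2 best-case
 * (type 1) packets, which is why ETH_COALESCE_PKT_PER_DESC below is 2.
 */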
#define ETH_COALESCE_PKT_NUM 15
#define ETH_COALESCE_PKT_PER_DESC 2
struct tx_eth_coal_desc {
	struct rte_mbuf *mbuf[ETH_COALESCE_PKT_PER_DESC];
	struct ulptx_sgl *sgl[ETH_COALESCE_PKT_PER_DESC];
	int idx;
};

struct tx_desc {
	__be64 flit[8];
};
struct tx_sw_desc {                    /* SW state per Tx descriptor */
	struct rte_mbuf *mbuf;
	struct ulptx_sgl *sgl;
	struct tx_eth_coal_desc coalesce;
};
enum {
	EQ_STOPPED = (1 << 0),
};
struct eth_coalesce {
	unsigned char *ptr;
	unsigned char type;
	unsigned int idx;
	unsigned int len;
	unsigned int flits;
	unsigned int max;
};

struct sge_txq {
	struct tx_desc *desc;          /* address of HW Tx descriptor ring */
	struct tx_sw_desc *sdesc;      /* address of SW Tx descriptor ring */
	struct sge_qstat *stat;        /* queue status entry */
	struct eth_coalesce coalesce;  /* coalesce info */

	uint64_t phys_addr;            /* physical address of the ring */

	void __iomem *bar2_addr;       /* address of BAR2 Queue registers */
	unsigned int bar2_qid;         /* Queue ID for BAR2 Queue registers */

	unsigned int cntxt_id;         /* SGE relative QID for the Tx Q */
	unsigned int in_use;           /* # of in-use Tx descriptors */
	unsigned int size;             /* # of descriptors */
	unsigned int cidx;             /* SW consumer index */
	unsigned int pidx;             /* producer index */
	unsigned int dbidx;            /* last idx when db ring was done */
	unsigned int equeidx;          /* last sent credit request */
	unsigned int last_pidx;        /* last pidx recorded by tx monitor */
	unsigned int last_coal_idx;    /* last coal-idx recorded by tx monitor */

	int db_disabled;               /* doorbell state */
	unsigned short db_pidx;        /* doorbell producer index */
	unsigned short db_pidx_inc;    /* doorbell producer increment */
};
struct sge_eth_tx_stats {              /* Ethernet tx queue statistics */
	u64 pkts;                      /* # of ethernet packets */
	u64 tx_bytes;                  /* # of ethernet bytes */
	u64 tso;                       /* # of TSO requests */
	u64 tx_cso;                    /* # of Tx checksum offloads */
	u64 vlan_ins;                  /* # of Tx VLAN insertions */
	u64 mapping_err;               /* # of I/O MMU packet mapping errors */
	u64 coal_wr;                   /* # of coalesced wr */
	u64 coal_pkts;                 /* # of coalesced packets */
};
struct sge_eth_txq {                   /* state for an SGE Ethernet Tx queue */
	struct sge_txq q;
	struct rte_eth_dev *eth_dev;   /* port that this queue belongs to */
	struct sge_eth_tx_stats stats; /* queue statistics */
	rte_spinlock_t txq_lock;

	unsigned int flags;            /* flags for state of the queue */
} __rte_cache_aligned;
struct sge {
	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
	struct sge_rspq fw_evtq __rte_cache_aligned;

	u16 max_ethqsets;              /* # of available Ethernet queue sets */
	u32 stat_len;                  /* length of status page at ring end */
	u32 pktshift;                  /* padding between CPL & packet data */

	/* response queue interrupt parameters */
	u16 timer_val[SGE_NTIMERS];
	u8 counter_val[SGE_NCOUNTERS];

	u32 fl_align;                  /* response queue message alignment */
	u32 fl_pg_order;               /* large page allocation size */
	u32 fl_starve_thres;           /* Free List starvation threshold */
};
#define T4_OS_NEEDS_MBOX_LOCKING 1
/*
 * OS Lock/List primitives for those interfaces in the Common Code which
 * need to use them.
 */
struct mbox_entry {
	TAILQ_ENTRY(mbox_entry) next;
};
TAILQ_HEAD(mbox_list, mbox_entry);
struct adapter {
	struct rte_pci_device *pdev;   /* associated rte pci device */
	struct rte_eth_dev *eth_dev;   /* first port's rte eth device */
	struct adapter_params params;  /* adapter parameters */
	struct port_info port[MAX_NPORTS]; /* ports belonging to this adapter */
	struct sge sge;                /* associated SGE */

	/* support for single-threading access to adapter mailbox registers */
	struct mbox_list mbox_list;
	rte_spinlock_t mbox_lock;

	u8 *regs;                      /* pointer to registers region */
	u8 *bar2;                      /* pointer to bar2 region */
	unsigned long flags;           /* adapter flags */
	unsigned int mbox;             /* associated mailbox */
	unsigned int pf;               /* associated physical function id */

	int use_unpacked_mode;         /* unpacked rx mode state */
};
#define CXGBE_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
static inline uint64_t cxgbe_read_addr64(volatile void *addr)
{
	uint64_t val = CXGBE_PCI_REG(addr);
	uint64_t val2 = CXGBE_PCI_REG(((volatile uint8_t *)(addr) + 4));

	val2 = (uint64_t)(val2 << 32);
	val2 |= (uint64_t)val;

	return val2;
}
static inline uint32_t cxgbe_read_addr(volatile void *addr)
{
	return CXGBE_PCI_REG(addr);
}
#define CXGBE_PCI_REG_ADDR(adap, reg) \
	((volatile uint32_t *)((char *)(adap)->regs + (reg)))

#define CXGBE_READ_REG(adap, reg) \
	cxgbe_read_addr(CXGBE_PCI_REG_ADDR((adap), (reg)))

#define CXGBE_READ_REG64(adap, reg) \
	cxgbe_read_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)))

#define CXGBE_PCI_REG_WRITE(reg, value) ({ \
	CXGBE_PCI_REG((reg)) = (value); })

#define CXGBE_WRITE_REG(adap, reg, value) \
	CXGBE_PCI_REG_WRITE(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))
static inline uint64_t cxgbe_write_addr64(volatile void *addr, uint64_t val)
{
	CXGBE_PCI_REG(addr) = val;
	CXGBE_PCI_REG(((volatile uint8_t *)(addr) + 4)) = (val >> 32);

	return val;
}

#define CXGBE_WRITE_REG64(adap, reg, value) \
	cxgbe_write_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))
/**
 * t4_read_reg - read a HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 32-bit value of the given HW register.
 */
static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
{
	u32 val = CXGBE_READ_REG(adapter, reg_addr);

	CXGBE_DEBUG_REG(adapter, "read register 0x%x value 0x%x\n", reg_addr,
			val);
	return val;
}
/**
 * t4_write_reg - write a HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 32-bit value into the given HW register.
 */
static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
{
	CXGBE_DEBUG_REG(adapter, "setting register 0x%x to 0x%x\n", reg_addr,
			val);
	CXGBE_WRITE_REG(adapter, reg_addr, val);
}
/**
 * t4_read_reg64 - read a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 *
 * Returns the 64-bit value of the given HW register.
 */
static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
{
	u64 val = CXGBE_READ_REG64(adapter, reg_addr);

	CXGBE_DEBUG_REG(adapter, "64-bit read register %#x value %#llx\n",
			reg_addr, (unsigned long long)val);
	return val;
}
/**
 * t4_write_reg64 - write a 64-bit HW register
 * @adapter: the adapter
 * @reg_addr: the register address
 * @val: the value to write
 *
 * Write a 64-bit value into the given HW register.
 */
static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
				  u64 val)
{
	CXGBE_DEBUG_REG(adapter, "setting register %#x to %#llx\n", reg_addr,
			(unsigned long long)val);

	CXGBE_WRITE_REG64(adapter, reg_addr, val);
}
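/*
 * Usage sketch for the accessors above (the 0x0 offset is a placeholder,
 * not a real register):
 *
 *	u32 v = t4_read_reg(adapter, 0x0);
 *
 *	t4_write_reg(adapter, 0x0, v | 0x1);
 *
 * Note that the 64-bit variants are composed of two 32-bit MMIO accesses,
 * so they are not atomic with respect to the hardware.
 */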
/**
 * t4_os_set_hw_addr - store a port's MAC address in SW
 * @adapter: the adapter
 * @port_idx: the port index
 * @hw_addr: the Ethernet address
 *
 * Store the Ethernet address of the given port in SW.  Called by the
 * common code when it retrieves a port's Ethernet address from EEPROM.
 */
static inline void t4_os_set_hw_addr(struct adapter *adapter, int port_idx,
				     u8 hw_addr[])
{
	struct port_info *pi = &adapter->port[port_idx];

	ether_addr_copy((struct ether_addr *)hw_addr,
			&pi->eth_dev->data->mac_addrs[0]);
}
/**
 * t4_os_lock_init - initialize spinlock
 * @lock: the spinlock
 */
static inline void t4_os_lock_init(rte_spinlock_t *lock)
{
	rte_spinlock_init(lock);
}
/**
 * t4_os_lock - spin until lock is acquired
 * @lock: the spinlock
 */
static inline void t4_os_lock(rte_spinlock_t *lock)
{
	rte_spinlock_lock(lock);
}
/**
 * t4_os_unlock - unlock a spinlock
 * @lock: the spinlock
 */
static inline void t4_os_unlock(rte_spinlock_t *lock)
{
	rte_spinlock_unlock(lock);
}
/**
 * t4_os_init_list_head - initialize a mailbox list head
 * @head: head of list to initialize [to empty]
 */
static inline void t4_os_init_list_head(struct mbox_list *head)
{
	TAILQ_INIT(head);
}

static inline struct mbox_entry *t4_os_list_first_entry(struct mbox_list *head)
{
	return TAILQ_FIRST(head);
}
/**
 * t4_os_atomic_add_tail - Enqueue a list element atomically onto a list
 * @entry: the entry to be added to the queue
 * @head: current head of the linked list
 * @lock: lock to use to guarantee atomicity
 */
static inline void t4_os_atomic_add_tail(struct mbox_entry *entry,
					 struct mbox_list *head,
					 rte_spinlock_t *lock)
{
	t4_os_lock(lock);
	TAILQ_INSERT_TAIL(head, entry, next);
	t4_os_unlock(lock);
}
/**
 * t4_os_atomic_list_del - Dequeue a list element atomically from a list
 * @entry: the entry to be removed/dequeued from the list
 * @head: the head of the list
 * @lock: the spinlock
 */
static inline void t4_os_atomic_list_del(struct mbox_entry *entry,
					 struct mbox_list *head,
					 rte_spinlock_t *lock)
{
	t4_os_lock(lock);
	TAILQ_REMOVE(head, entry, next);
	t4_os_unlock(lock);
}
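/*
 * Usage sketch (illustrative only): how the Common Code can serialize
 * mailbox access with the primitives above.  The entry at the head of
 * mbox_list owns the mailbox; later callers spin until their entry
 * reaches the head.  The real code also sleeps and times out.
 *
 *	struct mbox_entry entry;
 *
 *	t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);
 *	while (t4_os_list_first_entry(&adap->mbox_list) != &entry)
 *		;	(wait until we own the mailbox)
 *	(... access the mailbox registers ...)
 *	t4_os_atomic_list_del(&entry, &adap->mbox_list, &adap->mbox_lock);
 */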
/**
 * adap2pinfo - return the port_info of a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Return the port_info structure for the port of the given index.
 */
static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
	return &adap->port[idx];
}
void *t4_alloc_mem(size_t size);
void t4_free_mem(void *addr);
#define t4_os_alloc(_size)	t4_alloc_mem((_size))
#define t4_os_free(_ptr)	t4_free_mem((_ptr))

void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);

void reclaim_completed_tx(struct sge_txq *q);
void t4_free_sge_resources(struct adapter *adap);
void t4_sge_tx_monitor_start(struct adapter *adap);
void t4_sge_tx_monitor_stop(struct adapter *adap);
int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
		     const struct pkt_gl *gl);
int t4_sge_init(struct adapter *adap);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
			 struct rte_eth_dev *eth_dev, uint16_t queue_id,
			 unsigned int iqid, int socket_id);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *rspq, bool fwevtq,
		     struct rte_eth_dev *eth_dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t handler,
		     int cong, struct rte_mempool *mp, int queue_id,
		     int socket_id);
int t4_sge_eth_txq_start(struct sge_eth_txq *txq);
int t4_sge_eth_txq_stop(struct sge_eth_txq *txq);
void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq);
int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq);
int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq);
void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq);
void t4_sge_eth_clear_queues(struct port_info *pi);
int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
			       unsigned int cnt);
int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
	       unsigned int budget, unsigned int *work_done);
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
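/*
 * Usage sketch: a typical Rx burst built on cxgbe_poll().  The wrapper
 * name and error handling are illustrative, not declared by this header:
 *
 *	static uint16_t cxgbe_recv_pkts(void *rx_queue,
 *					struct rte_mbuf **rx_pkts,
 *					uint16_t nb_pkts)
 *	{
 *		struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
 *		unsigned int work_done;
 *
 *		if (cxgbe_poll(&rxq->rspq, rx_pkts, nb_pkts, &work_done))
 *			return 0;
 *		return work_done;	(# of mbufs placed in rx_pkts)
 *	}
 */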
#endif /* __T4_ADAPTER_H__ */