From 3bd122eef2cc1de8d65b5219e54f4be3affa0cc2 Mon Sep 17 00:00:00 2001
From: Rahul Lakkireddy
Date: Tue, 30 Jun 2015 04:58:34 +0530
Subject: [PATCH] cxgbe/base: add hardware API for Chelsio T5 series adapters

Add the hardware-specific API for all Chelsio T5 series adapters under
the drivers/net/cxgbe/base directory.

Signed-off-by: Rahul Lakkireddy
Signed-off-by: Kumar Sanghvi
---
 drivers/net/cxgbe/base/adapter.h | 565 +++++
 drivers/net/cxgbe/base/common.h | 401 ++++
 drivers/net/cxgbe/base/t4_chip_type.h | 79 +
 drivers/net/cxgbe/base/t4_hw.c | 2686 +++++++++++++++++++++++
 drivers/net/cxgbe/base/t4_hw.h | 149 ++
 drivers/net/cxgbe/base/t4_msg.h | 345 +++
 drivers/net/cxgbe/base/t4_pci_id_tbl.h | 148 ++
 drivers/net/cxgbe/base/t4_regs.h | 779 +++++++
 drivers/net/cxgbe/base/t4_regs_values.h | 168 ++
 drivers/net/cxgbe/base/t4fw_interface.h | 1730 +++++++++++++++
 10 files changed, 7050 insertions(+)
 create mode 100644 drivers/net/cxgbe/base/adapter.h
 create mode 100644 drivers/net/cxgbe/base/common.h
 create mode 100644 drivers/net/cxgbe/base/t4_chip_type.h
 create mode 100644 drivers/net/cxgbe/base/t4_hw.c
 create mode 100644 drivers/net/cxgbe/base/t4_hw.h
 create mode 100644 drivers/net/cxgbe/base/t4_msg.h
 create mode 100644 drivers/net/cxgbe/base/t4_pci_id_tbl.h
 create mode 100644 drivers/net/cxgbe/base/t4_regs.h
 create mode 100644 drivers/net/cxgbe/base/t4_regs_values.h
 create mode 100644 drivers/net/cxgbe/base/t4fw_interface.h

diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h
new file mode 100644
index 0000000000..0ea1c958b1
--- /dev/null
+++ b/drivers/net/cxgbe/base/adapter.h
@@ -0,0 +1,565 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Chelsio Communications.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Chelsio Communications nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* This file should not be included directly. Include common.h instead.
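It supplies the DPDK-specific adapter, port and queue state used by the shared Chelsio code.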
*/ + +#ifndef __T4_ADAPTER_H__ +#define __T4_ADAPTER_H__ + +#include + +#include "cxgbe_compat.h" +#include "t4_regs_values.h" + +enum { + MAX_ETH_QSETS = 64, /* # of Ethernet Tx/Rx queue sets */ +}; + +struct adapter; +struct sge_rspq; + +enum { + PORT_RSS_DONE = (1 << 0), +}; + +struct port_info { + struct adapter *adapter; /* adapter that this port belongs to */ + struct rte_eth_dev *eth_dev; /* associated rte eth device */ + struct port_stats stats_base; /* port statistics base */ + struct link_config link_cfg; /* link configuration info */ + + unsigned long flags; /* port related flags */ + short int xact_addr_filt; /* index of exact MAC address filter */ + + u16 viid; /* associated virtual interface id */ + s8 mdio_addr; /* address of the PHY */ + u8 port_type; /* firmware port type */ + u8 mod_type; /* firmware module type */ + u8 port_id; /* physical port ID */ + u8 tx_chan; /* associated channel */ + + u8 n_rx_qsets; /* # of rx qsets */ + u8 n_tx_qsets; /* # of tx qsets */ + u8 first_qset; /* index of first qset */ + + u16 *rss; /* rss table */ + u8 rss_mode; /* rss mode */ + u16 rss_size; /* size of VI's RSS table slice */ +}; + +/* Enable or disable autonegotiation. If this is set to enable, + * the forced link modes above are completely ignored. + */ +#define AUTONEG_DISABLE 0x00 +#define AUTONEG_ENABLE 0x01 + +enum { /* adapter flags */ + FULL_INIT_DONE = (1 << 0), + USING_MSI = (1 << 1), + USING_MSIX = (1 << 2), + FW_QUEUE_BOUND = (1 << 3), + FW_OK = (1 << 4), + CFG_QUEUES = (1 << 5), + MASTER_PF = (1 << 6), +}; + +struct rx_sw_desc { /* SW state per Rx descriptor */ + void *buf; /* struct page or mbuf */ + dma_addr_t dma_addr; +}; + +struct sge_fl { /* SGE free-buffer queue state */ + /* RO fields */ + struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */ + + dma_addr_t addr; /* bus address of HW ring start */ + __be64 *desc; /* address of HW Rx descriptor ring */ + + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ + + unsigned int cntxt_id; /* SGE relative QID for the free list */ + unsigned int size; /* capacity of free list */ + + unsigned int avail; /* # of available Rx buffers */ + unsigned int pend_cred; /* new buffers since last FL DB ring */ + unsigned int cidx; /* consumer index */ + unsigned int pidx; /* producer index */ + + unsigned long alloc_failed; /* # of times buffer allocation failed */ + unsigned long low; /* # of times momentarily starving */ +}; + +#define MAX_MBUF_FRAGS (16384 / 512 + 2) + +/* A packet gather list */ +struct pkt_gl { + union { + struct rte_mbuf *mbufs[MAX_MBUF_FRAGS]; + } /* UNNAMED */; + void *va; /* virtual address of first byte */ + unsigned int nfrags; /* # of fragments */ + unsigned int tot_len; /* total length of fragments */ + bool usembufs; /* use mbufs for fragments */ +}; + +typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl); + +struct sge_rspq { /* state for an SGE response queue */ + struct adapter *adapter; /* adapter that this queue belongs to */ + struct rte_eth_dev *eth_dev; /* associated rte eth device */ + struct rte_mempool *mb_pool; /* associated mempool */ + + dma_addr_t phys_addr; /* physical address of the ring */ + __be64 *desc; /* address of HW response ring */ + const __be64 *cur_desc; /* current descriptor in queue */ + + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ + + unsigned int cidx; /* 
consumer index */ + unsigned int iqe_len; /* entry size */ + unsigned int size; /* capacity of response queue */ + int offset; /* offset into current Rx buffer */ + + u8 gen; /* current generation bit */ + u8 intr_params; /* interrupt holdoff parameters */ + u8 next_intr_params; /* holdoff params for next interrupt */ + u8 pktcnt_idx; /* interrupt packet threshold */ + u8 idx; /* queue index within its group */ + u16 cntxt_id; /* SGE relative QID for the response Q */ + u16 abs_id; /* absolute SGE id for the response q */ + + rspq_handler_t handler; /* associated handler for this response q */ +}; + +struct sge_eth_rx_stats { /* Ethernet rx queue statistics */ + u64 pkts; /* # of ethernet packets */ + u64 rx_bytes; /* # of ethernet bytes */ + u64 rx_cso; /* # of Rx checksum offloads */ + u64 vlan_ex; /* # of Rx VLAN extractions */ + u64 rx_drops; /* # of packets dropped due to no mem */ +}; + +struct sge_eth_rxq { /* a SW Ethernet Rx queue */ + struct sge_rspq rspq; + struct sge_fl fl; + struct sge_eth_rx_stats stats; + bool usembufs; /* one ingress packet per mbuf FL buffer */ +} __rte_cache_aligned; + +/* + * Currently there are two types of coalesce WR. Type 0 needs 48 bytes per + * packet (if one sgl is present) and type 1 needs 32 bytes. This means + * that type 0 can fit a maximum of 10 packets per WR and type 1 can fit + * 15 packets. We need to keep track of the mbuf pointers in a coalesce WR + * to be able to free those mbufs when we get completions back from the FW. + * Allocating the maximum number of pointers in every tx desc is a waste + * of memory resources so we only store 2 pointers per tx desc which should + * be enough since a tx desc can only fit 2 packets in the best case + * scenario where a packet needs 32 bytes. + */ +#define ETH_COALESCE_PKT_NUM 15 +#define ETH_COALESCE_PKT_PER_DESC 2 + +struct tx_eth_coal_desc { + struct rte_mbuf *mbuf[ETH_COALESCE_PKT_PER_DESC]; + struct ulptx_sgl *sgl[ETH_COALESCE_PKT_PER_DESC]; + int idx; +}; + +struct tx_desc { + __be64 flit[8]; +}; + +struct tx_sw_desc { /* SW state per Tx descriptor */ + struct rte_mbuf *mbuf; + struct ulptx_sgl *sgl; + struct tx_eth_coal_desc coalesce; +}; + +enum { + EQ_STOPPED = (1 << 0), +}; + +struct eth_coalesce { + unsigned char *ptr; + unsigned char type; + unsigned int idx; + unsigned int len; + unsigned int flits; + unsigned int max; +}; + +struct sge_txq { + struct tx_desc *desc; /* address of HW Tx descriptor ring */ + struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */ + struct sge_qstat *stat; /* queue status entry */ + struct eth_coalesce coalesce; /* coalesce info */ + + uint64_t phys_addr; /* physical address of the ring */ + + void __iomem *bar2_addr; /* address of BAR2 Queue registers */ + unsigned int bar2_qid; /* Queue ID for BAR2 Queue registers */ + + unsigned int cntxt_id; /* SGE relative QID for the Tx Q */ + unsigned int in_use; /* # of in-use Tx descriptors */ + unsigned int size; /* # of descriptors */ + unsigned int cidx; /* SW consumer index */ + unsigned int pidx; /* producer index */ + unsigned int dbidx; /* last idx when db ring was done */ + unsigned int equeidx; /* last sent credit request */ + unsigned int last_pidx; /* last pidx recorded by tx monitor */ + unsigned int last_coal_idx;/* last coal-idx recorded by tx monitor */ + + int db_disabled; /* doorbell state */ + unsigned short db_pidx; /* doorbell producer index */ + unsigned short db_pidx_inc; /* doorbell producer increment */ +}; + +struct sge_eth_tx_stats { /* Ethernet tx queue statistics */ + u64 
pkts; /* # of ethernet packets */ + u64 tx_bytes; /* # of ethernet bytes */ + u64 tso; /* # of TSO requests */ + u64 tx_cso; /* # of Tx checksum offloads */ + u64 vlan_ins; /* # of Tx VLAN insertions */ + u64 mapping_err; /* # of I/O MMU packet mapping errors */ + u64 coal_wr; /* # of coalesced wr */ + u64 coal_pkts; /* # of coalesced packets */ +}; + +struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ + struct sge_txq q; + struct rte_eth_dev *eth_dev; /* port that this queue belongs to */ + struct sge_eth_tx_stats stats; /* queue statistics */ + rte_spinlock_t txq_lock; + + unsigned int flags; /* flags for state of the queue */ +} __rte_cache_aligned; + +struct sge { + struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; + struct sge_eth_rxq ethrxq[MAX_ETH_QSETS]; + struct sge_rspq fw_evtq __rte_cache_aligned; + + u16 max_ethqsets; /* # of available Ethernet queue sets */ + u32 stat_len; /* length of status page at ring end */ + u32 pktshift; /* padding between CPL & packet data */ + + /* response queue interrupt parameters */ + u16 timer_val[SGE_NTIMERS]; + u8 counter_val[SGE_NCOUNTERS]; + + u32 fl_align; /* response queue message alignment */ + u32 fl_pg_order; /* large page allocation size */ + u32 fl_starve_thres; /* Free List starvation threshold */ +}; + +#define T4_OS_NEEDS_MBOX_LOCKING 1 + +/* + * OS Lock/List primitives for those interfaces in the Common Code which + * need this. + */ + +struct mbox_entry { + TAILQ_ENTRY(mbox_entry) next; +}; + +TAILQ_HEAD(mbox_list, mbox_entry); + +struct adapter { + struct rte_pci_device *pdev; /* associated rte pci device */ + struct rte_eth_dev *eth_dev; /* first port's rte eth device */ + struct adapter_params params; /* adapter parameters */ + struct port_info port[MAX_NPORTS]; /* ports belonging to this adapter */ + struct sge sge; /* associated SGE */ + + /* support for single-threading access to adapter mailbox registers */ + struct mbox_list mbox_list; + rte_spinlock_t mbox_lock; + + u8 *regs; /* pointer to registers region */ + u8 *bar2; /* pointer to bar2 region */ + unsigned long flags; /* adapter flags */ + unsigned int mbox; /* associated mailbox */ + unsigned int pf; /* associated physical function id */ + + int use_unpacked_mode; /* unpacked rx mode state */ +}; + +#define CXGBE_PCI_REG(reg) (*((volatile uint32_t *)(reg))) + +static inline uint64_t cxgbe_read_addr64(volatile void *addr) +{ + uint64_t val = CXGBE_PCI_REG(addr); + uint64_t val2 = CXGBE_PCI_REG(((volatile uint8_t *)(addr) + 4)); + + val2 = (uint64_t)(val2 << 32); + val += val2; + return val; +} + +static inline uint32_t cxgbe_read_addr(volatile void *addr) +{ + return CXGBE_PCI_REG(addr); +} + +#define CXGBE_PCI_REG_ADDR(adap, reg) \ + ((volatile uint32_t *)((char *)(adap)->regs + (reg))) + +#define CXGBE_READ_REG(adap, reg) \ + cxgbe_read_addr(CXGBE_PCI_REG_ADDR((adap), (reg))) + +#define CXGBE_READ_REG64(adap, reg) \ + cxgbe_read_addr64(CXGBE_PCI_REG_ADDR((adap), (reg))) + +#define CXGBE_PCI_REG_WRITE(reg, value) ({ \ + CXGBE_PCI_REG((reg)) = (value); }) + +#define CXGBE_WRITE_REG(adap, reg, value) \ + CXGBE_PCI_REG_WRITE(CXGBE_PCI_REG_ADDR((adap), (reg)), (value)) + +static inline uint64_t cxgbe_write_addr64(volatile void *addr, uint64_t val) +{ + CXGBE_PCI_REG(addr) = val; + CXGBE_PCI_REG(((volatile uint8_t *)(addr) + 4)) = (val >> 32); + return val; +} + +#define CXGBE_WRITE_REG64(adap, reg, value) \ + cxgbe_write_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)), (value)) + +/** + * t4_read_reg - read a HW register + * @adapter: the adapter + * @reg_addr: the register 
address
+ *
+ * Returns the 32-bit value of the given HW register.
+ */
+static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
+{
+ u32 val = CXGBE_READ_REG(adapter, reg_addr);
+
+ CXGBE_DEBUG_REG(adapter, "read register 0x%x value 0x%x\n", reg_addr,
+ val);
+ return val;
+}
+
+/**
+ * t4_write_reg - write a HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 32-bit value into the given HW register.
+ */
+static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
+{
+ CXGBE_DEBUG_REG(adapter, "setting register 0x%x to 0x%x\n", reg_addr,
+ val);
+ CXGBE_WRITE_REG(adapter, reg_addr, val);
+}
+
+/**
+ * t4_read_reg64 - read a 64-bit HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ *
+ * Returns the 64-bit value of the given HW register.
+ */
+static inline u64 t4_read_reg64(struct adapter *adapter, u32 reg_addr)
+{
+ u64 val = CXGBE_READ_REG64(adapter, reg_addr);
+
+ CXGBE_DEBUG_REG(adapter, "64-bit read register %#x value %#llx\n",
+ reg_addr, (unsigned long long)val);
+ return val;
+}
+
+/**
+ * t4_write_reg64 - write a 64-bit HW register
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 64-bit value into the given HW register.
+ */
+static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr,
+ u64 val)
+{
+ CXGBE_DEBUG_REG(adapter, "setting register %#x to %#llx\n", reg_addr,
+ (unsigned long long)val);
+
+ CXGBE_WRITE_REG64(adapter, reg_addr, val);
+}
+
+/**
+ * t4_os_set_hw_addr - store a port's MAC address in SW
+ * @adapter: the adapter
+ * @port_idx: the port index
+ * @hw_addr: the Ethernet address
+ *
+ * Store the Ethernet address of the given port in SW. Called by the
+ * common code when it retrieves a port's Ethernet address from EEPROM.
+ */
+static inline void t4_os_set_hw_addr(struct adapter *adapter, int port_idx,
+ u8 hw_addr[])
+{
+ struct port_info *pi = &adapter->port[port_idx];
+
+ ether_addr_copy((struct ether_addr *)hw_addr,
+ &pi->eth_dev->data->mac_addrs[0]);
+}
+
+/**
+ * t4_os_lock_init - initialize spinlock
+ * @lock: the spinlock
+ */
+static inline void t4_os_lock_init(rte_spinlock_t *lock)
+{
+ rte_spinlock_init(lock);
+}
+
+/**
+ * t4_os_lock - spin until lock is acquired
+ * @lock: the spinlock
+ */
+static inline void t4_os_lock(rte_spinlock_t *lock)
+{
+ rte_spinlock_lock(lock);
+}
+
+/**
+ * t4_os_unlock - unlock a spinlock
+ * @lock: the spinlock
+ */
+static inline void t4_os_unlock(rte_spinlock_t *lock)
+{
+ rte_spinlock_unlock(lock);
+}
+
+/**
+ * t4_os_init_list_head - initialize a mailbox list head
+ * @head: head of list to initialize [to empty]
+ */
+static inline void t4_os_init_list_head(struct mbox_list *head)
+{
+ TAILQ_INIT(head);
+}
+
+static inline struct mbox_entry *t4_os_list_first_entry(struct mbox_list *head)
+{
+ return TAILQ_FIRST(head);
+}
+
+/**
+ * t4_os_atomic_add_tail - Enqueue list element atomically onto list
+ * @entry: the entry to be added to the queue
+ * @head: current head of the linked list
+ * @lock: lock to use to guarantee atomicity
+ */
+static inline void t4_os_atomic_add_tail(struct mbox_entry *entry,
+ struct mbox_list *head,
+ rte_spinlock_t *lock)
+{
+ t4_os_lock(lock);
+ TAILQ_INSERT_TAIL(head, entry, next);
+ t4_os_unlock(lock);
+}
+
+/**
+ * t4_os_atomic_list_del - Dequeue list element atomically from list
+ * @entry: the entry to be removed/dequeued from the list.
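+ * @head: the list head from which @entry is to be removed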
+ * @lock: the spinlock + */ +static inline void t4_os_atomic_list_del(struct mbox_entry *entry, + struct mbox_list *head, + rte_spinlock_t *lock) +{ + t4_os_lock(lock); + TAILQ_REMOVE(head, entry, next); + t4_os_unlock(lock); +} + +/** + * adap2pinfo - return the port_info of a port + * @adap: the adapter + * @idx: the port index + * + * Return the port_info structure for the port of the given index. + */ +static inline struct port_info *adap2pinfo(struct adapter *adap, int idx) +{ + return &adap->port[idx]; +} + +void *t4_alloc_mem(size_t size); +void t4_free_mem(void *addr); +#define t4_os_alloc(_size) t4_alloc_mem((_size)) +#define t4_os_free(_ptr) t4_free_mem((_ptr)) + +void t4_os_portmod_changed(const struct adapter *adap, int port_id); +void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); + +void reclaim_completed_tx(struct sge_txq *q); +void t4_free_sge_resources(struct adapter *adap); +void t4_sge_tx_monitor_start(struct adapter *adap); +void t4_sge_tx_monitor_stop(struct adapter *adap); +int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf); +int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl); +int t4_sge_init(struct adapter *adap); +int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, + struct rte_eth_dev *eth_dev, uint16_t queue_id, + unsigned int iqid, int socket_id); +int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *rspq, bool fwevtq, + struct rte_eth_dev *eth_dev, int intr_idx, + struct sge_fl *fl, rspq_handler_t handler, + int cong, struct rte_mempool *mp, int queue_id, + int socket_id); +int t4_sge_eth_txq_start(struct sge_eth_txq *txq); +int t4_sge_eth_txq_stop(struct sge_eth_txq *txq); +void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq); +int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq); +int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq); +void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq); +void t4_sge_eth_clear_queues(struct port_info *pi); +int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us, + unsigned int cnt); +int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts, + unsigned int budget, unsigned int *work_done); +int cxgb4_write_rss(const struct port_info *pi, const u16 *queues); + +#endif /* __T4_ADAPTER_H__ */ diff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h new file mode 100644 index 0000000000..6ddc7d40f7 --- /dev/null +++ b/drivers/net/cxgbe/base/common.h @@ -0,0 +1,401 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Chelsio Communications. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __CHELSIO_COMMON_H +#define __CHELSIO_COMMON_H + +#include "cxgbe_compat.h" +#include "t4_hw.h" +#include "t4_chip_type.h" +#include "t4fw_interface.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define PAGE_SIZE RTE_PGSIZE_4K + +enum { + MAX_NPORTS = 4, /* max # of ports */ +}; + +enum { + MEMWIN0_APERTURE = 2048, + MEMWIN0_BASE = 0x1b800, +}; + +enum dev_master { MASTER_CANT, MASTER_MAY, MASTER_MUST }; + +enum dev_state { DEV_STATE_UNINIT, DEV_STATE_INIT, DEV_STATE_ERR }; + +enum { + PAUSE_RX = 1 << 0, + PAUSE_TX = 1 << 1, + PAUSE_AUTONEG = 1 << 2 +}; + +struct port_stats { + u64 tx_octets; /* total # of octets in good frames */ + u64 tx_frames; /* all good frames */ + u64 tx_bcast_frames; /* all broadcast frames */ + u64 tx_mcast_frames; /* all multicast frames */ + u64 tx_ucast_frames; /* all unicast frames */ + u64 tx_error_frames; /* all error frames */ + + u64 tx_frames_64; /* # of Tx frames in a particular range */ + u64 tx_frames_65_127; + u64 tx_frames_128_255; + u64 tx_frames_256_511; + u64 tx_frames_512_1023; + u64 tx_frames_1024_1518; + u64 tx_frames_1519_max; + + u64 tx_drop; /* # of dropped Tx frames */ + u64 tx_pause; /* # of transmitted pause frames */ + u64 tx_ppp0; /* # of transmitted PPP prio 0 frames */ + u64 tx_ppp1; /* # of transmitted PPP prio 1 frames */ + u64 tx_ppp2; /* # of transmitted PPP prio 2 frames */ + u64 tx_ppp3; /* # of transmitted PPP prio 3 frames */ + u64 tx_ppp4; /* # of transmitted PPP prio 4 frames */ + u64 tx_ppp5; /* # of transmitted PPP prio 5 frames */ + u64 tx_ppp6; /* # of transmitted PPP prio 6 frames */ + u64 tx_ppp7; /* # of transmitted PPP prio 7 frames */ + + u64 rx_octets; /* total # of octets in good frames */ + u64 rx_frames; /* all good frames */ + u64 rx_bcast_frames; /* all broadcast frames */ + u64 rx_mcast_frames; /* all multicast frames */ + u64 rx_ucast_frames; /* all unicast frames */ + u64 rx_too_long; /* # of frames exceeding MTU */ + u64 rx_jabber; /* # of jabber frames */ + u64 rx_fcs_err; /* # of received frames with bad FCS */ + u64 rx_len_err; /* # of received frames with length error */ + u64 rx_symbol_err; /* symbol errors */ + u64 rx_runt; /* # of short frames */ + + u64 rx_frames_64; /* # of Rx frames in a particular range */ + u64 rx_frames_65_127; + u64 rx_frames_128_255; + u64 rx_frames_256_511; + u64 rx_frames_512_1023; + u64 rx_frames_1024_1518; + u64 rx_frames_1519_max; + + u64 rx_pause; /* # of received pause frames */ + u64 rx_ppp0; /* # of received PPP prio 0 frames */ + u64 rx_ppp1; /* # of received PPP prio 1 frames */ + u64 rx_ppp2; /* # of received PPP prio 2 frames */ + u64 rx_ppp3; /* # of received PPP prio 3 frames */ + u64 rx_ppp4; /* # of received PPP prio 4 frames */ + u64 rx_ppp5; /* # of 
received PPP prio 5 frames */
+ u64 rx_ppp6; /* # of received PPP prio 6 frames */
+ u64 rx_ppp7; /* # of received PPP prio 7 frames */
+
+ u64 rx_ovflow0; /* drops due to buffer-group 0 overflows */
+ u64 rx_ovflow1; /* drops due to buffer-group 1 overflows */
+ u64 rx_ovflow2; /* drops due to buffer-group 2 overflows */
+ u64 rx_ovflow3; /* drops due to buffer-group 3 overflows */
+ u64 rx_trunc0; /* buffer-group 0 truncated packets */
+ u64 rx_trunc1; /* buffer-group 1 truncated packets */
+ u64 rx_trunc2; /* buffer-group 2 truncated packets */
+ u64 rx_trunc3; /* buffer-group 3 truncated packets */
+};
+
+struct sge_params {
+ u32 hps; /* host page size for our PF/VF */
+ u32 eq_qpp; /* egress queues/page for our PF/VF */
+ u32 iq_qpp; /* ingress queues/page for our PF/VF */
+};
+
+struct tp_params {
+ unsigned int ntxchan; /* # of Tx channels */
+ unsigned int tre; /* log2 of core clocks per TP tick */
+ unsigned int dack_re; /* DACK timer resolution */
+ unsigned int la_mask; /* what events are recorded by TP LA */
+ unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
+
+ u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
+ u32 ingress_config; /* cached TP_INGRESS_CONFIG */
+
+ /*
+ * TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
+ * subset of the set of fields which may be present in the Compressed
+ * Filter Tuple portion of filters and TCP TCB connections. The
+ * fields which are present are controlled by the TP_VLAN_PRI_MAP.
+ * Since a variable number of fields may or may not be present, their
+ * shifted field positions within the Compressed Filter Tuple may
+ * vary, or not even be present if the field isn't selected in
+ * TP_VLAN_PRI_MAP. Since some of these fields are needed in various
+ * places we store their offsets here, or a -1 if the field isn't
+ * present.
+ */
+ int vlan_shift;
+ int vnic_shift;
+ int port_shift;
+ int protocol_shift;
+};
+
+struct vpd_params {
+ unsigned int cclk;
+};
+
+struct pci_params {
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint32_t vpd_cap_addr;
+ uint16_t speed;
+ uint8_t width;
+};
+
+/*
+ * Firmware device log.
+ */ +struct devlog_params { + u32 memtype; /* which memory (EDC0, EDC1, MC) */ + u32 start; /* start of log in firmware memory */ + u32 size; /* size of log */ +}; + +struct arch_specific_params { + u8 nchan; + u16 mps_rplc_size; + u16 vfcount; + u32 sge_fl_db; + u16 mps_tcam_size; +}; + +struct adapter_params { + struct sge_params sge; + struct tp_params tp; + struct vpd_params vpd; + struct pci_params pci; + struct devlog_params devlog; + enum pcie_memwin drv_memwin; + + unsigned int sf_size; /* serial flash size in bytes */ + unsigned int sf_nsec; /* # of flash sectors */ + + unsigned int fw_vers; + unsigned int tp_vers; + + unsigned short mtus[NMTUS]; + unsigned short a_wnd[NCCTRL_WIN]; + unsigned short b_wnd[NCCTRL_WIN]; + + unsigned int mc_size; /* MC memory size */ + unsigned int cim_la_size; + + unsigned char nports; /* # of ethernet ports */ + unsigned char portvec; + + enum chip_type chip; /* chip code */ + struct arch_specific_params arch; /* chip specific params */ + + bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */ +}; + +struct link_config { + unsigned short supported; /* link capabilities */ + unsigned short advertising; /* advertised capabilities */ + unsigned short requested_speed; /* speed user has requested */ + unsigned short speed; /* actual link speed */ + unsigned char requested_fc; /* flow control user has requested */ + unsigned char fc; /* actual link flow control */ + unsigned char autoneg; /* autonegotiating? */ + unsigned char link_ok; /* link up? */ +}; + +#include "adapter.h" + +void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, + u32 val); +int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, + int polarity, + int attempts, int delay, u32 *valp); + +static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay) +{ + return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, + delay, NULL); +} + +#define for_each_port(adapter, iter) \ + for (iter = 0; iter < (adapter)->params.nports; ++iter) + +void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); +void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, + unsigned int mask, unsigned int val); +void t4_intr_enable(struct adapter *adapter); +void t4_intr_disable(struct adapter *adapter); +int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port, + struct link_config *lc); +void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, + const unsigned short *alpha, const unsigned short *beta); +int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, + enum dev_master master, enum dev_state *state); +int t4_fw_bye(struct adapter *adap, unsigned int mbox); +int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset); +int t4_fw_halt(struct adapter *adap, unsigned int mbox, int reset); +int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset); +int t4_fixup_host_params_compat(struct adapter *adap, unsigned int page_size, + unsigned int cache_line_size, + enum chip_type chip_compat); +int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, + unsigned int cache_line_size); +int t4_fw_initialize(struct adapter *adap, unsigned int mbox); +int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + u32 *val); +int t4_set_params_timeout(struct adapter *adap, unsigned int mbox, + unsigned int pf, unsigned int vf, + unsigned 
int nparams, const u32 *params, + const u32 *val, int timeout); +int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + const u32 *val); +int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox, + unsigned int port, unsigned int pf, unsigned int vf, + unsigned int nmac, u8 *mac, unsigned int *rss_size, + unsigned int portfunc, unsigned int idstype); +int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, + unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, + unsigned int *rss_size); +int t4_free_vi(struct adapter *adap, unsigned int mbox, + unsigned int pf, unsigned int vf, + unsigned int viid); +int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, + int mtu, int promisc, int all_multi, int bcast, int vlanex, + bool sleep_ok); +int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, + int idx, const u8 *addr, bool persist, bool add_smt); +int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, + unsigned int viid, bool rx_en, bool tx_en, bool dcb_en); +int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool rx_en, bool tx_en); +int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start, + unsigned int pf, unsigned int vf, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id); +int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int iqtype, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id); +int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid); + +static inline unsigned int core_ticks_per_usec(const struct adapter *adap) +{ + return adap->params.vpd.cclk / 1000; +} + +static inline unsigned int us_to_core_ticks(const struct adapter *adap, + unsigned int us) +{ + return (us * adap->params.vpd.cclk) / 1000; +} + +static inline unsigned int core_ticks_to_us(const struct adapter *adapter, + unsigned int ticks) +{ + /* add Core Clock / 2 to round ticks to nearest uS */ + return ((ticks * 1000 + adapter->params.vpd.cclk / 2) / + adapter->params.vpd.cclk); +} + +int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd, + int size, void *rpl, bool sleep_ok, int timeout); +int t4_wr_mbox_meat(struct adapter *adap, int mbox, + const void __attribute__((__may_alias__)) *cmd, int size, + void *rpl, bool sleep_ok); + +static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox, + const void *cmd, int size, void *rpl, + int timeout) +{ + return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true, + timeout); +} + +int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p); + +static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd, + int size, void *rpl) +{ + return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true); +} + +static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, + int size, void *rpl) +{ + return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); +} + +void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, u32 *vals, unsigned int nregs, + unsigned int start_idx); +void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, const u32 *vals, + unsigned int nregs, unsigned int start_idx); + +int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p); +int t4_read_flash(struct adapter 
*adapter, unsigned int addr, + unsigned int nwords, u32 *data, int byte_oriented); +int t4_flash_cfg_addr(struct adapter *adapter); +unsigned int t4_get_mps_bg_map(struct adapter *adapter, int idx); +const char *t4_get_port_type_description(enum fw_port_type port_type); +void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); +void t4_get_port_stats_offset(struct adapter *adap, int idx, + struct port_stats *stats, + struct port_stats *offset); +void t4_clr_port_stats(struct adapter *adap, int idx); +void t4_reset_link_config(struct adapter *adap, int idx); +int t4_get_fw_version(struct adapter *adapter, u32 *vers); +int t4_get_tp_version(struct adapter *adapter, u32 *vers); +int t4_get_flash_params(struct adapter *adapter); +int t4_prep_adapter(struct adapter *adapter); +int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); +int t4_init_rss_mode(struct adapter *adap, int mbox); +int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, + int start, int n, const u16 *rspq, unsigned int nrspq); +int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, + unsigned int flags, unsigned int defq); + +enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; +int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid, + unsigned int qtype, u64 *pbar2_qoffset, + unsigned int *pbar2_qid); + +int t4_init_sge_params(struct adapter *adapter); +int t4_init_tp_params(struct adapter *adap); +int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel); +int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); +#endif /* __CHELSIO_COMMON_H */ diff --git a/drivers/net/cxgbe/base/t4_chip_type.h b/drivers/net/cxgbe/base/t4_chip_type.h new file mode 100644 index 0000000000..1ca6803934 --- /dev/null +++ b/drivers/net/cxgbe/base/t4_chip_type.h @@ -0,0 +1,79 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Chelsio Communications. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef __T4_CHIP_TYPE_H__
+#define __T4_CHIP_TYPE_H__
+
+/*
+ * All T4 and later chips have their PCI-E Device IDs encoded as 0xVFPP where:
+ *
+ * V = "4" for T4; "5" for T5, etc.
+ * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
+ * PP = adapter product designation
+ *
+ * (For example, device ID 0x5401 decodes as V=5 (T5), F=4 (PF4), PP=0x01.)
+ *
+ * We use the "version" (V) of the adapter to code the Chip Version above.
+ */
+#define CHELSIO_PCI_ID_VER(devid) ((devid) >> 12)
+#define CHELSIO_PCI_ID_FUNC(devid) (((devid) >> 8) & 0xf)
+#define CHELSIO_PCI_ID_PROD(devid) ((devid) & 0xff)
+
+#define CHELSIO_T4 0x4
+#define CHELSIO_T5 0x5
+
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+
+enum chip_type {
+ T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
+ T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
+ T4_FIRST_REV = T4_A1,
+ T4_LAST_REV = T4_A2,
+
+ T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+ T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
+ T5_FIRST_REV = T5_A0,
+ T5_LAST_REV = T5_A1,
+};
+
+static inline int is_t4(enum chip_type chip)
+{
+ return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4);
+}
+
+static inline int is_t5(enum chip_type chip)
+{
+ return (CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5);
+}
+
+#endif /* __T4_CHIP_TYPE_H__ */
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
new file mode 100644
index 0000000000..c57200eb2f
--- /dev/null
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -0,0 +1,2686 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Chelsio Communications.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Chelsio Communications nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" +#include "t4_regs.h" +#include "t4_regs_values.h" +#include "t4fw_interface.h" + +static void init_link_config(struct link_config *lc, unsigned int caps); + +/** + * t4_read_mtu_tbl - returns the values in the HW path MTU table + * @adap: the adapter + * @mtus: where to store the MTU values + * @mtu_log: where to store the MTU base-2 log (may be %NULL) + * + * Reads the HW path MTU table. + */ +void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log) +{ + u32 v; + int i; + + for (i = 0; i < NMTUS; ++i) { + t4_write_reg(adap, A_TP_MTU_TABLE, + V_MTUINDEX(0xff) | V_MTUVALUE(i)); + v = t4_read_reg(adap, A_TP_MTU_TABLE); + mtus[i] = G_MTUVALUE(v); + if (mtu_log) + mtu_log[i] = G_MTUWIDTH(v); + } +} + +/** + * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register + * @adap: the adapter + * @addr: the indirect TP register address + * @mask: specifies the field within the register to modify + * @val: new value for the field + * + * Sets a field of an indirect TP register to the given value. + */ +void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, + unsigned int mask, unsigned int val) +{ + t4_write_reg(adap, A_TP_PIO_ADDR, addr); + val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask; + t4_write_reg(adap, A_TP_PIO_DATA, val); +} + +/* The minimum additive increment value for the congestion control table */ +#define CC_MIN_INCR 2U + +/** + * t4_load_mtus - write the MTU and congestion control HW tables + * @adap: the adapter + * @mtus: the values for the MTU table + * @alpha: the values for the congestion control alpha parameter + * @beta: the values for the congestion control beta parameter + * + * Write the HW MTU table with the supplied MTUs and the high-speed + * congestion control table with the supplied alpha, beta, and MTUs. + * We write the two tables together because the additive increments + * depend on the MTUs. + */ +void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, + const unsigned short *alpha, const unsigned short *beta) +{ + static const unsigned int avg_pkts[NCCTRL_WIN] = { + 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640, + 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480, + 28672, 40960, 57344, 81920, 114688, 163840, 229376 + }; + + unsigned int i, w; + + for (i = 0; i < NMTUS; ++i) { + unsigned int mtu = mtus[i]; + unsigned int log2 = fls(mtu); + + if (!(mtu & ((1 << log2) >> 2))) /* round */ + log2--; + t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) | + V_MTUWIDTH(log2) | V_MTUVALUE(mtu)); + + for (w = 0; w < NCCTRL_WIN; ++w) { + unsigned int inc; + + inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w], + CC_MIN_INCR); + + t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) | + (w << 16) | (beta[w] << 13) | inc); + } + } +} + +/** + * t4_wait_op_done_val - wait until an operation is completed + * @adapter: the adapter performing the operation + * @reg: the register to check for completion + * @mask: a single-bit field within @reg that indicates completion + * @polarity: the value of the field when the operation is completed + * @attempts: number of check iterations + * @delay: delay in usecs between iterations + * @valp: where to store the value of the register at completion time + * + * Wait until an operation is completed by checking a bit in a register + * up to @attempts times. 
If @valp is not NULL the value of the register + * at the time it indicated completion is stored there. Returns 0 if the + * operation completes and -EAGAIN otherwise. + */ +int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, + int polarity, int attempts, int delay, u32 *valp) +{ + while (1) { + u32 val = t4_read_reg(adapter, reg); + + if (!!(val & mask) == polarity) { + if (valp) + *valp = val; + return 0; + } + if (--attempts == 0) + return -EAGAIN; + if (delay) + udelay(delay); + } +} + +/** + * t4_set_reg_field - set a register field to a value + * @adapter: the adapter to program + * @addr: the register address + * @mask: specifies the portion of the register to modify + * @val: the new value for the register field + * + * Sets a register field specified by the supplied mask to the + * given value. + */ +void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, + u32 val) +{ + u32 v = t4_read_reg(adapter, addr) & ~mask; + + t4_write_reg(adapter, addr, v | val); + (void)t4_read_reg(adapter, addr); /* flush */ +} + +/** + * t4_read_indirect - read indirectly addressed registers + * @adap: the adapter + * @addr_reg: register holding the indirect address + * @data_reg: register holding the value of the indirect register + * @vals: where the read register values are stored + * @nregs: how many indirect registers to read + * @start_idx: index of first indirect register to read + * + * Reads registers that are accessed indirectly through an address/data + * register pair. + */ +void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, u32 *vals, unsigned int nregs, + unsigned int start_idx) +{ + while (nregs--) { + t4_write_reg(adap, addr_reg, start_idx); + *vals++ = t4_read_reg(adap, data_reg); + start_idx++; + } +} + +/** + * t4_write_indirect - write indirectly addressed registers + * @adap: the adapter + * @addr_reg: register holding the indirect addresses + * @data_reg: register holding the value for the indirect registers + * @vals: values to write + * @nregs: how many indirect registers to write + * @start_idx: address of first indirect register to write + * + * Writes a sequential block of registers that are accessed indirectly + * through an address/data register pair. + */ +void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, + unsigned int data_reg, const u32 *vals, + unsigned int nregs, unsigned int start_idx) +{ + while (nregs--) { + t4_write_reg(adap, addr_reg, start_idx++); + t4_write_reg(adap, data_reg, *vals++); + } +} + +/** + * t4_report_fw_error - report firmware error + * @adap: the adapter + * + * The adapter firmware can indicate error conditions to the host. + * If the firmware has indicated an error, print out the reason for + * the firmware error. 
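+ * The reason string is selected by the EVAL field of the PCIE_FW
+ * register (see G_PCIE_FW_EVAL below).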
+ */
+static void t4_report_fw_error(struct adapter *adap)
+{
+ static const char * const reason[] = {
+ "Crash", /* PCIE_FW_EVAL_CRASH */
+ "During Device Preparation", /* PCIE_FW_EVAL_PREP */
+ "During Device Configuration", /* PCIE_FW_EVAL_CONF */
+ "During Device Initialization", /* PCIE_FW_EVAL_INIT */
+ "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
+ "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
+ "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
+ "Reserved", /* reserved */
+ };
+ u32 pcie_fw;
+
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ if (pcie_fw & F_PCIE_FW_ERR)
+ pr_err("%s: Firmware reports adapter error: %s\n",
+ __func__, reason[G_PCIE_FW_EVAL(pcie_fw)]);
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order.
+ */
+static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
+ u32 mbox_addr)
+{
+ for ( ; nflit; nflit--, mbox_addr += 8)
+ *rpl++ = htobe64(t4_read_reg64(adap, mbox_addr));
+}
+
+/*
+ * Handle a FW assertion reported in a mailbox.
+ */
+static void fw_asrt(struct adapter *adap, u32 mbox_addr)
+{
+ struct fw_debug_cmd asrt;
+
+ get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
+ pr_warn("FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
+ asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
+ be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
+}
+
+#define X_CIM_PF_NOACCESS 0xeeeeeeee
+
+/*
+ * If the Host OS Driver needs locking around accesses to the mailbox, this
+ * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
+ */
+/* makes single-statement usage a bit cleaner ... */
+#ifdef T4_OS_NEEDS_MBOX_LOCKING
+#define T4_OS_MBOX_LOCKING(x) x
+#else
+#define T4_OS_MBOX_LOCKING(x) do {} while (0)
+#endif
+
+/**
+ * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
+ * @adap: the adapter
+ * @mbox: index of the mailbox to use
+ * @cmd: the command to write
+ * @size: command length in bytes
+ * @rpl: where to optionally store the reply
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ * @timeout: time to wait for command to finish before timing out
+ * (negative implies @sleep_ok=false)
+ *
+ * Sends the given command to FW through the selected mailbox and waits
+ * for the FW to execute the command. If @rpl is not %NULL it is used to
+ * store the FW's reply to the command. The command and its optional
+ * reply are of the same length. Some FW commands like RESET and
+ * INITIALIZE can take a considerable amount of time to execute.
+ * @sleep_ok determines whether we may sleep while awaiting the response.
+ * If sleeping is allowed we use progressive backoff otherwise we spin.
+ * Note that passing in a negative @timeout is an alternate mechanism
+ * for specifying @sleep_ok=false. This is useful when a higher level
+ * interface allows for specification of @timeout but not @sleep_ok ...
+ *
+ * Returns 0 on success or a negative errno on failure. A
+ * failure can happen either because we are not able to execute the
+ * command or FW executes it but signals an error. In the latter case
+ * the return value is the error code indicated by FW (negated).
+ */
+int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
+ const void __attribute__((__may_alias__)) *cmd,
+ int size, void *rpl, bool sleep_ok, int timeout)
+{
+ /*
+ * We delay in small increments at first in an effort to maintain
+ * responsiveness for simple, fast executing commands but then back
+ * off to larger delays to a maximum retry delay.
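+ * The delay[] table just below encodes this backoff: 1, 1, 3, 5, 10, 10,
+ * 20, 50, 100 ms, with the last value repeating until @timeout expires.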
+ */
+ static const int delay[] = {
+ 1, 1, 3, 5, 10, 10, 20, 50, 100
+ };
+
+ u32 v;
+ u64 res;
+ int i, ms;
+ unsigned int delay_idx;
+ __be64 *temp = (__be64 *)malloc(size * sizeof(char));
+ __be64 *p = temp;
+ u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
+ u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
+ u32 ctl;
+ struct mbox_entry entry;
+ u32 pcie_fw = 0;
+
+ if (!temp)
+ return -ENOMEM;
+
+ if ((size & 15) || size > MBOX_LEN) {
+ free(temp);
+ return -EINVAL;
+ }
+
+ bzero(p, size);
+ memcpy(p, (const __be64 *)cmd, size);
+
+ /*
+ * If we have a negative timeout, that implies that we can't sleep.
+ */
+ if (timeout < 0) {
+ sleep_ok = false;
+ timeout = -timeout;
+ }
+
+#ifdef T4_OS_NEEDS_MBOX_LOCKING
+ /*
+ * Queue ourselves onto the mailbox access list. When our entry is at
+ * the front of the list, we have rights to access the mailbox. So we
+ * wait [for a while] till we're at the front [or bail out with an
+ * EBUSY] ...
+ */
+ t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ for (i = 0; ; i += ms) {
+ /*
+ * If we've waited too long, return a busy indication. This
+ * really ought to be based on our initial position in the
+ * mailbox access list but this is a start. We very rarely
+ * contend on access to the mailbox ... Also check for a
+ * firmware error which we'll report as a device error.
+ */
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ if (i > 4 * timeout || (pcie_fw & F_PCIE_FW_ERR)) {
+ t4_os_atomic_list_del(&entry, &adap->mbox_list,
+ &adap->mbox_lock);
+ t4_report_fw_error(adap);
+ free(temp);
+ return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
+ }
+
+ /*
+ * If we're at the head, break out and start the mailbox
+ * protocol.
+ */
+ if (t4_os_list_first_entry(&adap->mbox_list) == &entry)
+ break;
+
+ /*
+ * Delay for a bit before checking again ...
+ */
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ rte_delay_ms(ms);
+ }
+ }
+#endif /* T4_OS_NEEDS_MBOX_LOCKING */
+
+ /*
+ * Attempt to gain access to the mailbox.
+ */
+ for (i = 0; i < 4; i++) {
+ ctl = t4_read_reg(adap, ctl_reg);
+ v = G_MBOWNER(ctl);
+ if (v != X_MBOWNER_NONE)
+ break;
+ }
+
+ /*
+ * If we were unable to gain access, dequeue ourselves from the
+ * mailbox atomic access list and report the error to our caller.
+ */
+ if (v != X_MBOWNER_PL) {
+ T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
+ &adap->mbox_list,
+ &adap->mbox_lock));
+ t4_report_fw_error(adap);
+ free(temp);
+ return (v == X_MBOWNER_FW ? -EBUSY : -ETIMEDOUT);
+ }
+
+ /*
+ * If we gain ownership of the mailbox and there's a "valid" message
+ * in it, this is likely an asynchronous error message from the
+ * firmware. So we'll report that and then proceed on with attempting
+ * to issue our own command ... which may well fail if the error
+ * presaged the firmware crashing ...
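+ * (We don't need to drain the stale message; writing our own command
+ * below simply overwrites the mailbox data registers.)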
+ */
+ if (ctl & F_MBMSGVALID) {
+ dev_err(adap, "found VALID command in mbox %u: "
+ "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
+ (unsigned long long)t4_read_reg64(adap, data_reg),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 8),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 16),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 24),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 32),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 40),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 48),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 56));
+ }
+
+ /*
+ * Copy in the new mailbox command and send it on its way ...
+ */
+ for (i = 0; i < size; i += 8, p++)
+ t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
+
+ CXGBE_DEBUG_MBOX(adap, "%s: mbox %u: %016llx %016llx %016llx %016llx "
+ "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
+ (unsigned long long)t4_read_reg64(adap, data_reg),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 8),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 16),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 24),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 32),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 40),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 48),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 56));
+
+ t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
+ t4_read_reg(adap, ctl_reg); /* flush write */
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ /*
+ * Loop waiting for the reply; bail out if we time out or the firmware
+ * reports an error.
+ */
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ for (i = 0; i < timeout && !(pcie_fw & F_PCIE_FW_ERR); i += ms) {
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ msleep(ms);
+ }
+
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ v = t4_read_reg(adap, ctl_reg);
+ if (v == X_CIM_PF_NOACCESS)
+ continue;
+ if (G_MBOWNER(v) == X_MBOWNER_PL) {
+ if (!(v & F_MBMSGVALID)) {
+ t4_write_reg(adap, ctl_reg,
+ V_MBOWNER(X_MBOWNER_NONE));
+ continue;
+ }
+
+ CXGBE_DEBUG_MBOX(adap,
+ "%s: mbox %u: %016llx %016llx %016llx %016llx "
+ "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
+ (unsigned long long)t4_read_reg64(adap, data_reg),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 8),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 16),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 24),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 32),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 40),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 48),
+ (unsigned long long)t4_read_reg64(adap, data_reg + 56));
+
+ CXGBE_DEBUG_MBOX(adap,
+ "command %#x completed in %d ms (%ssleeping)\n",
+ *(const u8 *)cmd,
+ i + ms, sleep_ok ? "" : "non-");
+
+ res = t4_read_reg64(adap, data_reg);
+ if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
+ fw_asrt(adap, data_reg);
+ res = V_FW_CMD_RETVAL(EIO);
+ } else if (rpl) {
+ get_mbox_rpl(adap, rpl, size / 8, data_reg);
+ }
+ t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
+ T4_OS_MBOX_LOCKING(
+ t4_os_atomic_list_del(&entry, &adap->mbox_list,
+ &adap->mbox_lock));
+ free(temp);
+ return -G_FW_CMD_RETVAL((int)res);
+ }
+ }
+
+ /*
+ * We timed out waiting for a reply to our mailbox command. Report
+ * the error and also check to see if the firmware reported any
+ * errors ...
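+ * (If F_PCIE_FW_ERR is set we return -ENXIO to flag a device error
+ * rather than -ETIMEDOUT.)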
+ */
+ dev_err(adap, "command %#x in mailbox %d timed out\n",
+ *(const u8 *)cmd, mbox);
+ T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
+ &adap->mbox_list,
+ &adap->mbox_lock));
+ t4_report_fw_error(adap);
+ free(temp);
+ return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
+}
+
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+ void *rpl, bool sleep_ok)
+{
+ return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
+ FW_CMD_MAX_TIMEOUT);
+}
+
+/**
+ * t4_config_rss_range - configure a portion of the RSS mapping table
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: virtual interface whose RSS subtable is to be written
+ * @start: start entry in the table to write
+ * @n: how many table entries to write
+ * @rspq: values for the "response queue" (Ingress Queue) lookup table
+ * @nrspq: number of values in @rspq
+ *
+ * Programs the selected part of the VI's RSS mapping table with the
+ * provided values. If @nrspq < @n the supplied values are used repeatedly
+ * until the full table range is populated.
+ *
+ * The caller must ensure the values in @rspq are in the range allowed for
+ * @viid.
+ */
+int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
+ int start, int n, const u16 *rspq, unsigned int nrspq)
+{
+ int ret;
+ const u16 *rsp = rspq;
+ const u16 *rsp_end = rspq + nrspq;
+ struct fw_rss_ind_tbl_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
+ V_FW_RSS_IND_TBL_CMD_VIID(viid));
+ cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+
+ /*
+ * Each firmware RSS command can accommodate up to 32 RSS Ingress
+ * Queue Identifiers. These Ingress Queue IDs are packed three to
+ * a 32-bit word as 10-bit values with the upper remaining 2 bits
+ * reserved.
+ */
+ while (n > 0) {
+ int nq = min(n, 32);
+ int nq_packed = 0;
+ __be32 *qp = &cmd.iq0_to_iq2;
+
+ /*
+ * Set up the firmware RSS command header to send the next
+ * "nq" Ingress Queue IDs to the firmware.
+ */
+ cmd.niqid = cpu_to_be16(nq);
+ cmd.startidx = cpu_to_be16(start);
+
+ /*
+ * "nq" more done for the start of the next loop.
+ */
+ start += nq;
+ n -= nq;
+
+ /*
+ * While there are still Ingress Queue IDs to stuff into the
+ * current firmware RSS command, retrieve them from the
+ * Ingress Queue ID array and insert them into the command.
+ */
+ while (nq > 0) {
+ /*
+ * Grab up to the next 3 Ingress Queue IDs (wrapping
+ * around the Ingress Queue ID array if necessary) and
+ * insert them into the firmware RSS command at the
+ * current 3-tuple position within the command.
+ */
+ u16 qbuf[3];
+ u16 *qbp = qbuf;
+ int nqbuf = min(3, nq);
+
+ nq -= nqbuf;
+ qbuf[0] = 0;
+ qbuf[1] = 0;
+ qbuf[2] = 0;
+ while (nqbuf && nq_packed < 32) {
+ nqbuf--;
+ nq_packed++;
+ *qbp++ = *rsp++;
+ if (rsp >= rsp_end)
+ rsp = rspq;
+ }
+ *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
+ V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
+ V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
+ }
+
+ /*
+ * Send this portion of the RSS table update to the firmware;
+ * bail out on any errors.
+ */
+ ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * t4_config_vi_rss - configure per VI RSS settings
+ * @adapter: the adapter
+ * @mbox: mbox to use for the FW command
+ * @viid: the VI id
+ * @flags: RSS flags
+ * @defq: id of the default RSS queue for the VI.
+ *
+ * Configures VI-specific RSS properties.
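+ * The bits in @flags are OR'ed directly into the command's
+ * defaultq_to_udpen field together with the default queue id.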
+ */ +int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid, + unsigned int flags, unsigned int defq) +{ + struct fw_rss_vi_config_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE | + V_FW_RSS_VI_CONFIG_CMD_VIID(viid)); + c.retval_len16 = cpu_to_be32(FW_LEN16(c)); + c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags | + V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq)); + return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL); +} + +/** + * init_cong_ctrl - initialize congestion control parameters + * @a: the alpha values for congestion control + * @b: the beta values for congestion control + * + * Initialize the congestion control parameters. + */ +static void init_cong_ctrl(unsigned short *a, unsigned short *b) +{ + int i; + + for (i = 0; i < 9; i++) { + a[i] = 1; + b[i] = 0; + } + + a[9] = 2; + a[10] = 3; + a[11] = 4; + a[12] = 5; + a[13] = 6; + a[14] = 7; + a[15] = 8; + a[16] = 9; + a[17] = 10; + a[18] = 14; + a[19] = 17; + a[20] = 21; + a[21] = 25; + a[22] = 30; + a[23] = 35; + a[24] = 45; + a[25] = 60; + a[26] = 80; + a[27] = 100; + a[28] = 200; + a[29] = 300; + a[30] = 400; + a[31] = 500; + + b[9] = 1; + b[10] = 1; + b[11] = 2; + b[12] = 2; + b[13] = 3; + b[14] = 3; + b[15] = 3; + b[16] = 3; + b[17] = 4; + b[18] = 4; + b[19] = 4; + b[20] = 4; + b[21] = 4; + b[22] = 5; + b[23] = 5; + b[24] = 5; + b[25] = 5; + b[26] = 5; + b[27] = 5; + b[28] = 6; + b[29] = 6; + b[30] = 7; + b[31] = 7; +} + +#define INIT_CMD(var, cmd, rd_wr) do { \ + (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \ + F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \ + (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \ +} while (0) + +int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p) +{ + u32 cclk_param, cclk_val; + int ret; + + /* + * Ask firmware for the Core Clock since it knows how to translate the + * Reference Clock ('V2') VPD field into a Core Clock value ... + */ + cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | + V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK)); + ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, + 1, &cclk_param, &cclk_val); + if (ret) { + dev_err(adapter, "%s: error in fetching from coreclock - %d\n", + __func__, ret); + return ret; + } + + p->cclk = cclk_val; + dev_debug(adapter, "%s: p->cclk = %u\n", __func__, p->cclk); + return 0; +} + +/* serial flash and firmware constants and flash config file constants */ +enum { + SF_ATTEMPTS = 10, /* max retries for SF operations */ + + /* flash command opcodes */ + SF_PROG_PAGE = 2, /* program page */ + SF_WR_DISABLE = 4, /* disable writes */ + SF_RD_STATUS = 5, /* read status register */ + SF_WR_ENABLE = 6, /* enable writes */ + SF_RD_DATA_FAST = 0xb, /* read flash */ + SF_RD_ID = 0x9f, /* read ID */ + SF_ERASE_SECTOR = 0xd8, /* erase sector */ +}; + +/** + * sf1_read - read data from the serial flash + * @adapter: the adapter + * @byte_cnt: number of bytes to read + * @cont: whether another operation will be chained + * @lock: whether to lock SF for PL access only + * @valp: where to store the read data + * + * Reads up to 4 bytes of data from the serial flash. The location of + * the read needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. 
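+ *
+ * A minimal usage sketch (mirroring t4_get_flash_params() later in this
+ * file): issue the SF_RD_ID opcode, then read back the 3-byte flash ID:
+ *
+ *    ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
+ *    if (!ret)
+ *        ret = sf1_read(adapter, 3, 0, 1, &info);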
+ */ +static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, + int lock, u32 *valp) +{ + int ret; + + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (t4_read_reg(adapter, A_SF_OP) & F_BUSY) + return -EBUSY; + t4_write_reg(adapter, A_SF_OP, + V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1)); + ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5); + if (!ret) + *valp = t4_read_reg(adapter, A_SF_DATA); + return ret; +} + +/** + * sf1_write - write data to the serial flash + * @adapter: the adapter + * @byte_cnt: number of bytes to write + * @cont: whether another operation will be chained + * @lock: whether to lock SF for PL access only + * @val: value to write + * + * Writes up to 4 bytes of data to the serial flash. The location of + * the write needs to be specified prior to calling this by issuing the + * appropriate commands to the serial flash. + */ +static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, + int lock, u32 val) +{ + if (!byte_cnt || byte_cnt > 4) + return -EINVAL; + if (t4_read_reg(adapter, A_SF_OP) & F_BUSY) + return -EBUSY; + t4_write_reg(adapter, A_SF_DATA, val); + t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) | + V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1)); + return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5); +} + +/** + * t4_read_flash - read words from serial flash + * @adapter: the adapter + * @addr: the start address for the read + * @nwords: how many 32-bit words to read + * @data: where to store the read data + * @byte_oriented: whether to store data as bytes or as words + * + * Read the specified number of 32-bit words from the serial flash. + * If @byte_oriented is set the read data is stored as a byte array + * (i.e., big-endian), otherwise as 32-bit words in the platform's + * natural endianness. + */ +int t4_read_flash(struct adapter *adapter, unsigned int addr, + unsigned int nwords, u32 *data, int byte_oriented) +{ + int ret; + + if (((addr + nwords * sizeof(u32)) > adapter->params.sf_size) || + (addr & 3)) + return -EINVAL; + + addr = rte_constant_bswap32(addr) | SF_RD_DATA_FAST; + + ret = sf1_write(adapter, 4, 1, 0, addr); + if (ret != 0) + return ret; + + ret = sf1_read(adapter, 1, 1, 0, data); + if (ret != 0) + return ret; + + for ( ; nwords; nwords--, data++) { + ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data); + if (nwords == 1) + t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */ + if (ret) + return ret; + if (byte_oriented) + *data = cpu_to_be32(*data); + } + return 0; +} + +/** + * t4_get_fw_version - read the firmware version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the FW version from flash. + */ +int t4_get_fw_version(struct adapter *adapter, u32 *vers) +{ + return t4_read_flash(adapter, FLASH_FW_START + + offsetof(struct fw_hdr, fw_ver), 1, vers, 0); +} + +/** + * t4_get_tp_version - read the TP microcode version + * @adapter: the adapter + * @vers: where to place the version + * + * Reads the TP microcode version from flash. 
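+ *
+ * Illustrative use (a sketch, error handling elided):
+ *
+ *    u32 tp_vers;
+ *    if (t4_get_tp_version(adapter, &tp_vers) == 0)
+ *        dev_debug(adapter, "TP microcode version %#x\n", tp_vers);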
+ */
+int t4_get_tp_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter, FLASH_FW_START +
+ offsetof(struct fw_hdr, tp_microcode_ver),
+ 1, vers, 0);
+}
+
+#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+ FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
+
+/**
+ * t4_link_l1cfg - apply link configuration to MAC/PHY
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: the port whose link is being configured
+ * @lc: the requested link configuration
+ *
+ * Set up a port's MAC and PHY according to a desired link configuration.
+ * - If the PHY can auto-negotiate first decide what to advertise, then
+ * enable/disable auto-negotiation as desired, and reset.
+ * - If the PHY does not auto-negotiate just reset it.
+ * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ * otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
+ struct link_config *lc)
+{
+ struct fw_port_cmd c;
+ unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
+
+ lc->link_ok = 0;
+ if (lc->requested_fc & PAUSE_RX)
+ fc |= FW_PORT_CAP_FC_RX;
+ if (lc->requested_fc & PAUSE_TX)
+ fc |= FW_PORT_CAP_FC_TX;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
+ V_FW_PORT_CMD_PORTID(port));
+ c.action_to_len16 =
+ cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+ FW_LEN16(c));
+
+ if (!(lc->supported & FW_PORT_CAP_ANEG)) {
+ c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
+ fc);
+ lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+ } else if (lc->autoneg == AUTONEG_DISABLE) {
+ c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
+ lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+ } else {
+ c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
+ }
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_flash_cfg_addr - return the address of the flash configuration file
+ * @adapter: the adapter
+ *
+ * Return the address within the flash where the Firmware Configuration
+ * File is stored, or an error if the device FLASH is too small to contain
+ * a Firmware Configuration File.
+ */
+int t4_flash_cfg_addr(struct adapter *adapter)
+{
+ /*
+ * If the device FLASH isn't large enough to hold a Firmware
+ * Configuration File, return an error.
+ */
+ if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
+ return -ENOSPC;
+
+ return FLASH_CFG_START;
+}
+
+#define PF_INTR_MASK (F_PFSW | F_PFCIM)
+
+/**
+ * t4_intr_enable - enable interrupts
+ * @adapter: the adapter whose interrupts should be enabled
+ *
+ * Enable PF-specific interrupts for the calling function and the top-level
+ * interrupt concentrator for global interrupts. Interrupts are already
+ * enabled at each module, here we just enable the roots of the interrupt
+ * hierarchies.
+ *
+ * Note: this function should be called only when the driver manages
+ * non PF-specific interrupts from the various HW modules. Only one PCI
+ * function at a time should be doing this.
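+ *
+ * A hedged usage sketch, assuming the adapter's flags word tracks
+ * MASTER_PF as defined in adapter.h:
+ *
+ *    if (adapter->flags & MASTER_PF)
+ *        t4_intr_enable(adapter);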
+ */ +void t4_intr_enable(struct adapter *adapter) +{ + u32 val = 0; + u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI)); + + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT; + t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE | + F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 | + F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR | + F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 | + F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 | + F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | + F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val); + t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK); + t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf); +} + +/** + * t4_intr_disable - disable interrupts + * @adapter: the adapter whose interrupts should be disabled + * + * Disable interrupts. We only disable the top-level interrupt + * concentrators. The caller must be a PCI function managing global + * interrupts. + */ +void t4_intr_disable(struct adapter *adapter) +{ + u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI)); + + t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0); + t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0); +} + +/** + * t4_get_port_type_description - return Port Type string description + * @port_type: firmware Port Type enumeration + */ +const char *t4_get_port_type_description(enum fw_port_type port_type) +{ + static const char * const port_type_description[] = { + "Fiber_XFI", + "Fiber_XAUI", + "BT_SGMII", + "BT_XFI", + "BT_XAUI", + "KX4", + "CX4", + "KX", + "KR", + "SFP", + "BP_AP", + "BP4_AP", + "QSFP_10G", + "QSA", + "QSFP", + "BP40_BA", + }; + + if (port_type < ARRAY_SIZE(port_type_description)) + return port_type_description[port_type]; + return "UNKNOWN"; +} + +/** + * t4_get_mps_bg_map - return the buffer groups associated with a port + * @adap: the adapter + * @idx: the port index + * + * Returns a bitmap indicating which MPS buffer groups are associated + * with the given port. Bit i is set if buffer group i is used by the + * port. + */ +unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx) +{ + u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL)); + + if (n == 0) + return idx == 0 ? 0xf : 0; + if (n == 1) + return idx < 2 ? (3 << (2 * idx)) : 0; + return 1 << idx; +} + +/** + * t4_get_port_stats - collect port statistics + * @adap: the adapter + * @idx: the port index + * @p: the stats structure to fill + * + * Collect statistics related to the given port from HW. + */ +void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p) +{ + u32 bgmap = t4_get_mps_bg_map(adap, idx); + +#define GET_STAT(name) \ + t4_read_reg64(adap, \ + (is_t4(adap->params.chip) ? 
\ + PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) :\ + T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L))) +#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L) + + p->tx_octets = GET_STAT(TX_PORT_BYTES); + p->tx_frames = GET_STAT(TX_PORT_FRAMES); + p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST); + p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST); + p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST); + p->tx_error_frames = GET_STAT(TX_PORT_ERROR); + p->tx_frames_64 = GET_STAT(TX_PORT_64B); + p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B); + p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B); + p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B); + p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B); + p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B); + p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX); + p->tx_drop = GET_STAT(TX_PORT_DROP); + p->tx_pause = GET_STAT(TX_PORT_PAUSE); + p->tx_ppp0 = GET_STAT(TX_PORT_PPP0); + p->tx_ppp1 = GET_STAT(TX_PORT_PPP1); + p->tx_ppp2 = GET_STAT(TX_PORT_PPP2); + p->tx_ppp3 = GET_STAT(TX_PORT_PPP3); + p->tx_ppp4 = GET_STAT(TX_PORT_PPP4); + p->tx_ppp5 = GET_STAT(TX_PORT_PPP5); + p->tx_ppp6 = GET_STAT(TX_PORT_PPP6); + p->tx_ppp7 = GET_STAT(TX_PORT_PPP7); + + p->rx_octets = GET_STAT(RX_PORT_BYTES); + p->rx_frames = GET_STAT(RX_PORT_FRAMES); + p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST); + p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST); + p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST); + p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR); + p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR); + p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR); + p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR); + p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR); + p->rx_runt = GET_STAT(RX_PORT_LESS_64B); + p->rx_frames_64 = GET_STAT(RX_PORT_64B); + p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B); + p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B); + p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B); + p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B); + p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B); + p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX); + p->rx_pause = GET_STAT(RX_PORT_PAUSE); + p->rx_ppp0 = GET_STAT(RX_PORT_PPP0); + p->rx_ppp1 = GET_STAT(RX_PORT_PPP1); + p->rx_ppp2 = GET_STAT(RX_PORT_PPP2); + p->rx_ppp3 = GET_STAT(RX_PORT_PPP3); + p->rx_ppp4 = GET_STAT(RX_PORT_PPP4); + p->rx_ppp5 = GET_STAT(RX_PORT_PPP5); + p->rx_ppp6 = GET_STAT(RX_PORT_PPP6); + p->rx_ppp7 = GET_STAT(RX_PORT_PPP7); + p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0; + p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0; + p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0; + p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0; + p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0; + p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0; + p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0; + p->rx_trunc3 = (bgmap & 8) ? 
GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0; + +#undef GET_STAT +#undef GET_STAT_COM +} + +/** + * t4_get_port_stats_offset - collect port stats relative to a previous snapshot + * @adap: The adapter + * @idx: The port + * @stats: Current stats to fill + * @offset: Previous stats snapshot + */ +void t4_get_port_stats_offset(struct adapter *adap, int idx, + struct port_stats *stats, + struct port_stats *offset) +{ + u64 *s, *o; + unsigned int i; + + t4_get_port_stats(adap, idx, stats); + for (i = 0, s = (u64 *)stats, o = (u64 *)offset; + i < (sizeof(struct port_stats) / sizeof(u64)); + i++, s++, o++) + *s -= *o; +} + +/** + * t4_clr_port_stats - clear port statistics + * @adap: the adapter + * @idx: the port index + * + * Clear HW statistics for the given port. + */ +void t4_clr_port_stats(struct adapter *adap, int idx) +{ + unsigned int i; + u32 bgmap = t4_get_mps_bg_map(adap, idx); + u32 port_base_addr; + + if (is_t4(adap->params.chip)) + port_base_addr = PORT_BASE(idx); + else + port_base_addr = T5_PORT_BASE(idx); + + for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L; + i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8) + t4_write_reg(adap, port_base_addr + i, 0); + for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L; + i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8) + t4_write_reg(adap, port_base_addr + i, 0); + for (i = 0; i < 4; i++) + if (bgmap & (1 << i)) { + t4_write_reg(adap, + A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L + + i * 8, 0); + t4_write_reg(adap, + A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L + + i * 8, 0); + } +} + +/** + * t4_fw_hello - establish communication with FW + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @evt_mbox: mailbox to receive async FW events + * @master: specifies the caller's willingness to be the device master + * @state: returns the current device state (if non-NULL) + * + * Issues a command to establish communication with FW. Returns either + * an error (negative integer) or the mailbox of the Master PF. + */ +int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, + enum dev_master master, enum dev_state *state) +{ + int ret; + struct fw_hello_cmd c; + u32 v; + unsigned int master_mbox; + int retries = FW_CMD_HELLO_RETRIES; + +retry: + memset(&c, 0, sizeof(c)); + INIT_CMD(c, HELLO, WRITE); + c.err_to_clearinit = cpu_to_be32( + V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | + V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | + V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : + M_FW_HELLO_CMD_MBMASTER) | + V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) | + V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) | + F_FW_HELLO_CMD_CLEARINIT); + + /* + * Issue the HELLO command to the firmware. If it's not successful + * but indicates that we got a "busy" or "timeout" condition, retry + * the HELLO until we exhaust our retry limit. If we do exceed our + * retry limit, check to see if the firmware left us any error + * information and report that if so ... 
+ */
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret != FW_SUCCESS) {
+ if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
+ goto retry;
+ if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
+ t4_report_fw_error(adap);
+ return ret;
+ }
+
+ v = be32_to_cpu(c.err_to_clearinit);
+ master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
+ if (state) {
+ if (v & F_FW_HELLO_CMD_ERR)
+ *state = DEV_STATE_ERR;
+ else if (v & F_FW_HELLO_CMD_INIT)
+ *state = DEV_STATE_INIT;
+ else
+ *state = DEV_STATE_UNINIT;
+ }
+
+ /*
+ * If we're not the Master PF then we need to wait around for the
+ * Master PF Driver to finish setting up the adapter.
+ *
+ * Note that we also do this wait if we're a non-Master-capable PF and
+ * there is no current Master PF; a Master PF may show up momentarily
+ * and we wouldn't want to fail pointlessly. (This can happen when an
+ * OS loads lots of different drivers rapidly at the same time). In
+ * this case, the Master PF returned by the firmware will be
+ * M_PCIE_FW_MASTER so the test below will work ...
+ */
+ if ((v & (F_FW_HELLO_CMD_ERR | F_FW_HELLO_CMD_INIT)) == 0 &&
+ master_mbox != mbox) {
+ int waiting = FW_CMD_HELLO_TIMEOUT;
+
+ /*
+ * Wait for the firmware to either indicate an error or
+ * initialized state. If we see either of these we bail out
+ * and report the issue to the caller. If we exhaust the
+ * "hello timeout" and we haven't exhausted our retries, try
+ * again. Otherwise bail with a timeout error.
+ */
+ for (;;) {
+ u32 pcie_fw;
+
+ msleep(50);
+ waiting -= 50;
+
+ /*
+ * If neither Error nor Initialized are indicated
+ * by the firmware, keep waiting until we exhaust our
+ * timeout ... and then retry if we haven't exhausted
+ * our retries ...
+ */
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ if (!(pcie_fw & (F_PCIE_FW_ERR | F_PCIE_FW_INIT))) {
+ if (waiting <= 0) {
+ if (retries-- > 0)
+ goto retry;
+
+ return -ETIMEDOUT;
+ }
+ continue;
+ }
+
+ /*
+ * We either have an Error or Initialized condition;
+ * report errors preferentially.
+ */
+ if (state) {
+ if (pcie_fw & F_PCIE_FW_ERR)
+ *state = DEV_STATE_ERR;
+ else if (pcie_fw & F_PCIE_FW_INIT)
+ *state = DEV_STATE_INIT;
+ }
+
+ /*
+ * If we arrived before a Master PF was selected and
+ * one has since become valid, grab its identity
+ * for our caller.
+ */
+ if (master_mbox == M_PCIE_FW_MASTER &&
+ (pcie_fw & F_PCIE_FW_MASTER_VLD))
+ master_mbox = G_PCIE_FW_MASTER(pcie_fw);
+ break;
+ }
+ }
+
+ return master_mbox;
+}
+
+/**
+ * t4_fw_bye - end communication with FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ *
+ * Issues a command to terminate communication with FW.
+ */
+int t4_fw_bye(struct adapter *adap, unsigned int mbox)
+{
+ struct fw_bye_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ INIT_CMD(c, BYE, WRITE);
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ * t4_fw_reset - issue a reset to FW
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @reset: specifies the type of reset to perform
+ *
+ * Issues a reset command of the specified type to FW.
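+ *
+ * Illustrative call (a sketch): a full PIO reset, exactly the value
+ * t4_fw_restart() below passes when asked to reset the chip:
+ *
+ *    ret = t4_fw_reset(adap, mbox, F_PIORST | F_PIORSTMODE);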
+ */ +int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) +{ + struct fw_reset_cmd c; + + memset(&c, 0, sizeof(c)); + INIT_CMD(c, RESET, WRITE); + c.val = cpu_to_be32(reset); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_fw_halt - issue a reset/halt to FW and put uP into RESET + * @adap: the adapter + * @mbox: mailbox to use for the FW RESET command (if desired) + * @force: force uP into RESET even if FW RESET command fails + * + * Issues a RESET command to firmware (if desired) with a HALT indication + * and then puts the microprocessor into RESET state. The RESET command + * will only be issued if a legitimate mailbox is provided (mbox <= + * M_PCIE_FW_MASTER). + * + * This is generally used in order for the host to safely manipulate the + * adapter without fear of conflicting with whatever the firmware might + * be doing. The only way out of this state is to RESTART the firmware + * ... + */ +int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) +{ + int ret = 0; + + /* + * If a legitimate mailbox is provided, issue a RESET command + * with a HALT indication. + */ + if (mbox <= M_PCIE_FW_MASTER) { + struct fw_reset_cmd c; + + memset(&c, 0, sizeof(c)); + INIT_CMD(c, RESET, WRITE); + c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE); + c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT); + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); + } + + /* + * Normally we won't complete the operation if the firmware RESET + * command fails but if our caller insists we'll go ahead and put the + * uP into RESET. This can be useful if the firmware is hung or even + * missing ... We'll have to take the risk of putting the uP into + * RESET without the cooperation of firmware in that case. + * + * We also force the firmware's HALT flag to be on in case we bypassed + * the firmware RESET command above or we're dealing with old firmware + * which doesn't have the HALT capability. This will serve as a flag + * for the incoming firmware to know that it's coming out of a HALT + * rather than a RESET ... if it's new enough to understand that ... + */ + if (ret == 0 || force) { + t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST); + t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, + F_PCIE_FW_HALT); + } + + /* + * And we always return the result of the firmware RESET command + * even when we force the uP into RESET ... + */ + return ret; +} + +/** + * t4_fw_restart - restart the firmware by taking the uP out of RESET + * @adap: the adapter + * @mbox: mailbox to use for the FW RESET command (if desired) + * @reset: if we want to do a RESET to restart things + * + * Restart firmware previously halted by t4_fw_halt(). On successful + * return the previous PF Master remains as the new PF Master and there + * is no need to issue a new HELLO command, etc. + * + * We do this in two ways: + * + * 1. If we're dealing with newer firmware we'll simply want to take + * the chip's microprocessor out of RESET. This will cause the + * firmware to start up from its start vector. And then we'll loop + * until the firmware indicates it's started again (PCIE_FW.HALT + * reset to 0) or we timeout. + * + * 2. If we're dealing with older firmware then we'll need to RESET + * the chip since older firmware won't recognize the PCIE_FW.HALT + * flag and automatically RESET itself on startup. 
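+ *
+ * A hedged halt/upgrade/restart sketch (the flash update step is
+ * assumed to be provided elsewhere):
+ *
+ *    ret = t4_fw_halt(adap, mbox, 0);
+ *    if (ret == 0) {
+ *        ... write the new firmware image to flash ...
+ *        ret = t4_fw_restart(adap, mbox, 1);
+ *    }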
+ */ +int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) +{ + if (reset) { + /* + * Since we're directing the RESET instead of the firmware + * doing it automatically, we need to clear the PCIE_FW.HALT + * bit. + */ + t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0); + + /* + * If we've been given a valid mailbox, first try to get the + * firmware to do the RESET. If that works, great and we can + * return success. Otherwise, if we haven't been given a + * valid mailbox or the RESET command failed, fall back to + * hitting the chip with a hammer. + */ + if (mbox <= M_PCIE_FW_MASTER) { + t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0); + msleep(100); + if (t4_fw_reset(adap, mbox, + F_PIORST | F_PIORSTMODE) == 0) + return 0; + } + + t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE); + msleep(2000); + } else { + int ms; + + t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0); + for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { + if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT)) + return FW_SUCCESS; + msleep(100); + ms += 100; + } + return -ETIMEDOUT; + } + return 0; +} + +/** + * t4_fixup_host_params_compat - fix up host-dependent parameters + * @adap: the adapter + * @page_size: the host's Base Page Size + * @cache_line_size: the host's Cache Line Size + * @chip_compat: maintain compatibility with designated chip + * + * Various registers in the chip contain values which are dependent on the + * host's Base Page and Cache Line Sizes. This function will fix all of + * those registers with the appropriate values as passed in ... + * + * @chip_compat is used to limit the set of changes that are made + * to be compatible with the indicated chip release. This is used by + * drivers to maintain compatibility with chip register settings when + * the drivers haven't [yet] been updated with new chip support. + */ +int t4_fixup_host_params_compat(struct adapter *adap, + unsigned int page_size, + unsigned int cache_line_size, + enum chip_type chip_compat) +{ + unsigned int page_shift = fls(page_size) - 1; + unsigned int sge_hps = page_shift - 10; + unsigned int stat_len = cache_line_size > 64 ? 128 : 64; + unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size; + unsigned int fl_align_log = fls(fl_align) - 1; + + t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE, + V_HOSTPAGESIZEPF0(sge_hps) | + V_HOSTPAGESIZEPF1(sge_hps) | + V_HOSTPAGESIZEPF2(sge_hps) | + V_HOSTPAGESIZEPF3(sge_hps) | + V_HOSTPAGESIZEPF4(sge_hps) | + V_HOSTPAGESIZEPF5(sge_hps) | + V_HOSTPAGESIZEPF6(sge_hps) | + V_HOSTPAGESIZEPF7(sge_hps)); + + if (is_t4(adap->params.chip) || is_t4(chip_compat)) + t4_set_reg_field(adap, A_SGE_CONTROL, + V_INGPADBOUNDARY(M_INGPADBOUNDARY) | + F_EGRSTATUSPAGESIZE, + V_INGPADBOUNDARY(fl_align_log - + X_INGPADBOUNDARY_SHIFT) | + V_EGRSTATUSPAGESIZE(stat_len != 64)); + else { + /* + * T5 introduced the separation of the Free List Padding and + * Packing Boundaries. Thus, we can select a smaller Padding + * Boundary to avoid uselessly chewing up PCIe Link and Memory + * Bandwidth, and use a Packing Boundary which is large enough + * to avoid false sharing between CPUs, etc. + * + * For the PCI Link, the smaller the Padding Boundary the + * better. For the Memory Controller, a smaller Padding + * Boundary is better until we cross under the Memory Line + * Size (the minimum unit of transfer to/from Memory). If we + * have a Padding Boundary which is smaller than the Memory + * Line Size, that'll involve a Read-Modify-Write cycle on the + * Memory Controller which is never good. 
For T5 the smallest + * Padding Boundary which we can select is 32 bytes which is + * larger than any known Memory Controller Line Size so we'll + * use that. + */ + + /* + * N.B. T5 has a different interpretation of the "0" value for + * the Packing Boundary. This corresponds to 16 bytes instead + * of the expected 32 bytes. We never have a Packing Boundary + * less than 32 bytes so we can't use that special value but + * on the other hand, if we wanted 32 bytes, the best we can + * really do is 64 bytes ... + */ + if (fl_align <= 32) { + fl_align = 64; + fl_align_log = 6; + } + t4_set_reg_field(adap, A_SGE_CONTROL, + V_INGPADBOUNDARY(M_INGPADBOUNDARY) | + F_EGRSTATUSPAGESIZE, + V_INGPADBOUNDARY(X_INGPCIEBOUNDARY_32B) | + V_EGRSTATUSPAGESIZE(stat_len != 64)); + t4_set_reg_field(adap, A_SGE_CONTROL2, + V_INGPACKBOUNDARY(M_INGPACKBOUNDARY), + V_INGPACKBOUNDARY(fl_align_log - + X_INGPACKBOUNDARY_SHIFT)); + } + + /* + * Adjust various SGE Free List Host Buffer Sizes. + * + * The first four entries are: + * + * 0: Host Page Size + * 1: 64KB + * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode) + * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode) + * + * For the single-MTU buffers in unpacked mode we need to include + * space for the SGE Control Packet Shift, 14 byte Ethernet header, + * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet + * Padding boundary. All of these are accommodated in the Factory + * Default Firmware Configuration File but we need to adjust it for + * this host's cache line size. + */ + t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size); + t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2, + (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align - 1) + & ~(fl_align - 1)); + t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3, + (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align - 1) + & ~(fl_align - 1)); + + t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12)); + + return 0; +} + +/** + * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible) + * @adap: the adapter + * @page_size: the host's Base Page Size + * @cache_line_size: the host's Cache Line Size + * + * Various registers in T4 contain values which are dependent on the + * host's Base Page and Cache Line Sizes. This function will fix all of + * those registers with the appropriate values as passed in ... + * + * This routine makes changes which are compatible with T4 chips. + */ +int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, + unsigned int cache_line_size) +{ + return t4_fixup_host_params_compat(adap, page_size, cache_line_size, + T4_LAST_REV); +} + +/** + * t4_fw_initialize - ask FW to initialize the device + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * + * Issues a command to FW to partially initialize the device. This + * performs initialization that generally doesn't depend on user input. + */ +int t4_fw_initialize(struct adapter *adap, unsigned int mbox) +{ + struct fw_initialize_cmd c; + + memset(&c, 0, sizeof(c)); + INIT_CMD(c, INITIALIZE, WRITE); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_query_params_rw - query FW or device parameters + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF + * @vf: the VF + * @nparams: the number of parameters + * @params: the parameter names + * @val: the parameter values + * @rw: Write and read flag + * + * Reads the value of FW or device parameters. Up to 7 parameters can be + * queried at once. 
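+ *
+ * Parameter names are encoded with the FW_PARAMS_MNEM/PARAM macros; a
+ * minimal read sketch, mirroring t4_get_core_clock() above:
+ *
+ *    u32 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ *                V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK);
+ *    u32 val;
+ *    ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param,
+ *                          &val);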
+ */ +static int t4_query_params_rw(struct adapter *adap, unsigned int mbox, + unsigned int pf, unsigned int vf, + unsigned int nparams, const u32 *params, + u32 *val, int rw) +{ + unsigned int i; + int ret; + struct fw_params_cmd c; + __be32 *p = &c.param[0].mnem; + + if (nparams > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ | + V_FW_PARAMS_CMD_PFN(pf) | + V_FW_PARAMS_CMD_VFN(vf)); + c.retval_len16 = cpu_to_be32(FW_LEN16(c)); + + for (i = 0; i < nparams; i++) { + *p++ = cpu_to_be32(*params++); + if (rw) + *p = cpu_to_be32(*(val + i)); + p++; + } + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret == 0) + for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) + *val++ = be32_to_cpu(*p); + return ret; +} + +int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + u32 *val) +{ + return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0); +} + +/** + * t4_set_params_timeout - sets FW or device parameters + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF + * @vf: the VF + * @nparams: the number of parameters + * @params: the parameter names + * @val: the parameter values + * @timeout: the timeout time + * + * Sets the value of FW or device parameters. Up to 7 parameters can be + * specified at once. + */ +int t4_set_params_timeout(struct adapter *adap, unsigned int mbox, + unsigned int pf, unsigned int vf, + unsigned int nparams, const u32 *params, + const u32 *val, int timeout) +{ + struct fw_params_cmd c; + __be32 *p = &c.param[0].mnem; + + if (nparams > 7) + return -EINVAL; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE | + V_FW_PARAMS_CMD_PFN(pf) | + V_FW_PARAMS_CMD_VFN(vf)); + c.retval_len16 = cpu_to_be32(FW_LEN16(c)); + + while (nparams--) { + *p++ = cpu_to_be32(*params++); + *p++ = cpu_to_be32(*val++); + } + + return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout); +} + +int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int nparams, const u32 *params, + const u32 *val) +{ + return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val, + FW_CMD_MAX_TIMEOUT); +} + +/** + * t4_alloc_vi_func - allocate a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @port: physical port associated with the VI + * @pf: the PF owning the VI + * @vf: the VF owning the VI + * @nmac: number of MAC addresses needed (1 to 5) + * @mac: the MAC addresses of the VI + * @rss_size: size of RSS table slice associated with this VI + * @portfunc: which Port Application Function MAC Address is desired + * @idstype: Intrusion Detection Type + * + * Allocates a virtual interface for the given physical port. If @mac is + * not %NULL it contains the MAC addresses of the VI as assigned by FW. + * @mac should be large enough to hold @nmac Ethernet addresses, they are + * stored consecutively so the space needed is @nmac * 6 bytes. + * Returns a negative error number or the non-negative VI id. 
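+ *
+ * Most callers want the Ethernet convenience wrapper t4_alloc_vi()
+ * below; a direct call would look like this sketch (illustrative only):
+ *
+ *    u8 mac[6];
+ *    unsigned int rss_size;
+ *    int viid = t4_alloc_vi_func(adap, mbox, port, pf, vf, 1, mac,
+ *                                &rss_size, FW_VI_FUNC_ETH, 0);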
+ */
+int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
+ unsigned int port, unsigned int pf, unsigned int vf,
+ unsigned int nmac, u8 *mac, unsigned int *rss_size,
+ unsigned int portfunc, unsigned int idstype)
+{
+ int ret;
+ struct fw_vi_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC |
+ V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
+ c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
+ V_FW_VI_CMD_FUNC(portfunc));
+ c.portid_pkd = V_FW_VI_CMD_PORTID(port);
+ c.nmac = nmac - 1;
+
+ ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ if (ret)
+ return ret;
+
+ if (mac) {
+ memcpy(mac, c.mac, sizeof(c.mac));
+ switch (nmac) {
+ case 5:
+ memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
+ /* FALLTHROUGH */
+ case 4:
+ memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
+ /* FALLTHROUGH */
+ case 3:
+ memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
+ /* FALLTHROUGH */
+ case 2:
+ memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
+ /* FALLTHROUGH */
+ }
+ }
+ if (rss_size)
+ *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
+ return G_FW_VI_CMD_VIID(cpu_to_be16(c.type_to_viid));
+}
+
+/**
+ * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @port: physical port associated with the VI
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @nmac: number of MAC addresses needed (1 to 5)
+ * @mac: the MAC addresses of the VI
+ * @rss_size: size of RSS table slice associated with this VI
+ *
+ * Backwards-compatible convenience routine to allocate a Virtual
+ * Interface with an Ethernet Port Application Function and Intrusion
+ * Detection System disabled.
+ */
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+ unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+ unsigned int *rss_size)
+{
+ return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
+ FW_VI_FUNC_ETH, 0);
+}
+
+/**
+ * t4_free_vi - free a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @viid: virtual interface identifier
+ *
+ * Free a previously allocated virtual interface.
+ */
+int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
+ unsigned int vf, unsigned int viid)
+{
+ struct fw_vi_cmd c;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_EXEC | V_FW_VI_CMD_PFN(pf) |
+ V_FW_VI_CMD_VFN(vf));
+ c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
+ c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
+
+ return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+}
+
+/**
+ * t4_set_rxmode - set Rx properties of a virtual interface
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @viid: the VI id
+ * @mtu: the new MTU or -1
+ * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
+ * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
+ * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
+ * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
+ * -1 no change
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ *
+ * Sets Rx properties of a virtual interface.
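+ *
+ * The -1 sentinels allow selective updates; e.g. this sketch enables
+ * promiscuous mode and leaves every other property untouched ("pi" is
+ * assumed to be the caller's port_info):
+ *
+ *    ret = t4_set_rxmode(adap, adap->mbox, pi->viid, -1, 1, -1, -1, -1,
+ *                        false);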
+ */ +int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, + int mtu, int promisc, int all_multi, int bcast, int vlanex, + bool sleep_ok) +{ + struct fw_vi_rxmode_cmd c; + + /* convert to FW values */ + if (mtu < 0) + mtu = M_FW_VI_RXMODE_CMD_MTU; + if (promisc < 0) + promisc = M_FW_VI_RXMODE_CMD_PROMISCEN; + if (all_multi < 0) + all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN; + if (bcast < 0) + bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN; + if (vlanex < 0) + vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE | + V_FW_VI_RXMODE_CMD_VIID(viid)); + c.retval_len16 = cpu_to_be32(FW_LEN16(c)); + c.mtu_to_vlanexen = cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) | + V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) | + V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | + V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | + V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); + return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); +} + +/** + * t4_change_mac - modifies the exact-match filter for a MAC address + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @idx: index of existing filter for old value of MAC address, or -1 + * @addr: the new MAC address value + * @persist: whether a new MAC allocation should be persistent + * @add_smt: if true also add the address to the HW SMT + * + * Modifies an exact-match filter and sets it to the new MAC address if + * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the + * latter case the address is added persistently if @persist is %true. + * + * Note that in general it is not possible to modify the value of a given + * filter so the generic way to modify an address filter is to free the one + * being used by the old address value and allocate a new filter for the + * new address value. + * + * Returns a negative error number or the index of the filter with the new + * MAC value. Note that this index may differ from @idx. + */ +int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, + int idx, const u8 *addr, bool persist, bool add_smt) +{ + int ret, mode; + struct fw_vi_mac_cmd c; + struct fw_vi_mac_exact *p = c.u.exact; + int max_mac_addr = adap->params.arch.mps_tcam_size; + + if (idx < 0) /* new allocation */ + idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; + mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_WRITE | + V_FW_VI_MAC_CMD_VIID(viid)); + c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1)); + p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID | + V_FW_VI_MAC_CMD_SMAC_RESULT(mode) | + V_FW_VI_MAC_CMD_IDX(idx)); + memcpy(p->macaddr, addr, sizeof(p->macaddr)); + + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret == 0) { + ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx)); + if (ret >= max_mac_addr) + ret = -ENOMEM; + } + return ret; +} + +/** + * t4_enable_vi_params - enable/disable a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @rx_en: 1=enable Rx, 0=disable Rx + * @tx_en: 1=enable Tx, 0=disable Tx + * @dcb_en: 1=enable delivery of Data Center Bridging messages. + * + * Enables/disables a virtual interface. Note that setting DCB Enable + * only makes sense when enabling a Virtual Interface ... 
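+ *
+ * An illustrative call bringing a VI fully up with DCB disabled, which
+ * is exactly what the t4_enable_vi() wrapper below does:
+ *
+ *    ret = t4_enable_vi_params(adap, mbox, viid, 1, 1, 0);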
+ */ +int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, + unsigned int viid, bool rx_en, bool tx_en, bool dcb_en) +{ + struct fw_vi_enable_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_EXEC | + V_FW_VI_ENABLE_CMD_VIID(viid)); + c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) | + V_FW_VI_ENABLE_CMD_EEN(tx_en) | + V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) | + FW_LEN16(c)); + return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_enable_vi - enable/disable a virtual interface + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @rx_en: 1=enable Rx, 0=disable Rx + * @tx_en: 1=enable Tx, 0=disable Tx + * + * Enables/disables a virtual interface. Note that setting DCB Enable + * only makes sense when enabling a Virtual Interface ... + */ +int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, + bool rx_en, bool tx_en) +{ + return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0); +} + +/** + * t4_iq_start_stop - enable/disable an ingress queue and its FLs + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @start: %true to enable the queues, %false to disable them + * @pf: the PF owning the queues + * @vf: the VF owning the queues + * @iqid: ingress queue id + * @fl0id: FL0 queue id or 0xffff if no attached FL0 + * @fl1id: FL1 queue id or 0xffff if no attached FL1 + * + * Starts or stops an ingress queue and its associated FLs, if any. + */ +int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start, + unsigned int pf, unsigned int vf, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id) +{ + struct fw_iq_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | + F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | + V_FW_IQ_CMD_VFN(vf)); + c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) | + V_FW_IQ_CMD_IQSTOP(!start) | + FW_LEN16(c)); + c.iqid = cpu_to_be16(iqid); + c.fl0id = cpu_to_be16(fl0id); + c.fl1id = cpu_to_be16(fl1id); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_iq_free - free an ingress queue and its FLs + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the queues + * @vf: the VF owning the queues + * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.) + * @iqid: ingress queue id + * @fl0id: FL0 queue id or 0xffff if no attached FL0 + * @fl1id: FL1 queue id or 0xffff if no attached FL1 + * + * Frees an ingress queue and its associated FLs, if any. 
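+ *
+ * A sketch of freeing an ingress queue with one attached free list and
+ * no FL1 (0xffff marks the missing FL):
+ *
+ *    ret = t4_iq_free(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP,
+ *                     iqid, fl0id, 0xffff);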
+ */ +int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int iqtype, unsigned int iqid, + unsigned int fl0id, unsigned int fl1id) +{ + struct fw_iq_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | + F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) | + V_FW_IQ_CMD_VFN(vf)); + c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c)); + c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype)); + c.iqid = cpu_to_be16(iqid); + c.fl0id = cpu_to_be16(fl0id); + c.fl1id = cpu_to_be16(fl1id); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_eth_eq_free - free an Ethernet egress queue + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @pf: the PF owning the queue + * @vf: the VF owning the queue + * @eqid: egress queue id + * + * Frees an Ethernet egress queue. + */ +int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, + unsigned int vf, unsigned int eqid) +{ + struct fw_eq_eth_cmd c; + + memset(&c, 0, sizeof(c)); + c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_EXEC | + V_FW_EQ_ETH_CMD_PFN(pf) | + V_FW_EQ_ETH_CMD_VFN(vf)); + c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c)); + c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid)); + return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); +} + +/** + * t4_handle_fw_rpl - process a FW reply message + * @adap: the adapter + * @rpl: start of the FW message + * + * Processes a FW message, such as link state change messages. + */ +int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) +{ + u8 opcode = *(const u8 *)rpl; + + /* + * This might be a port command ... this simplifies the following + * conditionals ... We can get away with pre-dereferencing + * action_to_len16 because it's in the first 16 bytes and all messages + * will be at least that long. 
+ */
+ const struct fw_port_cmd *p = (const void *)rpl;
+ unsigned int action =
+ G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
+
+ if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
+ /* link/module state change message */
+ int speed = 0, fc = 0, i;
+ int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
+ struct port_info *pi = NULL;
+ struct link_config *lc;
+ u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
+ int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
+ u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
+
+ if (stat & F_FW_PORT_CMD_RXPAUSE)
+ fc |= PAUSE_RX;
+ if (stat & F_FW_PORT_CMD_TXPAUSE)
+ fc |= PAUSE_TX;
+ if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+ speed = ETH_LINK_SPEED_100;
+ else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+ speed = ETH_LINK_SPEED_1000;
+ else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+ speed = ETH_LINK_SPEED_10000;
+ else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+ speed = ETH_LINK_SPEED_40G;
+
+ for_each_port(adap, i) {
+ pi = adap2pinfo(adap, i);
+ if (pi->tx_chan == chan)
+ break;
+ }
+ lc = &pi->link_cfg;
+
+ if (mod != pi->mod_type) {
+ pi->mod_type = mod;
+ t4_os_portmod_changed(adap, i);
+ }
+ if (link_ok != lc->link_ok || speed != lc->speed ||
+ fc != lc->fc) { /* something changed */
+ if (!link_ok && lc->link_ok) {
+ static const char * const reason[] = {
+ "Link Down",
+ "Remote Fault",
+ "Auto-negotiation Failure",
+ "Reserved",
+ "Insufficient Airflow",
+ "Unable To Determine Reason",
+ "No RX Signal Detected",
+ "Reserved",
+ };
+ unsigned int rc = G_FW_PORT_CMD_LINKDNRC(stat);
+
+ dev_warn(adap, "Port %d link down, reason: %s\n",
+ chan, reason[rc]);
+ }
+ lc->link_ok = link_ok;
+ lc->speed = speed;
+ lc->fc = fc;
+ lc->supported = be16_to_cpu(p->u.info.pcap);
+ }
+ } else {
+ dev_warn(adap, "Unknown firmware reply %d\n", opcode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void t4_reset_link_config(struct adapter *adap, int idx)
+{
+ struct port_info *pi = adap2pinfo(adap, idx);
+ struct link_config *lc = &pi->link_cfg;
+
+ lc->link_ok = 0;
+ lc->requested_speed = 0;
+ lc->requested_fc = 0;
+ lc->speed = 0;
+ lc->fc = 0;
+}
+
+/**
+ * init_link_config - initialize a link's SW state
+ * @lc: structure holding the link state
+ * @caps: link capabilities
+ *
+ * Initializes the SW state maintained for each link, including the link's
+ * capabilities and default speed/flow-control/autonegotiation settings.
+ */
+static void init_link_config(struct link_config *lc,
+ unsigned int caps)
+{
+ lc->supported = caps;
+ lc->requested_speed = 0;
+ lc->speed = 0;
+ lc->requested_fc = 0;
+ lc->fc = 0;
+ if (lc->supported & FW_PORT_CAP_ANEG) {
+ lc->advertising = lc->supported & ADVERT_MASK;
+ lc->autoneg = AUTONEG_ENABLE;
+ } else {
+ lc->advertising = 0;
+ lc->autoneg = AUTONEG_DISABLE;
+ }
+}
+
+/**
+ * t4_wait_dev_ready - wait until reads of registers work
+ * @adapter: the adapter
+ *
+ * Right after the device is RESET it can take a small amount of time
+ * for it to respond to register reads. Until then, all reads will
+ * return either 0xff...ff or 0xee...ee. Return an error if reads
+ * don't work within a reasonable time frame.
+ */
+static int t4_wait_dev_ready(struct adapter *adapter)
+{
+ u32 whoami;
+
+ whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+
+ if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
+ return 0;
+
+ msleep(500);
+ whoami = t4_read_reg(adapter, A_PL_WHOAMI);
+ return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
+ ? 0 : -EIO);
+}
+
+struct flash_desc {
+ u32 vendor_and_model_id;
+ u32 size_mb;
+};
+
+int t4_get_flash_params(struct adapter *adapter)
+{
+ /*
+ * Table for non-Numonix supported flash parts. Numonix parts are left
+ * to the preexisting well-tested code. All flash parts have 64KB
+ * sectors.
+ */
+ static struct flash_desc supported_flash[] = {
+ { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
+ };
+
+ int ret;
+ unsigned int i;
+ u32 info = 0;
+
+ ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
+ if (!ret)
+ ret = sf1_read(adapter, 3, 0, 1, &info);
+ t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(supported_flash); ++i)
+ if (supported_flash[i].vendor_and_model_id == info) {
+ adapter->params.sf_size = supported_flash[i].size_mb;
+ adapter->params.sf_nsec =
+ adapter->params.sf_size / SF_SEC_SIZE;
+ return 0;
+ }
+
+ if ((info & 0xff) != 0x20) /* not a Numonix flash */
+ return -EINVAL;
+ info >>= 16; /* log2 of size */
+ if (info >= 0x14 && info < 0x18)
+ adapter->params.sf_nsec = 1 << (info - 16);
+ else if (info == 0x18)
+ adapter->params.sf_nsec = 64;
+ else
+ return -EINVAL;
+ adapter->params.sf_size = 1 << info;
+
+ /*
+ * Ideally we should reject adapters with FLASHes which are too
+ * small; for now, just emit a warning.
+ */
+ if (adapter->params.sf_size < FLASH_MIN_SIZE) {
+ dev_warn(adapter, "WARNING!!! FLASH size %#x < %#x!!!\n",
+ adapter->params.sf_size, FLASH_MIN_SIZE);
+ }
+
+ return 0;
+}
+
+/**
+ * t4_prep_adapter - prepare SW and HW for operation
+ * @adapter: the adapter
+ *
+ * Initialize adapter SW state for the various HW modules, set initial
+ * values for some adapter tunables, take PHYs out of reset, and
+ * initialize the MDIO interface.
+ */
+int t4_prep_adapter(struct adapter *adapter)
+{
+ int ret, ver;
+ u32 pl_rev;
+
+ ret = t4_wait_dev_ready(adapter);
+ if (ret < 0)
+ return ret;
+
+ pl_rev = G_REV(t4_read_reg(adapter, A_PL_REV));
+ adapter->params.pci.device_id = adapter->pdev->id.device_id;
+ adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;
+
+ /*
+ * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
+ * ADAPTER (VERSION << 4 | REVISION)
+ */
+ ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
+ adapter->params.chip = 0;
+ switch (ver) {
+ case CHELSIO_T5:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+ adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 128;
+ adapter->params.arch.nchan = NCHAN;
+ adapter->params.arch.vfcount = 128;
+ break;
+ default:
+ dev_err(adapter, "%s: Device %d is not supported\n",
+ __func__, adapter->params.pci.device_id);
+ return -EINVAL;
+ }
+
+ ret = t4_get_flash_params(adapter);
+ if (ret < 0)
+ return ret;
+
+ adapter->params.cim_la_size = CIMLA_SIZE;
+
+ init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
+
+ /*
+ * Default port and clock for debugging in case we can't reach FW.
+ */
+ adapter->params.nports = 1;
+ adapter->params.portvec = 1;
+ adapter->params.vpd.cclk = 50000;
+
+ return 0;
+}
+
+/**
+ * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ * @adapter: the adapter
+ * @qid: the Queue ID
+ * @qtype: the Ingress or Egress type for @qid
+ * @pbar2_qoffset: BAR2 Queue Offset
+ * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
+ *
+ * Returns the BAR2 SGE Queue Registers information associated with the
+ * indicated Absolute Queue ID. These are passed back in return value
+ * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
+ * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
+ *
+ * This may return an error which indicates that BAR2 SGE Queue
+ * registers aren't available. If an error is not returned, then the
+ * following values are returned:
+ *
+ * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
+ * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
+ *
+ * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
+ * require the "Inferred Queue ID" ability may be used. E.g. the
+ * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
+ * then these "Inferred Queue ID" registers may not be used.
+ */
+int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
+ enum t4_bar2_qtype qtype, u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid)
+{
+ unsigned int page_shift, page_size, qpp_shift, qpp_mask;
+ u64 bar2_page_offset, bar2_qoffset;
+ unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
+
+ /*
+ * T4 doesn't support BAR2 SGE Queue registers.
+ */
+ if (is_t4(adapter->params.chip))
+ return -EINVAL;
+
+ /*
+ * Get our SGE Page Size parameters.
+ */
+ page_shift = adapter->params.sge.hps + 10;
+ page_size = 1 << page_shift;
+
+ /*
+ * Get the right Queues per Page parameters for our Queue.
+ */
+ qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS ?
+ adapter->params.sge.eq_qpp :
+ adapter->params.sge.iq_qpp);
+ qpp_mask = (1 << qpp_shift) - 1;
+
+ /*
+ * Calculate the basics of the BAR2 SGE Queue register area:
+ * o The BAR2 page the Queue registers will be in.
+ * o The BAR2 Queue ID.
+ * o The BAR2 Queue ID Offset into the BAR2 page.
+ */
+ bar2_page_offset = ((qid >> qpp_shift) << page_shift);
+ bar2_qid = qid & qpp_mask;
+ bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
+
+ /*
+ * If the BAR2 Queue ID Offset is less than the Page Size, then the
+ * hardware will infer the Absolute Queue ID simply from the writes to
+ * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
+ * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
+ * write to the first BAR2 SGE Queue Area within the BAR2 Page with
+ * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
+ * from the BAR2 Page and BAR2 Queue ID.
+ *
+ * One important consequence of this is that some BAR2 SGE registers
+ * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
+ * there. But other registers synthesize the SGE Queue ID purely
+ * from the writes to the registers -- the Write Combined Doorbell
+ * Buffer is a good example. These BAR2 SGE Registers are only
+ * available for those BAR2 SGE Register areas where the SGE Absolute
+ * Queue ID can be inferred from simple writes.
+ */
+ bar2_qoffset = bar2_page_offset;
+ bar2_qinferred = (bar2_qid_offset < page_size);
+ if (bar2_qinferred) {
+ bar2_qoffset += bar2_qid_offset;
+ bar2_qid = 0;
+ }
+
+ *pbar2_qoffset = bar2_qoffset;
+ *pbar2_qid = bar2_qid;
+ return 0;
+}
+
+/**
+ * t4_init_sge_params - initialize adap->params.sge
+ * @adapter: the adapter
+ *
+ * Initialize various fields of the adapter's SGE Parameters structure.
+ */
+int t4_init_sge_params(struct adapter *adapter)
+{
+ struct sge_params *sge_params = &adapter->params.sge;
+ u32 hps, qpp;
+ unsigned int s_hps, s_qpp;
+
+ /*
+ * Extract the SGE Page Size for our PF.
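+ * The per-PF HOSTPAGESIZEPF field stores the host page shift minus 10
+ * (see t4_fixup_host_params_compat() above, which programs it, and the
+ * "+ 10" in t4_bar2_sge_qregs()).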
+	 */
+	hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
+	s_hps = (S_HOSTPAGESIZEPF0 + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) *
+		 adapter->pf);
+	sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
+
+	/*
+	 * Extract the SGE Egress and Ingress Queues Per Page for our PF.
+	 */
+	s_qpp = (S_QUEUESPERPAGEPF0 +
+		 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
+	qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
+	sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
+	qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
+	sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
+
+	return 0;
+}
+
+/**
+ * t4_init_tp_params - initialize adap->params.tp
+ * @adap: the adapter
+ *
+ * Initialize various fields of the adapter's TP Parameters structure.
+ */
+int t4_init_tp_params(struct adapter *adap)
+{
+	int chan;
+	u32 v;
+
+	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
+	adap->params.tp.tre = G_TIMERRESOLUTION(v);
+	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
+
+	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
+	for (chan = 0; chan < NCHAN; chan++)
+		adap->params.tp.tx_modq[chan] = chan;
+
+	/*
+	 * Cache the adapter's Compressed Filter Mode and global Ingress
+	 * Configuration.
+	 */
+	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+			 &adap->params.tp.vlan_pri_map, 1, A_TP_VLAN_PRI_MAP);
+	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
+			 &adap->params.tp.ingress_config, 1,
+			 A_TP_INGRESS_CONFIG);
+
+	/*
+	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
+	 * shift positions of several elements of the Compressed Filter Tuple
+	 * for this adapter which we need frequently ...
+	 */
+	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
+	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
+	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
+	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
+							       F_PROTOCOL);
+
+	/*
+	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
+	 * represents the presence of an Outer VLAN instead of a VNIC ID.
+	 */
+	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
+		adap->params.tp.vnic_shift = -1;
+
+	return 0;
+}
+
+/**
+ * t4_filter_field_shift - calculate filter field shift
+ * @adap: the adapter
+ * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
+ *
+ * Return the shift position of a filter field within the Compressed
+ * Filter Tuple.  The filter field is specified via its selection bit
+ * within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
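+ *
+ * Illustrative example: with a filter mode of just F_PORT | F_PROTOCOL,
+ * the only enabled field below PROTOCOL is the 3-bit PORT field, so
+ * t4_filter_field_shift(adap, F_PROTOCOL) returns 3 while
+ * t4_filter_field_shift(adap, F_PORT) returns 0.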
+ */ +int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel) +{ + unsigned int filter_mode = adap->params.tp.vlan_pri_map; + unsigned int sel; + int field_shift; + + if ((filter_mode & filter_sel) == 0) + return -1; + + for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) { + switch (filter_mode & sel) { + case F_FCOE: + field_shift += W_FT_FCOE; + break; + case F_PORT: + field_shift += W_FT_PORT; + break; + case F_VNIC_ID: + field_shift += W_FT_VNIC_ID; + break; + case F_VLAN: + field_shift += W_FT_VLAN; + break; + case F_TOS: + field_shift += W_FT_TOS; + break; + case F_PROTOCOL: + field_shift += W_FT_PROTOCOL; + break; + case F_ETHERTYPE: + field_shift += W_FT_ETHERTYPE; + break; + case F_MACMATCH: + field_shift += W_FT_MACMATCH; + break; + case F_MPSHITTYPE: + field_shift += W_FT_MPSHITTYPE; + break; + case F_FRAGMENTATION: + field_shift += W_FT_FRAGMENTATION; + break; + } + } + return field_shift; +} + +int t4_init_rss_mode(struct adapter *adap, int mbox) +{ + int i, ret; + struct fw_rss_vi_config_cmd rvc; + + memset(&rvc, 0, sizeof(rvc)); + + for_each_port(adap, i) { + struct port_info *p = adap2pinfo(adap, i); + + rvc.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ | + V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid)); + rvc.retval_len16 = htonl(FW_LEN16(rvc)); + ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc); + if (ret) + return ret; + p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen); + } + return 0; +} + +int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) +{ + u8 addr[6]; + int ret, i, j = 0; + struct fw_port_cmd c; + + memset(&c, 0, sizeof(c)); + + for_each_port(adap, i) { + unsigned int rss_size = 0; + struct port_info *p = adap2pinfo(adap, i); + + while ((adap->params.portvec & (1 << j)) == 0) + j++; + + c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) | + F_FW_CMD_REQUEST | F_FW_CMD_READ | + V_FW_PORT_CMD_PORTID(j)); + c.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION( + FW_PORT_ACTION_GET_PORT_INFO) | + FW_LEN16(c)); + ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); + if (ret) + return ret; + + ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); + if (ret < 0) + return ret; + + p->viid = ret; + p->tx_chan = j; + p->rss_size = rss_size; + t4_os_set_hw_addr(adap, i, addr); + + ret = be32_to_cpu(c.u.info.lstatus_to_modtype); + p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ? + G_FW_PORT_CMD_MDIOADDR(ret) : -1; + p->port_type = G_FW_PORT_CMD_PTYPE(ret); + p->mod_type = FW_PORT_MOD_TYPE_NA; + + init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap)); + j++; + } + return 0; +} diff --git a/drivers/net/cxgbe/base/t4_hw.h b/drivers/net/cxgbe/base/t4_hw.h new file mode 100644 index 0000000000..bf623cf4a2 --- /dev/null +++ b/drivers/net/cxgbe/base/t4_hw.h @@ -0,0 +1,149 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Chelsio Communications. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __T4_HW_H +#define __T4_HW_H + +enum { + NCHAN = 4, /* # of HW channels */ + NMTUS = 16, /* size of MTU table */ + NCCTRL_WIN = 32, /* # of congestion control windows */ + MBOX_LEN = 64, /* mailbox size in bytes */ + UDBS_SEG_SIZE = 128, /* segment size for BAR2 user doorbells */ +}; + +enum { + CIMLA_SIZE = 2048, /* # of 32-bit words in CIM LA */ +}; + +enum { + SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */ +}; + +enum { + SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */ + SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */ +}; + +/* PCI-e memory window access */ +enum pcie_memwin { + MEMWIN_NIC = 0, +}; + +enum { + SGE_MAX_WR_LEN = 512, /* max WR size in bytes */ + SGE_EQ_IDXSIZE = 64, /* egress queue pidx/cidx unit size */ + /* max no. of desc allowed in WR */ + SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / SGE_EQ_IDXSIZE, +}; + +struct sge_qstat { /* data written to SGE queue status entries */ + __be32 qid; + __be16 cidx; + __be16 pidx; +}; + +/* + * Structure for last 128 bits of response descriptors + */ +struct rsp_ctrl { + __be32 hdrbuflen_pidx; + __be32 pldbuflen_qid; + union { + u8 type_gen; + __be64 last_flit; + } u; +}; + +#define S_RSPD_NEWBUF 31 +#define V_RSPD_NEWBUF(x) ((x) << S_RSPD_NEWBUF) +#define F_RSPD_NEWBUF V_RSPD_NEWBUF(1U) + +#define S_RSPD_LEN 0 +#define M_RSPD_LEN 0x7fffffff +#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN) +#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN) + +#define S_RSPD_GEN 7 +#define V_RSPD_GEN(x) ((x) << S_RSPD_GEN) +#define F_RSPD_GEN V_RSPD_GEN(1U) + +#define S_RSPD_TYPE 4 +#define M_RSPD_TYPE 0x3 +#define V_RSPD_TYPE(x) ((x) << S_RSPD_TYPE) +#define G_RSPD_TYPE(x) (((x) >> S_RSPD_TYPE) & M_RSPD_TYPE) + +/* Rx queue interrupt deferral field: timer index */ +#define S_QINTR_CNT_EN 0 +#define V_QINTR_CNT_EN(x) ((x) << S_QINTR_CNT_EN) +#define F_QINTR_CNT_EN V_QINTR_CNT_EN(1U) + +#define S_QINTR_TIMER_IDX 1 +#define M_QINTR_TIMER_IDX 0x7 +#define V_QINTR_TIMER_IDX(x) ((x) << S_QINTR_TIMER_IDX) +#define G_QINTR_TIMER_IDX(x) (((x) >> S_QINTR_TIMER_IDX) & M_QINTR_TIMER_IDX) + +/* + * Flash layout. + */ +#define FLASH_START(start) ((start) * SF_SEC_SIZE) +#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE) + +enum { + /* + * Location of firmware image in FLASH. + */ + FLASH_FW_START_SEC = 8, + FLASH_FW_NSECS = 16, + FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC), + FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS), + + /* + * Location of Firmware Configuration File in FLASH. 
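+	 * Sector 31 is the last 64KB sector of a 2MB part, so the config
+	 * file occupies the final sector of the smallest supported FLASH
+	 * (see FLASH_MIN_SIZE below).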
+ */ + FLASH_CFG_START_SEC = 31, + FLASH_CFG_NSECS = 1, + FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC), + FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS), + + /* + * We don't support FLASH devices which can't support the full + * standard set of sections which we need for normal operations. + */ + FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE, +}; + +#undef FLASH_START +#undef FLASH_MAX_SIZE + +#endif /* __T4_HW_H */ diff --git a/drivers/net/cxgbe/base/t4_msg.h b/drivers/net/cxgbe/base/t4_msg.h new file mode 100644 index 0000000000..4b04cd0df9 --- /dev/null +++ b/drivers/net/cxgbe/base/t4_msg.h @@ -0,0 +1,345 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Chelsio Communications. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef T4_MSG_H +#define T4_MSG_H + +enum { + CPL_SGE_EGR_UPDATE = 0xA5, + CPL_FW4_MSG = 0xC0, + CPL_FW6_MSG = 0xE0, + CPL_TX_PKT_LSO = 0xED, + CPL_TX_PKT_XT = 0xEE, +}; + +enum { /* TX_PKT_XT checksum types */ + TX_CSUM_TCPIP = 8, + TX_CSUM_UDPIP = 9, + TX_CSUM_TCPIP6 = 10, +}; + +union opcode_tid { + __be32 opcode_tid; + __u8 opcode; +}; + +struct rss_header { + __u8 opcode; +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + __u8 channel:2; + __u8 filter_hit:1; + __u8 filter_tid:1; + __u8 hash_type:2; + __u8 ipv6:1; + __u8 send2fw:1; +#else + __u8 send2fw:1; + __u8 ipv6:1; + __u8 hash_type:2; + __u8 filter_tid:1; + __u8 filter_hit:1; + __u8 channel:2; +#endif + __be16 qid; + __be32 hash_val; +}; + +#if defined(RSS_HDR_VLD) || defined(CHELSIO_FW) +#define RSS_HDR struct rss_header rss_hdr +#else +#define RSS_HDR +#endif + +#ifndef CHELSIO_FW +struct work_request_hdr { + __be32 wr_hi; + __be32 wr_mid; + __be64 wr_lo; +}; + +#define WR_HDR struct work_request_hdr wr +#define WR_HDR_SIZE sizeof(struct work_request_hdr) +#else +#define WR_HDR +#define WR_HDR_SIZE 0 +#endif + +struct cpl_tx_data { + union opcode_tid ot; + __be32 len; + __be32 rsvd; + __be32 flags; +}; + +struct cpl_tx_pkt_core { + __be32 ctrl0; + __be16 pack; + __be16 len; + __be64 ctrl1; +}; + +struct cpl_tx_pkt { + WR_HDR; + struct cpl_tx_pkt_core c; +}; + +/* cpl_tx_pkt_core.ctrl0 fields */ +#define S_TXPKT_PF 8 +#define M_TXPKT_PF 0x7 +#define V_TXPKT_PF(x) ((x) << S_TXPKT_PF) +#define G_TXPKT_PF(x) (((x) >> S_TXPKT_PF) & M_TXPKT_PF) + +#define S_TXPKT_INTF 16 +#define M_TXPKT_INTF 0xF +#define V_TXPKT_INTF(x) ((x) << S_TXPKT_INTF) +#define G_TXPKT_INTF(x) (((x) >> S_TXPKT_INTF) & M_TXPKT_INTF) + +#define S_TXPKT_OPCODE 24 +#define M_TXPKT_OPCODE 0xFF +#define V_TXPKT_OPCODE(x) ((x) << S_TXPKT_OPCODE) +#define G_TXPKT_OPCODE(x) (((x) >> S_TXPKT_OPCODE) & M_TXPKT_OPCODE) + +/* cpl_tx_pkt_core.ctrl1 fields */ +#define S_TXPKT_IPHDR_LEN 20 +#define M_TXPKT_IPHDR_LEN 0x3FFF +#define V_TXPKT_IPHDR_LEN(x) ((__u64)(x) << S_TXPKT_IPHDR_LEN) +#define G_TXPKT_IPHDR_LEN(x) (((x) >> S_TXPKT_IPHDR_LEN) & M_TXPKT_IPHDR_LEN) + +#define S_TXPKT_ETHHDR_LEN 34 +#define M_TXPKT_ETHHDR_LEN 0x3F +#define V_TXPKT_ETHHDR_LEN(x) ((__u64)(x) << S_TXPKT_ETHHDR_LEN) +#define G_TXPKT_ETHHDR_LEN(x) (((x) >> S_TXPKT_ETHHDR_LEN) & M_TXPKT_ETHHDR_LEN) + +#define S_T6_TXPKT_ETHHDR_LEN 32 +#define M_T6_TXPKT_ETHHDR_LEN 0xFF +#define V_T6_TXPKT_ETHHDR_LEN(x) ((__u64)(x) << S_T6_TXPKT_ETHHDR_LEN) +#define G_T6_TXPKT_ETHHDR_LEN(x) \ + (((x) >> S_T6_TXPKT_ETHHDR_LEN) & M_T6_TXPKT_ETHHDR_LEN) + +#define S_TXPKT_CSUM_TYPE 40 +#define M_TXPKT_CSUM_TYPE 0xF +#define V_TXPKT_CSUM_TYPE(x) ((__u64)(x) << S_TXPKT_CSUM_TYPE) +#define G_TXPKT_CSUM_TYPE(x) (((x) >> S_TXPKT_CSUM_TYPE) & M_TXPKT_CSUM_TYPE) + +#define S_TXPKT_VLAN 44 +#define M_TXPKT_VLAN 0xFFFF +#define V_TXPKT_VLAN(x) ((__u64)(x) << S_TXPKT_VLAN) +#define G_TXPKT_VLAN(x) (((x) >> S_TXPKT_VLAN) & M_TXPKT_VLAN) + +#define S_TXPKT_VLAN_VLD 60 +#define V_TXPKT_VLAN_VLD(x) ((__u64)(x) << S_TXPKT_VLAN_VLD) +#define F_TXPKT_VLAN_VLD V_TXPKT_VLAN_VLD(1ULL) + +#define S_TXPKT_IPCSUM_DIS 62 +#define V_TXPKT_IPCSUM_DIS(x) ((__u64)(x) << S_TXPKT_IPCSUM_DIS) +#define F_TXPKT_IPCSUM_DIS V_TXPKT_IPCSUM_DIS(1ULL) + +#define S_TXPKT_L4CSUM_DIS 63 +#define V_TXPKT_L4CSUM_DIS(x) ((__u64)(x) << S_TXPKT_L4CSUM_DIS) +#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1ULL) + +struct cpl_tx_pkt_lso_core { + __be32 lso_ctrl; + __be16 ipid_ofst; + __be16 mss; + __be32 seqno_offset; + __be32 len; + /* encapsulated CPL (TX_PKT, 
TX_PKT_XT or TX_DATA) follows here */ +}; + +struct cpl_tx_pkt_lso { + WR_HDR; + struct cpl_tx_pkt_lso_core c; + /* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */ +}; + +/* cpl_tx_pkt_lso_core.lso_ctrl fields */ +#define S_LSO_TCPHDR_LEN 0 +#define M_LSO_TCPHDR_LEN 0xF +#define V_LSO_TCPHDR_LEN(x) ((x) << S_LSO_TCPHDR_LEN) +#define G_LSO_TCPHDR_LEN(x) (((x) >> S_LSO_TCPHDR_LEN) & M_LSO_TCPHDR_LEN) + +#define S_LSO_IPHDR_LEN 4 +#define M_LSO_IPHDR_LEN 0xFFF +#define V_LSO_IPHDR_LEN(x) ((x) << S_LSO_IPHDR_LEN) +#define G_LSO_IPHDR_LEN(x) (((x) >> S_LSO_IPHDR_LEN) & M_LSO_IPHDR_LEN) + +#define S_LSO_ETHHDR_LEN 16 +#define M_LSO_ETHHDR_LEN 0xF +#define V_LSO_ETHHDR_LEN(x) ((x) << S_LSO_ETHHDR_LEN) +#define G_LSO_ETHHDR_LEN(x) (((x) >> S_LSO_ETHHDR_LEN) & M_LSO_ETHHDR_LEN) + +#define S_LSO_IPV6 20 +#define V_LSO_IPV6(x) ((x) << S_LSO_IPV6) +#define F_LSO_IPV6 V_LSO_IPV6(1U) + +#define S_LSO_LAST_SLICE 22 +#define V_LSO_LAST_SLICE(x) ((x) << S_LSO_LAST_SLICE) +#define F_LSO_LAST_SLICE V_LSO_LAST_SLICE(1U) + +#define S_LSO_FIRST_SLICE 23 +#define V_LSO_FIRST_SLICE(x) ((x) << S_LSO_FIRST_SLICE) +#define F_LSO_FIRST_SLICE V_LSO_FIRST_SLICE(1U) + +#define S_LSO_OPCODE 24 +#define M_LSO_OPCODE 0xFF +#define V_LSO_OPCODE(x) ((x) << S_LSO_OPCODE) +#define G_LSO_OPCODE(x) (((x) >> S_LSO_OPCODE) & M_LSO_OPCODE) + +#define S_LSO_T5_XFER_SIZE 0 +#define M_LSO_T5_XFER_SIZE 0xFFFFFFF +#define V_LSO_T5_XFER_SIZE(x) ((x) << S_LSO_T5_XFER_SIZE) +#define G_LSO_T5_XFER_SIZE(x) (((x) >> S_LSO_T5_XFER_SIZE) & M_LSO_T5_XFER_SIZE) + +struct cpl_rx_pkt { + RSS_HDR; + __u8 opcode; +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + __u8 iff:4; + __u8 csum_calc:1; + __u8 ipmi_pkt:1; + __u8 vlan_ex:1; + __u8 ip_frag:1; +#else + __u8 ip_frag:1; + __u8 vlan_ex:1; + __u8 ipmi_pkt:1; + __u8 csum_calc:1; + __u8 iff:4; +#endif + __be16 csum; + __be16 vlan; + __be16 len; + __be32 l2info; + __be16 hdr_len; + __be16 err_vec; +}; + +/* rx_pkt.l2info fields */ +#define S_RXF_UDP 22 +#define V_RXF_UDP(x) ((x) << S_RXF_UDP) +#define F_RXF_UDP V_RXF_UDP(1U) + +#define S_RXF_TCP 23 +#define V_RXF_TCP(x) ((x) << S_RXF_TCP) +#define F_RXF_TCP V_RXF_TCP(1U) + +#define S_RXF_IP 24 +#define V_RXF_IP(x) ((x) << S_RXF_IP) +#define F_RXF_IP V_RXF_IP(1U) + +#define S_RXF_IP6 25 +#define V_RXF_IP6(x) ((x) << S_RXF_IP6) +#define F_RXF_IP6 V_RXF_IP6(1U) + +/* cpl_fw*.type values */ +enum { + FW_TYPE_RSSCPL = 4, +}; + +struct cpl_fw4_msg { + RSS_HDR; + u8 opcode; + u8 type; + __be16 rsvd0; + __be32 rsvd1; + __be64 data[2]; +}; + +struct cpl_fw6_msg { + RSS_HDR; + u8 opcode; + u8 type; + __be16 rsvd0; + __be32 rsvd1; + __be64 data[4]; +}; + +enum { + ULP_TX_SC_IMM = 0x81, + ULP_TX_SC_DSGL = 0x82, + ULP_TX_SC_ISGL = 0x83 +}; + +#define S_ULPTX_CMD 24 +#define M_ULPTX_CMD 0xFF +#define V_ULPTX_CMD(x) ((x) << S_ULPTX_CMD) + +#define S_ULP_TX_SC_MORE 23 +#define V_ULP_TX_SC_MORE(x) ((x) << S_ULP_TX_SC_MORE) +#define F_ULP_TX_SC_MORE V_ULP_TX_SC_MORE(1U) + +struct ulptx_sge_pair { + __be32 len[2]; + __be64 addr[2]; +}; + +struct ulptx_sgl { + __be32 cmd_nsge; + __be32 len0; + __be64 addr0; + +#if !(defined C99_NOT_SUPPORTED) + struct ulptx_sge_pair sge[0]; +#endif + +}; + +struct ulptx_idata { + __be32 cmd_more; + __be32 len; +}; + +#define S_ULPTX_NSGE 0 +#define M_ULPTX_NSGE 0xFFFF +#define V_ULPTX_NSGE(x) ((x) << S_ULPTX_NSGE) + +struct ulp_txpkt { + __be32 cmd_dest; + __be32 len; +}; + +/* ulp_txpkt.cmd_dest fields */ +#define S_ULP_TXPKT_DEST 16 +#define M_ULP_TXPKT_DEST 0x3 +#define V_ULP_TXPKT_DEST(x) ((x) << S_ULP_TXPKT_DEST) + +#define 
S_ULP_TXPKT_FID	4
+#define M_ULP_TXPKT_FID	0x7ff
+#define V_ULP_TXPKT_FID(x)	((x) << S_ULP_TXPKT_FID)
+
+#define S_ULP_TXPKT_RO	3
+#define V_ULP_TXPKT_RO(x)	((x) << S_ULP_TXPKT_RO)
+#define F_ULP_TXPKT_RO	V_ULP_TXPKT_RO(1U)
+
+#endif /* T4_MSG_H */
diff --git a/drivers/net/cxgbe/base/t4_pci_id_tbl.h b/drivers/net/cxgbe/base/t4_pci_id_tbl.h
new file mode 100644
index 0000000000..ea72edff75
--- /dev/null
+++ b/drivers/net/cxgbe/base/t4_pci_id_tbl.h
@@ -0,0 +1,148 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014-2015 Chelsio Communications.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name of Chelsio Communications nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __T4_PCI_ID_TBL_H__
+#define __T4_PCI_ID_TBL_H__
+
+/*
+ * The OS-dependent code can define cpp macros for creating a PCI Device ID
+ * Table.  This is useful because it allows the PCI ID Table to be maintained
+ * in a single place and all supporting OSes to get new PCI Device IDs
+ * automatically.
+ *
+ * The macros are:
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+ * -- Used to start the definition of the PCI ID Table.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION
+ * -- The PCI Function Number to use in the PCI Device ID Table.  "0"
+ * -- for drivers attaching to PF0-3, "4" for drivers attaching to PF4,
+ * -- "8" for drivers attaching to SR-IOV Virtual Functions, etc.
+ *
+ * CH_PCI_DEVICE_ID_FUNCTION2 [optional]
+ * -- If defined, create a PCI Device ID Table with both
+ * -- CH_PCI_DEVICE_ID_FUNCTION and CH_PCI_DEVICE_ID_FUNCTION2 populated.
+ *
+ * CH_PCI_ID_TABLE_ENTRY(DeviceID)
+ * -- Used for the individual PCI Device ID entries.  Note that we will
+ * -- be adding a trailing comma (",") after all of the entries (and
+ * -- between the pairs of entries if CH_PCI_DEVICE_ID_FUNCTION2 is defined).
+ *
+ * CH_PCI_DEVICE_ID_TABLE_DEFINE_END
+ * -- Used to finish the definition of the PCI ID Table.  Note that we
+ * -- will be adding a trailing semi-colon (";") here.
+ *
+ * CH_PCI_DEVICE_ID_BYPASS_SUPPORTED [optional]
+ * -- If defined, indicates that the OS Driver has support for Bypass
+ * -- Adapters.
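+ *
+ * As a purely hypothetical illustration, a NIC driver attaching to PF4
+ * that defined
+ *
+ *	#define CH_PCI_DEVICE_ID_FUNCTION	0x4
+ *	#define CH_PCI_ID_TABLE_ENTRY(devid)	{ 0x1425, (devid) }
+ *
+ * would see CH_PCI_ID_TABLE_FENTRY(0x5001) expand to an entry for Device
+ * ID 0x5401 (0x1425 being the Chelsio PCI Vendor ID), per the 0xVFPP
+ * scheme described below.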
+ */ +#ifdef CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN + +/* + * Some sanity checks ... + */ +#ifndef CH_PCI_DEVICE_ID_FUNCTION +#error CH_PCI_DEVICE_ID_FUNCTION not defined! +#endif +#ifndef CH_PCI_ID_TABLE_ENTRY +#error CH_PCI_ID_TABLE_ENTRY not defined! +#endif +#ifndef CH_PCI_DEVICE_ID_TABLE_DEFINE_END +#error CH_PCI_DEVICE_ID_TABLE_DEFINE_END not defined! +#endif + +/* + * T4 and later ASICs use a PCI Device ID scheme of 0xVFPP where: + * + * V = "4" for T4; "5" for T5, etc. + * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs + * PP = adapter product designation + * + * We use this consistency in order to create the proper PCI Device IDs + * for the specified CH_PCI_DEVICE_ID_FUNCTION. + */ +#ifndef CH_PCI_DEVICE_ID_FUNCTION2 +#define CH_PCI_ID_TABLE_FENTRY(devid) \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION) << 8)) +#else +#define CH_PCI_ID_TABLE_FENTRY(devid) \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION) << 8)), \ + CH_PCI_ID_TABLE_ENTRY((devid) | \ + ((CH_PCI_DEVICE_ID_FUNCTION2) << 8)) +#endif + +CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN + /* + * T5 adapters: + */ + CH_PCI_ID_TABLE_FENTRY(0x5000), /* T580-dbg */ + CH_PCI_ID_TABLE_FENTRY(0x5001), /* T520-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5002), /* T522-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5003), /* T540-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5004), /* T520-bch */ + CH_PCI_ID_TABLE_FENTRY(0x5005), /* T540-bch */ + CH_PCI_ID_TABLE_FENTRY(0x5006), /* T540-ch */ + CH_PCI_ID_TABLE_FENTRY(0x5007), /* T520-so */ + CH_PCI_ID_TABLE_FENTRY(0x5008), /* T520-cx */ + CH_PCI_ID_TABLE_FENTRY(0x5009), /* T520-bt */ + CH_PCI_ID_TABLE_FENTRY(0x500a), /* T504-bt */ +#ifdef CH_PCI_DEVICE_ID_BYPASS_SUPPORTED + CH_PCI_ID_TABLE_FENTRY(0x500b), /* B520-sr */ + CH_PCI_ID_TABLE_FENTRY(0x500c), /* B504-bt */ +#endif + CH_PCI_ID_TABLE_FENTRY(0x500d), /* T580-cr */ + CH_PCI_ID_TABLE_FENTRY(0x500e), /* T540-LP-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5010), /* T580-LP-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5011), /* T520-LL-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5012), /* T560-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5013), /* T580-chr */ + CH_PCI_ID_TABLE_FENTRY(0x5014), /* T580-so */ + CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */ + CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5083), /* Custom T540-LP-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5084), /* Custom T580-cr */ + CH_PCI_ID_TABLE_FENTRY(0x5085), /* Custom 3x T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5086), /* Custom 2x T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5087), /* Custom T580-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5088), /* Custom T570-CR */ + CH_PCI_ID_TABLE_FENTRY(0x5089), /* Custom T520-CR */ +CH_PCI_DEVICE_ID_TABLE_DEFINE_END; + +#endif /* CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN */ + +#endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/drivers/net/cxgbe/base/t4_regs.h b/drivers/net/cxgbe/base/t4_regs.h new file mode 100644 index 0000000000..cd28b593a8 --- /dev/null +++ b/drivers/net/cxgbe/base/t4_regs.h @@ -0,0 +1,779 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Chelsio Communications. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#define MYPF_BASE 0x1b000 +#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr)) + +#define PF0_BASE 0x1e000 +#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr)) + +#define PF_STRIDE 0x400 +#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE) +#define PF_REG(idx, reg) (PF_BASE(idx) + (reg)) + +#define MYPORT_BASE 0x1c000 +#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr)) + +#define PORT0_BASE 0x20000 +#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr)) + +#define PORT_STRIDE 0x2000 +#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE) +#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg)) + +#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8) +#define NUM_PCIE_MEM_ACCESS_INSTANCES 8 + +#define PCIE_FW_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) +#define NUM_PCIE_FW_INSTANCES 8 + +#define T5_MYPORT_BASE 0x2c000 +#define T5_MYPORT_REG(reg_addr) (T5_MYPORT_BASE + (reg_addr)) + +#define T5_PORT0_BASE 0x30000 +#define T5_PORT0_REG(reg_addr) (T5_PORT0_BASE + (reg_addr)) + +#define T5_PORT_STRIDE 0x4000 +#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE) +#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg)) + +#define MPS_T5_CLS_SRAM_L(idx) (A_MPS_T5_CLS_SRAM_L + (idx) * 8) +#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512 + +#define MPS_T5_CLS_SRAM_H(idx) (A_MPS_T5_CLS_SRAM_H + (idx) * 8) +#define NUM_MPS_T5_CLS_SRAM_H_INSTANCES 512 + +/* registers for module SGE */ +#define SGE_BASE_ADDR 0x1000 + +#define A_SGE_PF_KDOORBELL 0x0 + +#define S_QID 15 +#define M_QID 0x1ffffU +#define V_QID(x) ((x) << S_QID) +#define G_QID(x) (((x) >> S_QID) & M_QID) + +#define S_DBPRIO 14 +#define V_DBPRIO(x) ((x) << S_DBPRIO) +#define F_DBPRIO V_DBPRIO(1U) + +#define S_PIDX 0 +#define M_PIDX 0x3fffU +#define V_PIDX(x) ((x) << S_PIDX) +#define G_PIDX(x) (((x) >> S_PIDX) & M_PIDX) + +#define S_DBTYPE 13 +#define V_DBTYPE(x) ((x) << S_DBTYPE) +#define F_DBTYPE V_DBTYPE(1U) + +#define S_PIDX_T5 0 +#define M_PIDX_T5 0x1fffU +#define V_PIDX_T5(x) ((x) << S_PIDX_T5) +#define G_PIDX_T5(x) (((x) >> S_PIDX_T5) & M_PIDX_T5) + +#define A_SGE_PF_GTS 0x4 + +#define S_INGRESSQID 16 +#define M_INGRESSQID 0xffffU +#define V_INGRESSQID(x) ((x) << S_INGRESSQID) +#define G_INGRESSQID(x) (((x) >> S_INGRESSQID) & M_INGRESSQID) + +#define S_SEINTARM 12 +#define V_SEINTARM(x) ((x) << 
S_SEINTARM) +#define F_SEINTARM V_SEINTARM(1U) + +#define S_CIDXINC 0 +#define M_CIDXINC 0xfffU +#define V_CIDXINC(x) ((x) << S_CIDXINC) +#define G_CIDXINC(x) (((x) >> S_CIDXINC) & M_CIDXINC) + +#define A_SGE_CONTROL 0x1008 + +#define S_RXPKTCPLMODE 18 +#define V_RXPKTCPLMODE(x) ((x) << S_RXPKTCPLMODE) +#define F_RXPKTCPLMODE V_RXPKTCPLMODE(1U) + +#define S_EGRSTATUSPAGESIZE 17 +#define V_EGRSTATUSPAGESIZE(x) ((x) << S_EGRSTATUSPAGESIZE) +#define F_EGRSTATUSPAGESIZE V_EGRSTATUSPAGESIZE(1U) + +#define S_PKTSHIFT 10 +#define M_PKTSHIFT 0x7U +#define V_PKTSHIFT(x) ((x) << S_PKTSHIFT) +#define G_PKTSHIFT(x) (((x) >> S_PKTSHIFT) & M_PKTSHIFT) + +#define S_INGPADBOUNDARY 4 +#define M_INGPADBOUNDARY 0x7U +#define V_INGPADBOUNDARY(x) ((x) << S_INGPADBOUNDARY) +#define G_INGPADBOUNDARY(x) (((x) >> S_INGPADBOUNDARY) & M_INGPADBOUNDARY) + +#define A_SGE_HOST_PAGE_SIZE 0x100c + +#define S_HOSTPAGESIZEPF7 28 +#define M_HOSTPAGESIZEPF7 0xfU +#define V_HOSTPAGESIZEPF7(x) ((x) << S_HOSTPAGESIZEPF7) +#define G_HOSTPAGESIZEPF7(x) (((x) >> S_HOSTPAGESIZEPF7) & M_HOSTPAGESIZEPF7) + +#define S_HOSTPAGESIZEPF6 24 +#define M_HOSTPAGESIZEPF6 0xfU +#define V_HOSTPAGESIZEPF6(x) ((x) << S_HOSTPAGESIZEPF6) +#define G_HOSTPAGESIZEPF6(x) (((x) >> S_HOSTPAGESIZEPF6) & M_HOSTPAGESIZEPF6) + +#define S_HOSTPAGESIZEPF5 20 +#define M_HOSTPAGESIZEPF5 0xfU +#define V_HOSTPAGESIZEPF5(x) ((x) << S_HOSTPAGESIZEPF5) +#define G_HOSTPAGESIZEPF5(x) (((x) >> S_HOSTPAGESIZEPF5) & M_HOSTPAGESIZEPF5) + +#define S_HOSTPAGESIZEPF4 16 +#define M_HOSTPAGESIZEPF4 0xfU +#define V_HOSTPAGESIZEPF4(x) ((x) << S_HOSTPAGESIZEPF4) +#define G_HOSTPAGESIZEPF4(x) (((x) >> S_HOSTPAGESIZEPF4) & M_HOSTPAGESIZEPF4) + +#define S_HOSTPAGESIZEPF3 12 +#define M_HOSTPAGESIZEPF3 0xfU +#define V_HOSTPAGESIZEPF3(x) ((x) << S_HOSTPAGESIZEPF3) +#define G_HOSTPAGESIZEPF3(x) (((x) >> S_HOSTPAGESIZEPF3) & M_HOSTPAGESIZEPF3) + +#define S_HOSTPAGESIZEPF2 8 +#define M_HOSTPAGESIZEPF2 0xfU +#define V_HOSTPAGESIZEPF2(x) ((x) << S_HOSTPAGESIZEPF2) +#define G_HOSTPAGESIZEPF2(x) (((x) >> S_HOSTPAGESIZEPF2) & M_HOSTPAGESIZEPF2) + +#define S_HOSTPAGESIZEPF1 4 +#define M_HOSTPAGESIZEPF1 0xfU +#define V_HOSTPAGESIZEPF1(x) ((x) << S_HOSTPAGESIZEPF1) +#define G_HOSTPAGESIZEPF1(x) (((x) >> S_HOSTPAGESIZEPF1) & M_HOSTPAGESIZEPF1) + +#define S_HOSTPAGESIZEPF0 0 +#define M_HOSTPAGESIZEPF0 0xfU +#define V_HOSTPAGESIZEPF0(x) ((x) << S_HOSTPAGESIZEPF0) +#define G_HOSTPAGESIZEPF0(x) (((x) >> S_HOSTPAGESIZEPF0) & M_HOSTPAGESIZEPF0) + +#define A_SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010 + +#define S_QUEUESPERPAGEPF1 4 +#define M_QUEUESPERPAGEPF1 0xfU +#define V_QUEUESPERPAGEPF1(x) ((x) << S_QUEUESPERPAGEPF1) +#define G_QUEUESPERPAGEPF1(x) (((x) >> S_QUEUESPERPAGEPF1) & M_QUEUESPERPAGEPF1) + +#define S_QUEUESPERPAGEPF0 0 +#define M_QUEUESPERPAGEPF0 0xfU +#define V_QUEUESPERPAGEPF0(x) ((x) << S_QUEUESPERPAGEPF0) +#define G_QUEUESPERPAGEPF0(x) (((x) >> S_QUEUESPERPAGEPF0) & M_QUEUESPERPAGEPF0) + +#define S_ERR_CPL_EXCEED_IQE_SIZE 22 +#define V_ERR_CPL_EXCEED_IQE_SIZE(x) ((x) << S_ERR_CPL_EXCEED_IQE_SIZE) +#define F_ERR_CPL_EXCEED_IQE_SIZE V_ERR_CPL_EXCEED_IQE_SIZE(1U) + +#define S_ERR_INVALID_CIDX_INC 21 +#define V_ERR_INVALID_CIDX_INC(x) ((x) << S_ERR_INVALID_CIDX_INC) +#define F_ERR_INVALID_CIDX_INC V_ERR_INVALID_CIDX_INC(1U) + +#define S_ERR_CPL_OPCODE_0 19 +#define V_ERR_CPL_OPCODE_0(x) ((x) << S_ERR_CPL_OPCODE_0) +#define F_ERR_CPL_OPCODE_0 V_ERR_CPL_OPCODE_0(1U) + +#define S_ERR_DROPPED_DB 18 +#define V_ERR_DROPPED_DB(x) ((x) << S_ERR_DROPPED_DB) +#define F_ERR_DROPPED_DB V_ERR_DROPPED_DB(1U) + 
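+/*
+ * The S_ERR_* and F_ERR_* bits in this run are SGE error interrupt
+ * causes, used with A_SGE_INT_ENABLE3 below.  As throughout this file,
+ * for a field FOO: S_FOO is the bit offset, M_FOO the unshifted mask,
+ * V_FOO(x) shifts a value into place, G_FOO(x) extracts it, and F_FOO
+ * is the single-bit field value.
+ */
+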
+#define S_ERR_DATA_CPL_ON_HIGH_QID1 17 +#define V_ERR_DATA_CPL_ON_HIGH_QID1(x) ((x) << S_ERR_DATA_CPL_ON_HIGH_QID1) +#define F_ERR_DATA_CPL_ON_HIGH_QID1 V_ERR_DATA_CPL_ON_HIGH_QID1(1U) + +#define S_ERR_DATA_CPL_ON_HIGH_QID0 16 +#define V_ERR_DATA_CPL_ON_HIGH_QID0(x) ((x) << S_ERR_DATA_CPL_ON_HIGH_QID0) +#define F_ERR_DATA_CPL_ON_HIGH_QID0 V_ERR_DATA_CPL_ON_HIGH_QID0(1U) + +#define S_ERR_BAD_DB_PIDX3 15 +#define V_ERR_BAD_DB_PIDX3(x) ((x) << S_ERR_BAD_DB_PIDX3) +#define F_ERR_BAD_DB_PIDX3 V_ERR_BAD_DB_PIDX3(1U) + +#define S_ERR_BAD_DB_PIDX2 14 +#define V_ERR_BAD_DB_PIDX2(x) ((x) << S_ERR_BAD_DB_PIDX2) +#define F_ERR_BAD_DB_PIDX2 V_ERR_BAD_DB_PIDX2(1U) + +#define S_ERR_BAD_DB_PIDX1 13 +#define V_ERR_BAD_DB_PIDX1(x) ((x) << S_ERR_BAD_DB_PIDX1) +#define F_ERR_BAD_DB_PIDX1 V_ERR_BAD_DB_PIDX1(1U) + +#define S_ERR_BAD_DB_PIDX0 12 +#define V_ERR_BAD_DB_PIDX0(x) ((x) << S_ERR_BAD_DB_PIDX0) +#define F_ERR_BAD_DB_PIDX0 V_ERR_BAD_DB_PIDX0(1U) + +#define S_ERR_ING_PCIE_CHAN 11 +#define V_ERR_ING_PCIE_CHAN(x) ((x) << S_ERR_ING_PCIE_CHAN) +#define F_ERR_ING_PCIE_CHAN V_ERR_ING_PCIE_CHAN(1U) + +#define S_ERR_ING_CTXT_PRIO 10 +#define V_ERR_ING_CTXT_PRIO(x) ((x) << S_ERR_ING_CTXT_PRIO) +#define F_ERR_ING_CTXT_PRIO V_ERR_ING_CTXT_PRIO(1U) + +#define S_ERR_EGR_CTXT_PRIO 9 +#define V_ERR_EGR_CTXT_PRIO(x) ((x) << S_ERR_EGR_CTXT_PRIO) +#define F_ERR_EGR_CTXT_PRIO V_ERR_EGR_CTXT_PRIO(1U) + +#define S_DBFIFO_HP_INT 8 +#define V_DBFIFO_HP_INT(x) ((x) << S_DBFIFO_HP_INT) +#define F_DBFIFO_HP_INT V_DBFIFO_HP_INT(1U) + +#define S_DBFIFO_LP_INT 7 +#define V_DBFIFO_LP_INT(x) ((x) << S_DBFIFO_LP_INT) +#define F_DBFIFO_LP_INT V_DBFIFO_LP_INT(1U) + +#define S_INGRESS_SIZE_ERR 5 +#define V_INGRESS_SIZE_ERR(x) ((x) << S_INGRESS_SIZE_ERR) +#define F_INGRESS_SIZE_ERR V_INGRESS_SIZE_ERR(1U) + +#define S_EGRESS_SIZE_ERR 4 +#define V_EGRESS_SIZE_ERR(x) ((x) << S_EGRESS_SIZE_ERR) +#define F_EGRESS_SIZE_ERR V_EGRESS_SIZE_ERR(1U) + +#define A_SGE_INT_ENABLE3 0x1040 + +#define A_SGE_FL_BUFFER_SIZE0 0x1044 +#define A_SGE_FL_BUFFER_SIZE1 0x1048 +#define A_SGE_FL_BUFFER_SIZE2 0x104c +#define A_SGE_FL_BUFFER_SIZE3 0x1050 + +#define A_SGE_CONM_CTRL 0x1094 + +#define S_EGRTHRESHOLD 8 +#define M_EGRTHRESHOLD 0x3fU +#define V_EGRTHRESHOLD(x) ((x) << S_EGRTHRESHOLD) +#define G_EGRTHRESHOLD(x) (((x) >> S_EGRTHRESHOLD) & M_EGRTHRESHOLD) + +#define S_EGRTHRESHOLDPACKING 14 +#define M_EGRTHRESHOLDPACKING 0x3fU +#define V_EGRTHRESHOLDPACKING(x) ((x) << S_EGRTHRESHOLDPACKING) +#define G_EGRTHRESHOLDPACKING(x) (((x) >> S_EGRTHRESHOLDPACKING) & \ + M_EGRTHRESHOLDPACKING) + +#define S_INGTHRESHOLD 2 +#define M_INGTHRESHOLD 0x3fU +#define V_INGTHRESHOLD(x) ((x) << S_INGTHRESHOLD) +#define G_INGTHRESHOLD(x) (((x) >> S_INGTHRESHOLD) & M_INGTHRESHOLD) + +#define A_SGE_INGRESS_RX_THRESHOLD 0x10a0 + +#define S_THRESHOLD_0 24 +#define M_THRESHOLD_0 0x3fU +#define V_THRESHOLD_0(x) ((x) << S_THRESHOLD_0) +#define G_THRESHOLD_0(x) (((x) >> S_THRESHOLD_0) & M_THRESHOLD_0) + +#define S_THRESHOLD_1 16 +#define M_THRESHOLD_1 0x3fU +#define V_THRESHOLD_1(x) ((x) << S_THRESHOLD_1) +#define G_THRESHOLD_1(x) (((x) >> S_THRESHOLD_1) & M_THRESHOLD_1) + +#define S_THRESHOLD_2 8 +#define M_THRESHOLD_2 0x3fU +#define V_THRESHOLD_2(x) ((x) << S_THRESHOLD_2) +#define G_THRESHOLD_2(x) (((x) >> S_THRESHOLD_2) & M_THRESHOLD_2) + +#define S_THRESHOLD_3 0 +#define M_THRESHOLD_3 0x3fU +#define V_THRESHOLD_3(x) ((x) << S_THRESHOLD_3) +#define G_THRESHOLD_3(x) (((x) >> S_THRESHOLD_3) & M_THRESHOLD_3) + +#define A_SGE_TIMER_VALUE_0_AND_1 0x10b8 + +#define S_TIMERVALUE0 16 +#define 
M_TIMERVALUE0 0xffffU +#define V_TIMERVALUE0(x) ((x) << S_TIMERVALUE0) +#define G_TIMERVALUE0(x) (((x) >> S_TIMERVALUE0) & M_TIMERVALUE0) + +#define S_TIMERVALUE1 0 +#define M_TIMERVALUE1 0xffffU +#define V_TIMERVALUE1(x) ((x) << S_TIMERVALUE1) +#define G_TIMERVALUE1(x) (((x) >> S_TIMERVALUE1) & M_TIMERVALUE1) + +#define A_SGE_TIMER_VALUE_2_AND_3 0x10bc + +#define S_TIMERVALUE2 16 +#define M_TIMERVALUE2 0xffffU +#define V_TIMERVALUE2(x) ((x) << S_TIMERVALUE2) +#define G_TIMERVALUE2(x) (((x) >> S_TIMERVALUE2) & M_TIMERVALUE2) + +#define S_TIMERVALUE3 0 +#define M_TIMERVALUE3 0xffffU +#define V_TIMERVALUE3(x) ((x) << S_TIMERVALUE3) +#define G_TIMERVALUE3(x) (((x) >> S_TIMERVALUE3) & M_TIMERVALUE3) + +#define A_SGE_TIMER_VALUE_4_AND_5 0x10c0 + +#define S_TIMERVALUE4 16 +#define M_TIMERVALUE4 0xffffU +#define V_TIMERVALUE4(x) ((x) << S_TIMERVALUE4) +#define G_TIMERVALUE4(x) (((x) >> S_TIMERVALUE4) & M_TIMERVALUE4) + +#define S_TIMERVALUE5 0 +#define M_TIMERVALUE5 0xffffU +#define V_TIMERVALUE5(x) ((x) << S_TIMERVALUE5) +#define G_TIMERVALUE5(x) (((x) >> S_TIMERVALUE5) & M_TIMERVALUE5) + +#define A_SGE_DEBUG_INDEX 0x10cc +#define A_SGE_DEBUG_DATA_HIGH 0x10d0 +#define A_SGE_DEBUG_DATA_LOW 0x10d4 +#define A_SGE_STAT_CFG 0x10ec + +#define S_STATMODE 2 +#define M_STATMODE 0x3U +#define V_STATMODE(x) ((x) << S_STATMODE) +#define G_STATMODE(x) (((x) >> S_STATMODE) & M_STATMODE) + +#define S_STATSOURCE_T5 9 +#define M_STATSOURCE_T5 0xfU +#define V_STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5) +#define G_STATSOURCE_T5(x) (((x) >> S_STATSOURCE_T5) & M_STATSOURCE_T5) + +#define A_SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 + +#define A_SGE_CONTROL2 0x1124 + +#define S_INGPACKBOUNDARY 16 +#define M_INGPACKBOUNDARY 0x7U +#define V_INGPACKBOUNDARY(x) ((x) << S_INGPACKBOUNDARY) +#define G_INGPACKBOUNDARY(x) (((x) >> S_INGPACKBOUNDARY) & M_INGPACKBOUNDARY) + +#define S_BUSY 31 +#define V_BUSY(x) ((x) << S_BUSY) +#define F_BUSY V_BUSY(1U) + +#define A_SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8 +#define A_SGE_DEBUG_DATA_LOW_INDEX_2 0x12c8 +#define A_SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc + +/* registers for module PCIE */ +#define PCIE_BASE_ADDR 0x3000 + +#define A_PCIE_MEM_ACCESS_BASE_WIN 0x3068 + +#define S_PCIEOFST 10 +#define M_PCIEOFST 0x3fffffU +#define V_PCIEOFST(x) ((x) << S_PCIEOFST) +#define G_PCIEOFST(x) (((x) >> S_PCIEOFST) & M_PCIEOFST) + +#define S_BIR 8 +#define M_BIR 0x3U +#define V_BIR(x) ((x) << S_BIR) +#define G_BIR(x) (((x) >> S_BIR) & M_BIR) + +#define S_WINDOW 0 +#define M_WINDOW 0xffU +#define V_WINDOW(x) ((x) << S_WINDOW) +#define G_WINDOW(x) (((x) >> S_WINDOW) & M_WINDOW) + +#define A_PCIE_MEM_ACCESS_OFFSET 0x306c + +#define S_PFNUM 0 +#define M_PFNUM 0x7U +#define V_PFNUM(x) ((x) << S_PFNUM) +#define G_PFNUM(x) (((x) >> S_PFNUM) & M_PFNUM) + +#define A_PCIE_FW 0x30b8 +#define A_PCIE_FW_PF 0x30bc + +/* registers for module CIM */ +#define CIM_BASE_ADDR 0x7b00 + +#define A_CIM_PF_MAILBOX_DATA 0x240 +#define A_CIM_PF_MAILBOX_CTRL 0x280 + +#define S_MBMSGVALID 3 +#define V_MBMSGVALID(x) ((x) << S_MBMSGVALID) +#define F_MBMSGVALID V_MBMSGVALID(1U) + +#define S_MBOWNER 0 +#define M_MBOWNER 0x3U +#define V_MBOWNER(x) ((x) << S_MBOWNER) +#define G_MBOWNER(x) (((x) >> S_MBOWNER) & M_MBOWNER) + +#define A_CIM_PF_MAILBOX_CTRL_SHADOW_COPY 0x290 +#define A_CIM_BOOT_CFG 0x7b00 + +#define S_UPCRST 0 +#define V_UPCRST(x) ((x) << S_UPCRST) +#define F_UPCRST V_UPCRST(1U) + +/* registers for module TP */ +#define TP_BASE_ADDR 0x7d00 + +#define A_TP_TIMER_RESOLUTION 0x7d90 + +#define S_TIMERRESOLUTION 16 +#define 
M_TIMERRESOLUTION 0xffU +#define V_TIMERRESOLUTION(x) ((x) << S_TIMERRESOLUTION) +#define G_TIMERRESOLUTION(x) (((x) >> S_TIMERRESOLUTION) & M_TIMERRESOLUTION) + +#define S_DELAYEDACKRESOLUTION 0 +#define M_DELAYEDACKRESOLUTION 0xffU +#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION) +#define G_DELAYEDACKRESOLUTION(x) (((x) >> S_DELAYEDACKRESOLUTION) & \ + M_DELAYEDACKRESOLUTION) + +#define A_TP_CCTRL_TABLE 0x7ddc + +#define A_TP_MTU_TABLE 0x7de4 + +#define S_MTUINDEX 24 +#define M_MTUINDEX 0xffU +#define V_MTUINDEX(x) ((x) << S_MTUINDEX) +#define G_MTUINDEX(x) (((x) >> S_MTUINDEX) & M_MTUINDEX) + +#define S_MTUWIDTH 16 +#define M_MTUWIDTH 0xfU +#define V_MTUWIDTH(x) ((x) << S_MTUWIDTH) +#define G_MTUWIDTH(x) (((x) >> S_MTUWIDTH) & M_MTUWIDTH) + +#define S_MTUVALUE 0 +#define M_MTUVALUE 0x3fffU +#define V_MTUVALUE(x) ((x) << S_MTUVALUE) +#define G_MTUVALUE(x) (((x) >> S_MTUVALUE) & M_MTUVALUE) + +#define A_TP_PIO_ADDR 0x7e40 +#define A_TP_PIO_DATA 0x7e44 + +#define A_TP_VLAN_PRI_MAP 0x140 + +#define S_FRAGMENTATION 9 +#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION) +#define F_FRAGMENTATION V_FRAGMENTATION(1U) + +#define S_MPSHITTYPE 8 +#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE) +#define F_MPSHITTYPE V_MPSHITTYPE(1U) + +#define S_MACMATCH 7 +#define V_MACMATCH(x) ((x) << S_MACMATCH) +#define F_MACMATCH V_MACMATCH(1U) + +#define S_ETHERTYPE 6 +#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE) +#define F_ETHERTYPE V_ETHERTYPE(1U) + +#define S_PROTOCOL 5 +#define V_PROTOCOL(x) ((x) << S_PROTOCOL) +#define F_PROTOCOL V_PROTOCOL(1U) + +#define S_TOS 4 +#define V_TOS(x) ((x) << S_TOS) +#define F_TOS V_TOS(1U) + +#define S_VLAN 3 +#define V_VLAN(x) ((x) << S_VLAN) +#define F_VLAN V_VLAN(1U) + +#define S_VNIC_ID 2 +#define V_VNIC_ID(x) ((x) << S_VNIC_ID) +#define F_VNIC_ID V_VNIC_ID(1U) + +#define S_PORT 1 +#define V_PORT(x) ((x) << S_PORT) +#define F_PORT V_PORT(1U) + +#define S_FCOE 0 +#define V_FCOE(x) ((x) << S_FCOE) +#define F_FCOE V_FCOE(1U) + +#define A_TP_INGRESS_CONFIG 0x141 + +#define S_VNIC 11 +#define V_VNIC(x) ((x) << S_VNIC) +#define F_VNIC V_VNIC(1U) + +#define S_CSUM_HAS_PSEUDO_HDR 10 +#define V_CSUM_HAS_PSEUDO_HDR(x) ((x) << S_CSUM_HAS_PSEUDO_HDR) +#define F_CSUM_HAS_PSEUDO_HDR V_CSUM_HAS_PSEUDO_HDR(1U) + +/* registers for module MPS */ +#define MPS_BASE_ADDR 0x9000 + +#define S_REPLICATE 11 +#define V_REPLICATE(x) ((x) << S_REPLICATE) +#define F_REPLICATE V_REPLICATE(1U) + +#define S_PF 8 +#define M_PF 0x7U +#define V_PF(x) ((x) << S_PF) +#define G_PF(x) (((x) >> S_PF) & M_PF) + +#define S_VF_VALID 7 +#define V_VF_VALID(x) ((x) << S_VF_VALID) +#define F_VF_VALID V_VF_VALID(1U) + +#define S_VF 0 +#define M_VF 0x7fU +#define V_VF(x) ((x) << S_VF) +#define G_VF(x) (((x) >> S_VF) & M_VF) + +#define A_MPS_PORT_STAT_TX_PORT_BYTES_L 0x400 +#define A_MPS_PORT_STAT_TX_PORT_BYTES_H 0x404 +#define A_MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408 +#define A_MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c +#define A_MPS_PORT_STAT_TX_PORT_BCAST_L 0x410 +#define A_MPS_PORT_STAT_TX_PORT_BCAST_H 0x414 +#define A_MPS_PORT_STAT_TX_PORT_MCAST_L 0x418 +#define A_MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c +#define A_MPS_PORT_STAT_TX_PORT_UCAST_L 0x420 +#define A_MPS_PORT_STAT_TX_PORT_UCAST_H 0x424 +#define A_MPS_PORT_STAT_TX_PORT_ERROR_L 0x428 +#define A_MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c +#define A_MPS_PORT_STAT_TX_PORT_64B_L 0x430 +#define A_MPS_PORT_STAT_TX_PORT_64B_H 0x434 +#define A_MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438 +#define A_MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c +#define 
A_MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440 +#define A_MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444 +#define A_MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448 +#define A_MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c +#define A_MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450 +#define A_MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454 +#define A_MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458 +#define A_MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c +#define A_MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460 +#define A_MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464 +#define A_MPS_PORT_STAT_TX_PORT_DROP_L 0x468 +#define A_MPS_PORT_STAT_TX_PORT_DROP_H 0x46c +#define A_MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470 +#define A_MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474 +#define A_MPS_PORT_STAT_TX_PORT_PPP0_L 0x478 +#define A_MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c +#define A_MPS_PORT_STAT_TX_PORT_PPP1_L 0x480 +#define A_MPS_PORT_STAT_TX_PORT_PPP1_H 0x484 +#define A_MPS_PORT_STAT_TX_PORT_PPP2_L 0x488 +#define A_MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c +#define A_MPS_PORT_STAT_TX_PORT_PPP3_L 0x490 +#define A_MPS_PORT_STAT_TX_PORT_PPP3_H 0x494 +#define A_MPS_PORT_STAT_TX_PORT_PPP4_L 0x498 +#define A_MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c +#define A_MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0 +#define A_MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4 +#define A_MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8 +#define A_MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac +#define A_MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0 +#define A_MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4 +#define A_MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0 +#define A_MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4 +#define A_MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8 +#define A_MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc +#define A_MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0 +#define A_MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4 +#define A_MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8 +#define A_MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc +#define A_MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0 +#define A_MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4 +#define A_MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8 +#define A_MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec +#define A_MPS_PORT_STAT_LB_PORT_64B_L 0x4f0 +#define A_MPS_PORT_STAT_LB_PORT_64B_H 0x4f4 +#define A_MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8 +#define A_MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc +#define A_MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500 +#define A_MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504 +#define A_MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508 +#define A_MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c +#define A_MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510 +#define A_MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514 +#define A_MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518 +#define A_MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c +#define A_MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520 +#define A_MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524 +#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528 +#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES_L 0x528 +#define A_MPS_PORT_STAT_LB_PORT_DROP_FRAMES_H 0x52c +#define A_MPS_PORT_STAT_RX_PORT_BYTES_L 0x540 +#define A_MPS_PORT_STAT_RX_PORT_BYTES_H 0x544 +#define A_MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548 +#define A_MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c +#define A_MPS_PORT_STAT_RX_PORT_BCAST_L 0x550 +#define A_MPS_PORT_STAT_RX_PORT_BCAST_H 0x554 +#define A_MPS_PORT_STAT_RX_PORT_MCAST_L 0x558 +#define A_MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c +#define A_MPS_PORT_STAT_RX_PORT_UCAST_L 0x560 +#define A_MPS_PORT_STAT_RX_PORT_UCAST_H 0x564 +#define A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568 +#define A_MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c +#define A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570 +#define A_MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574 +#define 
A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578 +#define A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c +#define A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580 +#define A_MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584 +#define A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588 +#define A_MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c +#define A_MPS_PORT_STAT_RX_PORT_64B_L 0x590 +#define A_MPS_PORT_STAT_RX_PORT_64B_H 0x594 +#define A_MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598 +#define A_MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c +#define A_MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0 +#define A_MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4 +#define A_MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8 +#define A_MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac +#define A_MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0 +#define A_MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4 +#define A_MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8 +#define A_MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc +#define A_MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0 +#define A_MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4 +#define A_MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8 +#define A_MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc +#define A_MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0 +#define A_MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4 +#define A_MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8 +#define A_MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc +#define A_MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0 +#define A_MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4 +#define A_MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8 +#define A_MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec +#define A_MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0 +#define A_MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4 +#define A_MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8 +#define A_MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc +#define A_MPS_PORT_STAT_RX_PORT_PPP6_L 0x600 +#define A_MPS_PORT_STAT_RX_PORT_PPP6_H 0x604 +#define A_MPS_PORT_STAT_RX_PORT_PPP7_L 0x608 +#define A_MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c +#define A_MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610 +#define A_MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614 +#define A_MPS_CMN_CTL 0x9000 + +#define S_NUMPORTS 0 +#define M_NUMPORTS 0x3U +#define V_NUMPORTS(x) ((x) << S_NUMPORTS) +#define G_NUMPORTS(x) (((x) >> S_NUMPORTS) & M_NUMPORTS) + +#define A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640 +#define A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644 +#define A_MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648 +#define A_MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c +#define A_MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650 +#define A_MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654 +#define A_MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658 +#define A_MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c +#define A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660 +#define A_MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664 +#define A_MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668 +#define A_MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c +#define A_MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670 +#define A_MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674 +#define A_MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678 +#define A_MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c +#define A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680 +#define A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684 +#define A_MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688 +#define A_MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c +#define A_MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690 +#define A_MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694 +#define A_MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698 +#define A_MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c +#define A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0 +#define A_MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4 +#define A_MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8 +#define 
A_MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac +#define A_MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0 +#define A_MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4 +#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8 +#define A_MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc + +/* registers for module ULP_RX */ +#define ULP_RX_BASE_ADDR 0x19150 + +#define S_HPZ0 0 +#define M_HPZ0 0xfU +#define V_HPZ0(x) ((x) << S_HPZ0) +#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0) + +#define A_ULP_RX_TDDP_PSZ 0x19178 + +/* registers for module SF */ +#define SF_BASE_ADDR 0x193f8 + +#define A_SF_DATA 0x193f8 +#define A_SF_OP 0x193fc + +#define S_SF_LOCK 4 +#define V_SF_LOCK(x) ((x) << S_SF_LOCK) +#define F_SF_LOCK V_SF_LOCK(1U) + +#define S_CONT 3 +#define V_CONT(x) ((x) << S_CONT) +#define F_CONT V_CONT(1U) + +#define S_BYTECNT 1 +#define M_BYTECNT 0x3U +#define V_BYTECNT(x) ((x) << S_BYTECNT) +#define G_BYTECNT(x) (((x) >> S_BYTECNT) & M_BYTECNT) + +#define S_OP 0 +#define V_OP(x) ((x) << S_OP) +#define F_OP V_OP(1U) + +/* registers for module PL */ +#define PL_BASE_ADDR 0x19400 + +#define S_SOURCEPF 8 +#define M_SOURCEPF 0x7U +#define V_SOURCEPF(x) ((x) << S_SOURCEPF) +#define G_SOURCEPF(x) (((x) >> S_SOURCEPF) & M_SOURCEPF) + +#define A_PL_PF_INT_ENABLE 0x3c4 + +#define S_PFSW 3 +#define V_PFSW(x) ((x) << S_PFSW) +#define F_PFSW V_PFSW(1U) + +#define S_PFCIM 1 +#define V_PFCIM(x) ((x) << S_PFCIM) +#define F_PFCIM V_PFCIM(1U) + +#define A_PL_WHOAMI 0x19400 + +#define A_PL_RST 0x19428 + +#define A_PL_INT_MAP0 0x19414 + +#define S_PIORST 1 +#define V_PIORST(x) ((x) << S_PIORST) +#define F_PIORST V_PIORST(1U) + +#define S_PIORSTMODE 0 +#define V_PIORSTMODE(x) ((x) << S_PIORSTMODE) +#define F_PIORSTMODE V_PIORSTMODE(1U) + +#define A_PL_REV 0x1943c + +#define S_REV 0 +#define M_REV 0xfU +#define V_REV(x) ((x) << S_REV) +#define G_REV(x) (((x) >> S_REV) & M_REV) diff --git a/drivers/net/cxgbe/base/t4_regs_values.h b/drivers/net/cxgbe/base/t4_regs_values.h new file mode 100644 index 0000000000..181bd9d268 --- /dev/null +++ b/drivers/net/cxgbe/base/t4_regs_values.h @@ -0,0 +1,168 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Chelsio Communications. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
 IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __T4_REGS_VALUES_H__
+#define __T4_REGS_VALUES_H__
+
+/*
+ * This file contains definitions for various T4 register value hardware
+ * constants.  The types of values encoded here are predominantly those for
+ * register fields which control "modal" behavior.  For the most part, we do
+ * not include definitions for register fields which are simple numeric
+ * metrics, etc.
+ */
+
+/*
+ * SGE definitions.
+ * ================
+ */
+
+/*
+ * SGE register field values.
+ */
+
+/* CONTROL register */
+#define X_RXPKTCPLMODE_SPLIT		1
+#define X_INGPCIEBOUNDARY_32B		0
+#define X_INGPADBOUNDARY_SHIFT		5
+
+/* CONTROL2 register */
+#define X_INGPACKBOUNDARY_SHIFT		5
+#define X_INGPACKBOUNDARY_16B		0
+
+/* GTS register */
+#define X_TIMERREG_RESTART_COUNTER	6
+#define X_TIMERREG_UPDATE_CIDX		7
+
+/*
+ * Egress Context field values
+ */
+#define X_FETCHBURSTMIN_64B		2
+#define X_FETCHBURSTMAX_256B		2
+#define X_FETCHBURSTMAX_512B		3
+
+#define X_HOSTFCMODE_NONE		0
+
+/*
+ * Ingress Context field values
+ */
+#define X_UPDATEDELIVERY_INTERRUPT	1
+
+#define X_RSPD_TYPE_FLBUF		0
+#define X_RSPD_TYPE_CPL			1
+
+/*
+ * Context field definitions.  This is by no means a complete list of SGE
+ * Context fields.  In the vast majority of cases the firmware initializes
+ * things the way they need to be set up.  But in a few small cases, we need
+ * to compute new values and ship them off to the firmware to be applied to
+ * the SGE Contexts ...
+ */
+
+/*
+ * Congestion Manager Definitions.
+ */
+#define S_CONMCTXT_CNGTPMODE		19
+#define M_CONMCTXT_CNGTPMODE		0x3
+#define V_CONMCTXT_CNGTPMODE(x)		((x) << S_CONMCTXT_CNGTPMODE)
+#define G_CONMCTXT_CNGTPMODE(x)		\
+	(((x) >> S_CONMCTXT_CNGTPMODE) & M_CONMCTXT_CNGTPMODE)
+#define S_CONMCTXT_CNGCHMAP		0
+#define M_CONMCTXT_CNGCHMAP		0xffff
+#define V_CONMCTXT_CNGCHMAP(x)		((x) << S_CONMCTXT_CNGCHMAP)
+#define G_CONMCTXT_CNGCHMAP(x)		\
+	(((x) >> S_CONMCTXT_CNGCHMAP) & M_CONMCTXT_CNGCHMAP)
+
+#define X_CONMCTXT_CNGTPMODE_QUEUE	1
+#define X_CONMCTXT_CNGTPMODE_CHANNEL	2
+
+/*
+ * T5 and later support a new BAR2-based doorbell mechanism for Egress Queues.
+ * The User Doorbells are each 128 bytes in length with a Simple Doorbell at
+ * offsets 8x and a Write Combining single 64-byte Egress Queue Unit
+ * (X_IDXSIZE_UNIT) Gather Buffer interface at offset 64.  For Ingress Queues,
+ * we have a Going To Sleep register at offsets 8x+4.
+ *
+ * As noted above, we have many instances of the Simple Doorbell and Going To
+ * Sleep registers at offsets 8x and 8x+4, respectively.  We want to use a
+ * non-64-byte aligned offset for the Simple Doorbell in order to attempt to
+ * avoid buffering of the writes to the Simple Doorbell and we want to use a
+ * non-contiguous offset for the Going To Sleep writes in order to avoid
+ * possible combining between them.
+ */
+#define SGE_UDB_SIZE		128
+#define SGE_UDB_KDOORBELL	8
+#define SGE_UDB_GTS		20
+
+/*
+ * CIM definitions.
+ * ================
+ */
+
+/*
+ * CIM register field values.
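+ * (X_MBOWNER_* below are the encodings of the MBOWNER field of
+ * A_CIM_PF_MAILBOX_CTRL in t4_regs.h: no owner, owned by the firmware,
+ * or owned by the host driver via PL.)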
+/* + * CIM definitions. + * ================ + */ + +/* + * CIM register field values. + */ +#define X_MBOWNER_NONE 0 +#define X_MBOWNER_FW 1 +#define X_MBOWNER_PL 2 + +/* + * PCI-E definitions. + * ================== + */ +#define X_WINDOW_SHIFT 10 +#define X_PCIEOFST_SHIFT 10 + +/* + * TP definitions. + * =============== + */ + +/* + * TP_VLAN_PRI_MAP controls which subset of fields will be present in the + * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP + * selects for a particular field being present. These fields, when present + * in the Compressed Filter Tuple, have the following widths in bits. + */ +#define W_FT_FCOE 1 +#define W_FT_PORT 3 +#define W_FT_VNIC_ID 17 +#define W_FT_VLAN 17 +#define W_FT_TOS 8 +#define W_FT_PROTOCOL 8 +#define W_FT_ETHERTYPE 16 +#define W_FT_MACMATCH 9 +#define W_FT_MPSHITTYPE 3 +#define W_FT_FRAGMENTATION 1 + +#endif /* __T4_REGS_VALUES_H__ */
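To make the width table concrete: the Compressed Filter Tuple is the concatenation of whichever fields TP_VLAN_PRI_MAP enables, so its total width is just the sum of the corresponding W_FT_* values. A minimal sketch, assuming hypothetical boolean stand-ins for decoded TP_VLAN_PRI_MAP bits (none of these helper names appear in the patch):

static unsigned int filter_tuple_width(int has_port, int has_vlan,
				       int has_ethertype)
{
	unsigned int width = 0;	/* bits */

	if (has_port)
		width += W_FT_PORT;		/* 3 bits */
	if (has_vlan)
		width += W_FT_VLAN;		/* 17 bits */
	if (has_ethertype)
		width += W_FT_ETHERTYPE;	/* 16 bits */
	return width;
}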
diff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h new file mode 100644 index 0000000000..74f19fe7f2 --- /dev/null +++ b/drivers/net/cxgbe/base/t4fw_interface.h @@ -0,0 +1,1730 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Chelsio Communications. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Chelsio Communications nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _T4FW_INTERFACE_H_ +#define _T4FW_INTERFACE_H_ + +/****************************************************************************** + * R E T U R N V A L U E S + ********************************/ + +enum fw_retval { + FW_SUCCESS = 0, /* completed successfully */ + FW_EPERM = 1, /* operation not permitted */ + FW_ENOENT = 2, /* no such file or directory */ + FW_EIO = 5, /* input/output error; hw bad */ + FW_ENOEXEC = 8, /* exec format error; inv microcode */ + FW_EAGAIN = 11, /* try again */ + FW_ENOMEM = 12, /* out of memory */ + FW_EFAULT = 14, /* bad address; fw bad */ + FW_EBUSY = 16, /* resource busy */ + FW_EEXIST = 17, /* file exists */ + FW_ENODEV = 19, /* no such device */ + FW_EINVAL = 22, /* invalid argument */ + FW_ENOSPC = 28, /* no space left on device */ + FW_ENOSYS = 38, /* functionality not implemented */ + FW_ENODATA = 61, /* no data available */ + FW_EPROTO = 71, /* protocol error */ + FW_EADDRINUSE = 98, /* address already in use */ + FW_EADDRNOTAVAIL = 99, /* cannot assign requested address */ + FW_ENETDOWN = 100, /* network is down */ + FW_ENETUNREACH = 101, /* network is unreachable */ + FW_ENOBUFS = 105, /* no buffer space available */ + FW_ETIMEDOUT = 110, /* timeout */ + FW_EINPROGRESS = 115, /* fw internal */ +}; + +/****************************************************************************** + * M E M O R Y T Y P E s + ******************************/ + +enum fw_memtype { + FW_MEMTYPE_EDC0 = 0x0, + FW_MEMTYPE_EDC1 = 0x1, + FW_MEMTYPE_EXTMEM = 0x2, + FW_MEMTYPE_FLASH = 0x4, + FW_MEMTYPE_INTERNAL = 0x5, + FW_MEMTYPE_EXTMEM1 = 0x6, +}; + +/****************************************************************************** + * W O R K R E Q U E S T s + ********************************/ + +enum fw_wr_opcodes { + FW_ETH_TX_PKT_WR = 0x08, + FW_ETH_TX_PKTS_WR = 0x09, +}; + +/* + * Generic work request header flit0 + */ +struct fw_wr_hdr { + __be32 hi; + __be32 lo; +}; + +/* work request opcode (hi) + */ +#define S_FW_WR_OP 24 +#define M_FW_WR_OP 0xff +#define V_FW_WR_OP(x) ((x) << S_FW_WR_OP) +#define G_FW_WR_OP(x) (((x) >> S_FW_WR_OP) & M_FW_WR_OP) + +/* work request immediate data length (hi) + */ +#define S_FW_WR_IMMDLEN 0 +#define M_FW_WR_IMMDLEN 0xff +#define V_FW_WR_IMMDLEN(x) ((x) << S_FW_WR_IMMDLEN) +#define G_FW_WR_IMMDLEN(x) \ + (((x) >> S_FW_WR_IMMDLEN) & M_FW_WR_IMMDLEN) + +/* egress queue status update to egress queue status entry (lo) + */ +#define S_FW_WR_EQUEQ 30 +#define M_FW_WR_EQUEQ 0x1 +#define V_FW_WR_EQUEQ(x) ((x) << S_FW_WR_EQUEQ) +#define G_FW_WR_EQUEQ(x) (((x) >> S_FW_WR_EQUEQ) & M_FW_WR_EQUEQ) +#define F_FW_WR_EQUEQ V_FW_WR_EQUEQ(1U) + +/* length in units of 16 bytes (lo) + */ +#define S_FW_WR_LEN16 0 +#define M_FW_WR_LEN16 0xff +#define V_FW_WR_LEN16(x) ((x) << S_FW_WR_LEN16) +#define G_FW_WR_LEN16(x) (((x) >> S_FW_WR_LEN16) & M_FW_WR_LEN16) + +struct fw_eth_tx_pkt_wr { + __be32 op_immdlen; + __be32 equiq_to_len16; + __be64 r3; +}; + +#define S_FW_ETH_TX_PKT_WR_IMMDLEN 0 +#define M_FW_ETH_TX_PKT_WR_IMMDLEN 0x1ff +#define V_FW_ETH_TX_PKT_WR_IMMDLEN(x) ((x) << S_FW_ETH_TX_PKT_WR_IMMDLEN) +#define G_FW_ETH_TX_PKT_WR_IMMDLEN(x) \ + (((x) >> S_FW_ETH_TX_PKT_WR_IMMDLEN) & M_FW_ETH_TX_PKT_WR_IMMDLEN) + +struct fw_eth_tx_pkts_wr { + __be32 op_pkd; + __be32 equiq_to_len16; + __be32 r3; + __be16 plen; + __u8 npkt; + __u8 type; +};
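These S_/M_/V_/G_ families follow one convention used throughout these headers: V_ shifts a field value into position, G_ shifts and masks it back out, and F_ is the single-bit V_ form. A minimal sketch of assembling and inspecting the two work request header words (not part of the patch; the functions and their parameters are hypothetical, and cpu_to_be32()/be32_to_cpu() are assumed to come from the driver's compat layer):

static void build_eth_tx_pkt_wr(struct fw_eth_tx_pkt_wr *wr,
				unsigned int immdlen, unsigned int len16)
{
	/* hi word: opcode plus immediate-data length */
	wr->op_immdlen = cpu_to_be32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
				     V_FW_ETH_TX_PKT_WR_IMMDLEN(immdlen));
	/* lo word: ask for an egress queue status update and give the
	 * total request length in 16-byte units
	 */
	wr->equiq_to_len16 = cpu_to_be32(F_FW_WR_EQUEQ |
					 V_FW_WR_LEN16(len16));
}

static unsigned int wr_opcode(const struct fw_wr_hdr *hdr)
{
	/* G_ reverses the V_ packing: shift down, then mask */
	return G_FW_WR_OP(be32_to_cpu(hdr->hi));
}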
/****************************************************************************** + * C O M M A N D s + *********************/ + +/* + * The maximum length of time, in milliseconds, that we expect any firmware + * command to take to execute and return a reply to the host. The RESET + * and INITIALIZE commands can take a fair amount of time to execute but + * most execute in far less time than this maximum. This constant is used + * by host software to determine how long to wait for a firmware command + * reply before declaring the firmware as dead/unreachable ... + */ +#define FW_CMD_MAX_TIMEOUT 10000 + +/* + * If a host driver does a HELLO and discovers that there's already a MASTER + * selected, we may have to wait for that MASTER to finish issuing RESET, + * configuration and INITIALIZE commands. Also, there's a possibility that + * our own HELLO may get lost if it happens right as the MASTER is issuing a + * RESET command, so we need to be willing to make a few retries of our HELLO. + */ +#define FW_CMD_HELLO_TIMEOUT (3 * FW_CMD_MAX_TIMEOUT) +#define FW_CMD_HELLO_RETRIES 3 + +enum fw_cmd_opcodes { + FW_RESET_CMD = 0x03, + FW_HELLO_CMD = 0x04, + FW_BYE_CMD = 0x05, + FW_INITIALIZE_CMD = 0x06, + FW_CAPS_CONFIG_CMD = 0x07, + FW_PARAMS_CMD = 0x08, + FW_IQ_CMD = 0x10, + FW_EQ_ETH_CMD = 0x12, + FW_VI_CMD = 0x14, + FW_VI_MAC_CMD = 0x15, + FW_VI_RXMODE_CMD = 0x16, + FW_VI_ENABLE_CMD = 0x17, + FW_PORT_CMD = 0x1b, + FW_RSS_IND_TBL_CMD = 0x20, + FW_RSS_VI_CONFIG_CMD = 0x23, + FW_DEBUG_CMD = 0x81, +}; + +/* + * Generic command header flit0 + */ +struct fw_cmd_hdr { + __be32 hi; + __be32 lo; +}; + +#define S_FW_CMD_OP 24 +#define M_FW_CMD_OP 0xff +#define V_FW_CMD_OP(x) ((x) << S_FW_CMD_OP) +#define G_FW_CMD_OP(x) (((x) >> S_FW_CMD_OP) & M_FW_CMD_OP) + +#define S_FW_CMD_REQUEST 23 +#define M_FW_CMD_REQUEST 0x1 +#define V_FW_CMD_REQUEST(x) ((x) << S_FW_CMD_REQUEST) +#define G_FW_CMD_REQUEST(x) (((x) >> S_FW_CMD_REQUEST) & M_FW_CMD_REQUEST) +#define F_FW_CMD_REQUEST V_FW_CMD_REQUEST(1U) + +#define S_FW_CMD_READ 22 +#define M_FW_CMD_READ 0x1 +#define V_FW_CMD_READ(x) ((x) << S_FW_CMD_READ) +#define G_FW_CMD_READ(x) (((x) >> S_FW_CMD_READ) & M_FW_CMD_READ) +#define F_FW_CMD_READ V_FW_CMD_READ(1U) + +#define S_FW_CMD_WRITE 21 +#define M_FW_CMD_WRITE 0x1 +#define V_FW_CMD_WRITE(x) ((x) << S_FW_CMD_WRITE) +#define G_FW_CMD_WRITE(x) (((x) >> S_FW_CMD_WRITE) & M_FW_CMD_WRITE) +#define F_FW_CMD_WRITE V_FW_CMD_WRITE(1U) + +#define S_FW_CMD_EXEC 20 +#define M_FW_CMD_EXEC 0x1 +#define V_FW_CMD_EXEC(x) ((x) << S_FW_CMD_EXEC) +#define G_FW_CMD_EXEC(x) (((x) >> S_FW_CMD_EXEC) & M_FW_CMD_EXEC) +#define F_FW_CMD_EXEC V_FW_CMD_EXEC(1U) + +#define S_FW_CMD_RETVAL 8 +#define M_FW_CMD_RETVAL 0xff +#define V_FW_CMD_RETVAL(x) ((x) << S_FW_CMD_RETVAL) +#define G_FW_CMD_RETVAL(x) (((x) >> S_FW_CMD_RETVAL) & M_FW_CMD_RETVAL) + +#define S_FW_CMD_LEN16 0 +#define M_FW_CMD_LEN16 0xff +#define V_FW_CMD_LEN16(x) ((x) << S_FW_CMD_LEN16) +#define G_FW_CMD_LEN16(x) (((x) >> S_FW_CMD_LEN16) & M_FW_CMD_LEN16) + +#define FW_LEN16(fw_struct) V_FW_CMD_LEN16(sizeof(fw_struct) / 16) + +struct fw_reset_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be32 val; + __be32 halt_pkd; +}; + +#define S_FW_RESET_CMD_HALT 31 +#define M_FW_RESET_CMD_HALT 0x1 +#define V_FW_RESET_CMD_HALT(x) ((x) << S_FW_RESET_CMD_HALT) +#define G_FW_RESET_CMD_HALT(x) \ + (((x) >> S_FW_RESET_CMD_HALT) & M_FW_RESET_CMD_HALT) +#define F_FW_RESET_CMD_HALT V_FW_RESET_CMD_HALT(1U) + +enum { + FW_HELLO_CMD_STAGE_OS = 0, +}; + +struct fw_hello_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be32 err_to_clearinit; + __be32 fwrev; +}; + +#define S_FW_HELLO_CMD_ERR 31 +#define M_FW_HELLO_CMD_ERR 0x1 +#define V_FW_HELLO_CMD_ERR(x) ((x) << S_FW_HELLO_CMD_ERR) +#define 
G_FW_HELLO_CMD_ERR(x) \ + (((x) >> S_FW_HELLO_CMD_ERR) & M_FW_HELLO_CMD_ERR) +#define F_FW_HELLO_CMD_ERR V_FW_HELLO_CMD_ERR(1U) + +#define S_FW_HELLO_CMD_INIT 30 +#define M_FW_HELLO_CMD_INIT 0x1 +#define V_FW_HELLO_CMD_INIT(x) ((x) << S_FW_HELLO_CMD_INIT) +#define G_FW_HELLO_CMD_INIT(x) \ + (((x) >> S_FW_HELLO_CMD_INIT) & M_FW_HELLO_CMD_INIT) +#define F_FW_HELLO_CMD_INIT V_FW_HELLO_CMD_INIT(1U) + +#define S_FW_HELLO_CMD_MASTERDIS 29 +#define M_FW_HELLO_CMD_MASTERDIS 0x1 +#define V_FW_HELLO_CMD_MASTERDIS(x) ((x) << S_FW_HELLO_CMD_MASTERDIS) +#define G_FW_HELLO_CMD_MASTERDIS(x) \ + (((x) >> S_FW_HELLO_CMD_MASTERDIS) & M_FW_HELLO_CMD_MASTERDIS) +#define F_FW_HELLO_CMD_MASTERDIS V_FW_HELLO_CMD_MASTERDIS(1U) + +#define S_FW_HELLO_CMD_MASTERFORCE 28 +#define M_FW_HELLO_CMD_MASTERFORCE 0x1 +#define V_FW_HELLO_CMD_MASTERFORCE(x) ((x) << S_FW_HELLO_CMD_MASTERFORCE) +#define G_FW_HELLO_CMD_MASTERFORCE(x) \ + (((x) >> S_FW_HELLO_CMD_MASTERFORCE) & M_FW_HELLO_CMD_MASTERFORCE) +#define F_FW_HELLO_CMD_MASTERFORCE V_FW_HELLO_CMD_MASTERFORCE(1U) + +#define S_FW_HELLO_CMD_MBMASTER 24 +#define M_FW_HELLO_CMD_MBMASTER 0xf +#define V_FW_HELLO_CMD_MBMASTER(x) ((x) << S_FW_HELLO_CMD_MBMASTER) +#define G_FW_HELLO_CMD_MBMASTER(x) \ + (((x) >> S_FW_HELLO_CMD_MBMASTER) & M_FW_HELLO_CMD_MBMASTER) + +#define S_FW_HELLO_CMD_MBASYNCNOT 20 +#define M_FW_HELLO_CMD_MBASYNCNOT 0x7 +#define V_FW_HELLO_CMD_MBASYNCNOT(x) ((x) << S_FW_HELLO_CMD_MBASYNCNOT) +#define G_FW_HELLO_CMD_MBASYNCNOT(x) \ + (((x) >> S_FW_HELLO_CMD_MBASYNCNOT) & M_FW_HELLO_CMD_MBASYNCNOT) + +#define S_FW_HELLO_CMD_STAGE 17 +#define M_FW_HELLO_CMD_STAGE 0x7 +#define V_FW_HELLO_CMD_STAGE(x) ((x) << S_FW_HELLO_CMD_STAGE) +#define G_FW_HELLO_CMD_STAGE(x) \ + (((x) >> S_FW_HELLO_CMD_STAGE) & M_FW_HELLO_CMD_STAGE) + +#define S_FW_HELLO_CMD_CLEARINIT 16 +#define M_FW_HELLO_CMD_CLEARINIT 0x1 +#define V_FW_HELLO_CMD_CLEARINIT(x) ((x) << S_FW_HELLO_CMD_CLEARINIT) +#define G_FW_HELLO_CMD_CLEARINIT(x) \ + (((x) >> S_FW_HELLO_CMD_CLEARINIT) & M_FW_HELLO_CMD_CLEARINIT) +#define F_FW_HELLO_CMD_CLEARINIT V_FW_HELLO_CMD_CLEARINIT(1U) + +struct fw_bye_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be64 r3; +}; + +struct fw_initialize_cmd { + __be32 op_to_write; + __be32 retval_len16; + __be64 r3; +}; + +enum fw_caps_config_nic { + FW_CAPS_CONFIG_NIC_HASHFILTER = 0x00000020, + FW_CAPS_CONFIG_NIC_ETHOFLD = 0x00000040, +}; + +enum fw_memtype_cf { + FW_MEMTYPE_CF_FLASH = FW_MEMTYPE_FLASH, +}; + +struct fw_caps_config_cmd { + __be32 op_to_write; + __be32 cfvalid_to_len16; + __be32 r2; + __be32 hwmbitmap; + __be16 nbmcaps; + __be16 linkcaps; + __be16 switchcaps; + __be16 r3; + __be16 niccaps; + __be16 toecaps; + __be16 rdmacaps; + __be16 r4; + __be16 iscsicaps; + __be16 fcoecaps; + __be32 cfcsum; + __be32 finiver; + __be32 finicsum; +}; + +#define S_FW_CAPS_CONFIG_CMD_CFVALID 27 +#define M_FW_CAPS_CONFIG_CMD_CFVALID 0x1 +#define V_FW_CAPS_CONFIG_CMD_CFVALID(x) ((x) << S_FW_CAPS_CONFIG_CMD_CFVALID) +#define G_FW_CAPS_CONFIG_CMD_CFVALID(x) \ + (((x) >> S_FW_CAPS_CONFIG_CMD_CFVALID) & M_FW_CAPS_CONFIG_CMD_CFVALID) +#define F_FW_CAPS_CONFIG_CMD_CFVALID V_FW_CAPS_CONFIG_CMD_CFVALID(1U) + +#define S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF 24 +#define M_FW_CAPS_CONFIG_CMD_MEMTYPE_CF 0x7 +#define V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) \ + ((x) << S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF) +#define G_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x) \ + (((x) >> S_FW_CAPS_CONFIG_CMD_MEMTYPE_CF) & \ + M_FW_CAPS_CONFIG_CMD_MEMTYPE_CF) + +#define S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF 16 +#define 
M_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF 0xff +#define V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) \ + ((x) << S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF) +#define G_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) \ + (((x) >> S_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF) & \ + M_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF) + +/* + * params command mnemonics + */ +enum fw_params_mnem { + FW_PARAMS_MNEM_DEV = 1, /* device params */ + FW_PARAMS_MNEM_PFVF = 2, /* function params */ + FW_PARAMS_MNEM_DMAQ = 4, /* dma queue params */ +}; + +/* + * device parameters + */ +enum fw_params_param_dev { + FW_PARAMS_PARAM_DEV_CCLK = 0x00, /* chip core clock in kHz */ + FW_PARAMS_PARAM_DEV_PORTVEC = 0x01, /* the port vector */ + FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17, +}; + +/* + * physical and virtual function parameters + */ +enum fw_params_param_pfvf { + FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP = 0x31 +}; + +/* + * dma queue parameters + */ +enum fw_params_param_dmaq { + FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01, + FW_PARAMS_PARAM_DMAQ_CONM_CTXT = 0x20, +}; + +#define S_FW_PARAMS_MNEM 24 +#define M_FW_PARAMS_MNEM 0xff +#define V_FW_PARAMS_MNEM(x) ((x) << S_FW_PARAMS_MNEM) +#define G_FW_PARAMS_MNEM(x) \ + (((x) >> S_FW_PARAMS_MNEM) & M_FW_PARAMS_MNEM) + +#define S_FW_PARAMS_PARAM_X 16 +#define M_FW_PARAMS_PARAM_X 0xff +#define V_FW_PARAMS_PARAM_X(x) ((x) << S_FW_PARAMS_PARAM_X) +#define G_FW_PARAMS_PARAM_X(x) \ + (((x) >> S_FW_PARAMS_PARAM_X) & M_FW_PARAMS_PARAM_X) + +#define S_FW_PARAMS_PARAM_Y 8 +#define M_FW_PARAMS_PARAM_Y 0xff +#define V_FW_PARAMS_PARAM_Y(x) ((x) << S_FW_PARAMS_PARAM_Y) +#define G_FW_PARAMS_PARAM_Y(x) \ + (((x) >> S_FW_PARAMS_PARAM_Y) & M_FW_PARAMS_PARAM_Y) + +#define S_FW_PARAMS_PARAM_Z 0 +#define M_FW_PARAMS_PARAM_Z 0xff +#define V_FW_PARAMS_PARAM_Z(x) ((x) << S_FW_PARAMS_PARAM_Z) +#define G_FW_PARAMS_PARAM_Z(x) \ + (((x) >> S_FW_PARAMS_PARAM_Z) & M_FW_PARAMS_PARAM_Z) + +#define S_FW_PARAMS_PARAM_YZ 0 +#define M_FW_PARAMS_PARAM_YZ 0xffff +#define V_FW_PARAMS_PARAM_YZ(x) ((x) << S_FW_PARAMS_PARAM_YZ) +#define G_FW_PARAMS_PARAM_YZ(x) \ + (((x) >> S_FW_PARAMS_PARAM_YZ) & M_FW_PARAMS_PARAM_YZ) + +struct fw_params_cmd { + __be32 op_to_vfn; + __be32 retval_len16; + struct fw_params_param { + __be32 mnem; + __be32 val; + } param[7]; +}; + +#define S_FW_PARAMS_CMD_PFN 8 +#define M_FW_PARAMS_CMD_PFN 0x7 +#define V_FW_PARAMS_CMD_PFN(x) ((x) << S_FW_PARAMS_CMD_PFN) +#define G_FW_PARAMS_CMD_PFN(x) \ + (((x) >> S_FW_PARAMS_CMD_PFN) & M_FW_PARAMS_CMD_PFN) + +#define S_FW_PARAMS_CMD_VFN 0 +#define M_FW_PARAMS_CMD_VFN 0xff +#define V_FW_PARAMS_CMD_VFN(x) ((x) << S_FW_PARAMS_CMD_VFN) +#define G_FW_PARAMS_CMD_VFN(x) \ + (((x) >> S_FW_PARAMS_CMD_VFN) & M_FW_PARAMS_CMD_VFN) + +/* + * ingress queue type; the first 1K ingress queues can have 0, 1 or 2 + * associated free lists and an interrupt; all other ingress queues lack + * these capabilities + */ +enum fw_iq_type { + FW_IQ_TYPE_FL_INT_CAP, +}; + +struct fw_iq_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be16 physiqid; + __be16 iqid; + __be16 fl0id; + __be16 fl1id; + __be32 type_to_iqandstindex; + __be16 iqdroprss_to_iqesize; + __be16 iqsize; + __be64 iqaddr; + __be32 iqns_to_fl0congen; + __be16 fl0dcaen_to_fl0cidxfthresh; + __be16 fl0size; + __be64 fl0addr; + __be32 fl1cngchmap_to_fl1congen; + __be16 fl1dcaen_to_fl1cidxfthresh; + __be16 fl1size; + __be64 fl1addr; +}; + +#define S_FW_IQ_CMD_PFN 8 +#define M_FW_IQ_CMD_PFN 0x7 +#define V_FW_IQ_CMD_PFN(x) ((x) << S_FW_IQ_CMD_PFN) +#define G_FW_IQ_CMD_PFN(x) (((x) >> S_FW_IQ_CMD_PFN) & M_FW_IQ_CMD_PFN) + +#define S_FW_IQ_CMD_VFN 0 
+#define M_FW_IQ_CMD_VFN 0xff +#define V_FW_IQ_CMD_VFN(x) ((x) << S_FW_IQ_CMD_VFN) +#define G_FW_IQ_CMD_VFN(x) (((x) >> S_FW_IQ_CMD_VFN) & M_FW_IQ_CMD_VFN) + +#define S_FW_IQ_CMD_ALLOC 31 +#define M_FW_IQ_CMD_ALLOC 0x1 +#define V_FW_IQ_CMD_ALLOC(x) ((x) << S_FW_IQ_CMD_ALLOC) +#define G_FW_IQ_CMD_ALLOC(x) \ + (((x) >> S_FW_IQ_CMD_ALLOC) & M_FW_IQ_CMD_ALLOC) +#define F_FW_IQ_CMD_ALLOC V_FW_IQ_CMD_ALLOC(1U) + +#define S_FW_IQ_CMD_FREE 30 +#define M_FW_IQ_CMD_FREE 0x1 +#define V_FW_IQ_CMD_FREE(x) ((x) << S_FW_IQ_CMD_FREE) +#define G_FW_IQ_CMD_FREE(x) (((x) >> S_FW_IQ_CMD_FREE) & M_FW_IQ_CMD_FREE) +#define F_FW_IQ_CMD_FREE V_FW_IQ_CMD_FREE(1U) + +#define S_FW_IQ_CMD_IQSTART 28 +#define M_FW_IQ_CMD_IQSTART 0x1 +#define V_FW_IQ_CMD_IQSTART(x) ((x) << S_FW_IQ_CMD_IQSTART) +#define G_FW_IQ_CMD_IQSTART(x) \ + (((x) >> S_FW_IQ_CMD_IQSTART) & M_FW_IQ_CMD_IQSTART) +#define F_FW_IQ_CMD_IQSTART V_FW_IQ_CMD_IQSTART(1U) + +#define S_FW_IQ_CMD_IQSTOP 27 +#define M_FW_IQ_CMD_IQSTOP 0x1 +#define V_FW_IQ_CMD_IQSTOP(x) ((x) << S_FW_IQ_CMD_IQSTOP) +#define G_FW_IQ_CMD_IQSTOP(x) \ + (((x) >> S_FW_IQ_CMD_IQSTOP) & M_FW_IQ_CMD_IQSTOP) +#define F_FW_IQ_CMD_IQSTOP V_FW_IQ_CMD_IQSTOP(1U) + +#define S_FW_IQ_CMD_TYPE 29 +#define M_FW_IQ_CMD_TYPE 0x7 +#define V_FW_IQ_CMD_TYPE(x) ((x) << S_FW_IQ_CMD_TYPE) +#define G_FW_IQ_CMD_TYPE(x) (((x) >> S_FW_IQ_CMD_TYPE) & M_FW_IQ_CMD_TYPE) + +#define S_FW_IQ_CMD_IQASYNCH 28 +#define M_FW_IQ_CMD_IQASYNCH 0x1 +#define V_FW_IQ_CMD_IQASYNCH(x) ((x) << S_FW_IQ_CMD_IQASYNCH) +#define G_FW_IQ_CMD_IQASYNCH(x) \ + (((x) >> S_FW_IQ_CMD_IQASYNCH) & M_FW_IQ_CMD_IQASYNCH) +#define F_FW_IQ_CMD_IQASYNCH V_FW_IQ_CMD_IQASYNCH(1U) + +#define S_FW_IQ_CMD_VIID 16 +#define M_FW_IQ_CMD_VIID 0xfff +#define V_FW_IQ_CMD_VIID(x) ((x) << S_FW_IQ_CMD_VIID) +#define G_FW_IQ_CMD_VIID(x) (((x) >> S_FW_IQ_CMD_VIID) & M_FW_IQ_CMD_VIID) + +#define S_FW_IQ_CMD_IQANDST 15 +#define M_FW_IQ_CMD_IQANDST 0x1 +#define V_FW_IQ_CMD_IQANDST(x) ((x) << S_FW_IQ_CMD_IQANDST) +#define G_FW_IQ_CMD_IQANDST(x) \ + (((x) >> S_FW_IQ_CMD_IQANDST) & M_FW_IQ_CMD_IQANDST) +#define F_FW_IQ_CMD_IQANDST V_FW_IQ_CMD_IQANDST(1U) + +#define S_FW_IQ_CMD_IQANUD 12 +#define M_FW_IQ_CMD_IQANUD 0x3 +#define V_FW_IQ_CMD_IQANUD(x) ((x) << S_FW_IQ_CMD_IQANUD) +#define G_FW_IQ_CMD_IQANUD(x) \ + (((x) >> S_FW_IQ_CMD_IQANUD) & M_FW_IQ_CMD_IQANUD) + +#define S_FW_IQ_CMD_IQANDSTINDEX 0 +#define M_FW_IQ_CMD_IQANDSTINDEX 0xfff +#define V_FW_IQ_CMD_IQANDSTINDEX(x) ((x) << S_FW_IQ_CMD_IQANDSTINDEX) +#define G_FW_IQ_CMD_IQANDSTINDEX(x) \ + (((x) >> S_FW_IQ_CMD_IQANDSTINDEX) & M_FW_IQ_CMD_IQANDSTINDEX) + +#define S_FW_IQ_CMD_IQGTSMODE 14 +#define M_FW_IQ_CMD_IQGTSMODE 0x1 +#define V_FW_IQ_CMD_IQGTSMODE(x) ((x) << S_FW_IQ_CMD_IQGTSMODE) +#define G_FW_IQ_CMD_IQGTSMODE(x) \ + (((x) >> S_FW_IQ_CMD_IQGTSMODE) & M_FW_IQ_CMD_IQGTSMODE) +#define F_FW_IQ_CMD_IQGTSMODE V_FW_IQ_CMD_IQGTSMODE(1U) + +#define S_FW_IQ_CMD_IQPCIECH 12 +#define M_FW_IQ_CMD_IQPCIECH 0x3 +#define V_FW_IQ_CMD_IQPCIECH(x) ((x) << S_FW_IQ_CMD_IQPCIECH) +#define G_FW_IQ_CMD_IQPCIECH(x) \ + (((x) >> S_FW_IQ_CMD_IQPCIECH) & M_FW_IQ_CMD_IQPCIECH) + +#define S_FW_IQ_CMD_IQINTCNTTHRESH 4 +#define M_FW_IQ_CMD_IQINTCNTTHRESH 0x3 +#define V_FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << S_FW_IQ_CMD_IQINTCNTTHRESH) +#define G_FW_IQ_CMD_IQINTCNTTHRESH(x) \ + (((x) >> S_FW_IQ_CMD_IQINTCNTTHRESH) & M_FW_IQ_CMD_IQINTCNTTHRESH) + +#define S_FW_IQ_CMD_IQESIZE 0 +#define M_FW_IQ_CMD_IQESIZE 0x3 +#define V_FW_IQ_CMD_IQESIZE(x) ((x) << S_FW_IQ_CMD_IQESIZE) +#define G_FW_IQ_CMD_IQESIZE(x) \ + (((x) >> S_FW_IQ_CMD_IQESIZE) & 
M_FW_IQ_CMD_IQESIZE) + +#define S_FW_IQ_CMD_IQFLINTCONGEN 27 +#define M_FW_IQ_CMD_IQFLINTCONGEN 0x1 +#define V_FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << S_FW_IQ_CMD_IQFLINTCONGEN) +#define G_FW_IQ_CMD_IQFLINTCONGEN(x) \ + (((x) >> S_FW_IQ_CMD_IQFLINTCONGEN) & M_FW_IQ_CMD_IQFLINTCONGEN) +#define F_FW_IQ_CMD_IQFLINTCONGEN V_FW_IQ_CMD_IQFLINTCONGEN(1U) + +#define S_FW_IQ_CMD_FL0CNGCHMAP 20 +#define M_FW_IQ_CMD_FL0CNGCHMAP 0xf +#define V_FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << S_FW_IQ_CMD_FL0CNGCHMAP) +#define G_FW_IQ_CMD_FL0CNGCHMAP(x) \ + (((x) >> S_FW_IQ_CMD_FL0CNGCHMAP) & M_FW_IQ_CMD_FL0CNGCHMAP) + +#define S_FW_IQ_CMD_FL0DATARO 12 +#define M_FW_IQ_CMD_FL0DATARO 0x1 +#define V_FW_IQ_CMD_FL0DATARO(x) ((x) << S_FW_IQ_CMD_FL0DATARO) +#define G_FW_IQ_CMD_FL0DATARO(x) \ + (((x) >> S_FW_IQ_CMD_FL0DATARO) & M_FW_IQ_CMD_FL0DATARO) +#define F_FW_IQ_CMD_FL0DATARO V_FW_IQ_CMD_FL0DATARO(1U) + +#define S_FW_IQ_CMD_FL0CONGCIF 11 +#define M_FW_IQ_CMD_FL0CONGCIF 0x1 +#define V_FW_IQ_CMD_FL0CONGCIF(x) ((x) << S_FW_IQ_CMD_FL0CONGCIF) +#define G_FW_IQ_CMD_FL0CONGCIF(x) \ + (((x) >> S_FW_IQ_CMD_FL0CONGCIF) & M_FW_IQ_CMD_FL0CONGCIF) +#define F_FW_IQ_CMD_FL0CONGCIF V_FW_IQ_CMD_FL0CONGCIF(1U) + +#define S_FW_IQ_CMD_FL0FETCHRO 6 +#define M_FW_IQ_CMD_FL0FETCHRO 0x1 +#define V_FW_IQ_CMD_FL0FETCHRO(x) ((x) << S_FW_IQ_CMD_FL0FETCHRO) +#define G_FW_IQ_CMD_FL0FETCHRO(x) \ + (((x) >> S_FW_IQ_CMD_FL0FETCHRO) & M_FW_IQ_CMD_FL0FETCHRO) +#define F_FW_IQ_CMD_FL0FETCHRO V_FW_IQ_CMD_FL0FETCHRO(1U) + +#define S_FW_IQ_CMD_FL0HOSTFCMODE 4 +#define M_FW_IQ_CMD_FL0HOSTFCMODE 0x3 +#define V_FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << S_FW_IQ_CMD_FL0HOSTFCMODE) +#define G_FW_IQ_CMD_FL0HOSTFCMODE(x) \ + (((x) >> S_FW_IQ_CMD_FL0HOSTFCMODE) & M_FW_IQ_CMD_FL0HOSTFCMODE) + +#define S_FW_IQ_CMD_FL0PADEN 2 +#define M_FW_IQ_CMD_FL0PADEN 0x1 +#define V_FW_IQ_CMD_FL0PADEN(x) ((x) << S_FW_IQ_CMD_FL0PADEN) +#define G_FW_IQ_CMD_FL0PADEN(x) \ + (((x) >> S_FW_IQ_CMD_FL0PADEN) & M_FW_IQ_CMD_FL0PADEN) +#define F_FW_IQ_CMD_FL0PADEN V_FW_IQ_CMD_FL0PADEN(1U) + +#define S_FW_IQ_CMD_FL0PACKEN 1 +#define M_FW_IQ_CMD_FL0PACKEN 0x1 +#define V_FW_IQ_CMD_FL0PACKEN(x) ((x) << S_FW_IQ_CMD_FL0PACKEN) +#define G_FW_IQ_CMD_FL0PACKEN(x) \ + (((x) >> S_FW_IQ_CMD_FL0PACKEN) & M_FW_IQ_CMD_FL0PACKEN) +#define F_FW_IQ_CMD_FL0PACKEN V_FW_IQ_CMD_FL0PACKEN(1U) + +#define S_FW_IQ_CMD_FL0CONGEN 0 +#define M_FW_IQ_CMD_FL0CONGEN 0x1 +#define V_FW_IQ_CMD_FL0CONGEN(x) ((x) << S_FW_IQ_CMD_FL0CONGEN) +#define G_FW_IQ_CMD_FL0CONGEN(x) \ + (((x) >> S_FW_IQ_CMD_FL0CONGEN) & M_FW_IQ_CMD_FL0CONGEN) +#define F_FW_IQ_CMD_FL0CONGEN V_FW_IQ_CMD_FL0CONGEN(1U) + +#define S_FW_IQ_CMD_FL0FBMIN 7 +#define M_FW_IQ_CMD_FL0FBMIN 0x7 +#define V_FW_IQ_CMD_FL0FBMIN(x) ((x) << S_FW_IQ_CMD_FL0FBMIN) +#define G_FW_IQ_CMD_FL0FBMIN(x) \ + (((x) >> S_FW_IQ_CMD_FL0FBMIN) & M_FW_IQ_CMD_FL0FBMIN) + +#define S_FW_IQ_CMD_FL0FBMAX 4 +#define M_FW_IQ_CMD_FL0FBMAX 0x7 +#define V_FW_IQ_CMD_FL0FBMAX(x) ((x) << S_FW_IQ_CMD_FL0FBMAX) +#define G_FW_IQ_CMD_FL0FBMAX(x) \ + (((x) >> S_FW_IQ_CMD_FL0FBMAX) & M_FW_IQ_CMD_FL0FBMAX) + +struct fw_eq_eth_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be32 eqid_pkd; + __be32 physeqid_pkd; + __be32 fetchszm_to_iqid; + __be32 dcaen_to_eqsize; + __be64 eqaddr; + __be32 autoequiqe_to_viid; + __be32 r8_lo; + __be64 r9; +}; + +#define S_FW_EQ_ETH_CMD_PFN 8 +#define M_FW_EQ_ETH_CMD_PFN 0x7 +#define V_FW_EQ_ETH_CMD_PFN(x) ((x) << S_FW_EQ_ETH_CMD_PFN) +#define G_FW_EQ_ETH_CMD_PFN(x) \ + (((x) >> S_FW_EQ_ETH_CMD_PFN) & M_FW_EQ_ETH_CMD_PFN) + +#define S_FW_EQ_ETH_CMD_VFN 0 +#define M_FW_EQ_ETH_CMD_VFN 0xff 
+#define V_FW_EQ_ETH_CMD_VFN(x) ((x) << S_FW_EQ_ETH_CMD_VFN) +#define G_FW_EQ_ETH_CMD_VFN(x) \ + (((x) >> S_FW_EQ_ETH_CMD_VFN) & M_FW_EQ_ETH_CMD_VFN) + +#define S_FW_EQ_ETH_CMD_ALLOC 31 +#define M_FW_EQ_ETH_CMD_ALLOC 0x1 +#define V_FW_EQ_ETH_CMD_ALLOC(x) ((x) << S_FW_EQ_ETH_CMD_ALLOC) +#define G_FW_EQ_ETH_CMD_ALLOC(x) \ + (((x) >> S_FW_EQ_ETH_CMD_ALLOC) & M_FW_EQ_ETH_CMD_ALLOC) +#define F_FW_EQ_ETH_CMD_ALLOC V_FW_EQ_ETH_CMD_ALLOC(1U) + +#define S_FW_EQ_ETH_CMD_FREE 30 +#define M_FW_EQ_ETH_CMD_FREE 0x1 +#define V_FW_EQ_ETH_CMD_FREE(x) ((x) << S_FW_EQ_ETH_CMD_FREE) +#define G_FW_EQ_ETH_CMD_FREE(x) \ + (((x) >> S_FW_EQ_ETH_CMD_FREE) & M_FW_EQ_ETH_CMD_FREE) +#define F_FW_EQ_ETH_CMD_FREE V_FW_EQ_ETH_CMD_FREE(1U) + +#define S_FW_EQ_ETH_CMD_EQSTART 28 +#define M_FW_EQ_ETH_CMD_EQSTART 0x1 +#define V_FW_EQ_ETH_CMD_EQSTART(x) ((x) << S_FW_EQ_ETH_CMD_EQSTART) +#define G_FW_EQ_ETH_CMD_EQSTART(x) \ + (((x) >> S_FW_EQ_ETH_CMD_EQSTART) & M_FW_EQ_ETH_CMD_EQSTART) +#define F_FW_EQ_ETH_CMD_EQSTART V_FW_EQ_ETH_CMD_EQSTART(1U) + +#define S_FW_EQ_ETH_CMD_EQID 0 +#define M_FW_EQ_ETH_CMD_EQID 0xfffff +#define V_FW_EQ_ETH_CMD_EQID(x) ((x) << S_FW_EQ_ETH_CMD_EQID) +#define G_FW_EQ_ETH_CMD_EQID(x) \ + (((x) >> S_FW_EQ_ETH_CMD_EQID) & M_FW_EQ_ETH_CMD_EQID) + +#define S_FW_EQ_ETH_CMD_FETCHRO 22 +#define M_FW_EQ_ETH_CMD_FETCHRO 0x1 +#define V_FW_EQ_ETH_CMD_FETCHRO(x) ((x) << S_FW_EQ_ETH_CMD_FETCHRO) +#define G_FW_EQ_ETH_CMD_FETCHRO(x) \ + (((x) >> S_FW_EQ_ETH_CMD_FETCHRO) & M_FW_EQ_ETH_CMD_FETCHRO) +#define F_FW_EQ_ETH_CMD_FETCHRO V_FW_EQ_ETH_CMD_FETCHRO(1U) + +#define S_FW_EQ_ETH_CMD_HOSTFCMODE 20 +#define M_FW_EQ_ETH_CMD_HOSTFCMODE 0x3 +#define V_FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << S_FW_EQ_ETH_CMD_HOSTFCMODE) +#define G_FW_EQ_ETH_CMD_HOSTFCMODE(x) \ + (((x) >> S_FW_EQ_ETH_CMD_HOSTFCMODE) & M_FW_EQ_ETH_CMD_HOSTFCMODE) + +#define S_FW_EQ_ETH_CMD_PCIECHN 16 +#define M_FW_EQ_ETH_CMD_PCIECHN 0x3 +#define V_FW_EQ_ETH_CMD_PCIECHN(x) ((x) << S_FW_EQ_ETH_CMD_PCIECHN) +#define G_FW_EQ_ETH_CMD_PCIECHN(x) \ + (((x) >> S_FW_EQ_ETH_CMD_PCIECHN) & M_FW_EQ_ETH_CMD_PCIECHN) + +#define S_FW_EQ_ETH_CMD_IQID 0 +#define M_FW_EQ_ETH_CMD_IQID 0xffff +#define V_FW_EQ_ETH_CMD_IQID(x) ((x) << S_FW_EQ_ETH_CMD_IQID) +#define G_FW_EQ_ETH_CMD_IQID(x) \ + (((x) >> S_FW_EQ_ETH_CMD_IQID) & M_FW_EQ_ETH_CMD_IQID) + +#define S_FW_EQ_ETH_CMD_FBMIN 23 +#define M_FW_EQ_ETH_CMD_FBMIN 0x7 +#define V_FW_EQ_ETH_CMD_FBMIN(x) ((x) << S_FW_EQ_ETH_CMD_FBMIN) +#define G_FW_EQ_ETH_CMD_FBMIN(x) \ + (((x) >> S_FW_EQ_ETH_CMD_FBMIN) & M_FW_EQ_ETH_CMD_FBMIN) + +#define S_FW_EQ_ETH_CMD_FBMAX 20 +#define M_FW_EQ_ETH_CMD_FBMAX 0x7 +#define V_FW_EQ_ETH_CMD_FBMAX(x) ((x) << S_FW_EQ_ETH_CMD_FBMAX) +#define G_FW_EQ_ETH_CMD_FBMAX(x) \ + (((x) >> S_FW_EQ_ETH_CMD_FBMAX) & M_FW_EQ_ETH_CMD_FBMAX) + +#define S_FW_EQ_ETH_CMD_CIDXFTHRESH 16 +#define M_FW_EQ_ETH_CMD_CIDXFTHRESH 0x7 +#define V_FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << S_FW_EQ_ETH_CMD_CIDXFTHRESH) +#define G_FW_EQ_ETH_CMD_CIDXFTHRESH(x) \ + (((x) >> S_FW_EQ_ETH_CMD_CIDXFTHRESH) & M_FW_EQ_ETH_CMD_CIDXFTHRESH) + +#define S_FW_EQ_ETH_CMD_EQSIZE 0 +#define M_FW_EQ_ETH_CMD_EQSIZE 0xffff +#define V_FW_EQ_ETH_CMD_EQSIZE(x) ((x) << S_FW_EQ_ETH_CMD_EQSIZE) +#define G_FW_EQ_ETH_CMD_EQSIZE(x) \ + (((x) >> S_FW_EQ_ETH_CMD_EQSIZE) & M_FW_EQ_ETH_CMD_EQSIZE) + +#define S_FW_EQ_ETH_CMD_AUTOEQUEQE 30 +#define M_FW_EQ_ETH_CMD_AUTOEQUEQE 0x1 +#define V_FW_EQ_ETH_CMD_AUTOEQUEQE(x) ((x) << S_FW_EQ_ETH_CMD_AUTOEQUEQE) +#define G_FW_EQ_ETH_CMD_AUTOEQUEQE(x) \ + (((x) >> S_FW_EQ_ETH_CMD_AUTOEQUEQE) & M_FW_EQ_ETH_CMD_AUTOEQUEQE) +#define 
F_FW_EQ_ETH_CMD_AUTOEQUEQE V_FW_EQ_ETH_CMD_AUTOEQUEQE(1U) + +#define S_FW_EQ_ETH_CMD_VIID 16 +#define M_FW_EQ_ETH_CMD_VIID 0xfff +#define V_FW_EQ_ETH_CMD_VIID(x) ((x) << S_FW_EQ_ETH_CMD_VIID) +#define G_FW_EQ_ETH_CMD_VIID(x) \ + (((x) >> S_FW_EQ_ETH_CMD_VIID) & M_FW_EQ_ETH_CMD_VIID) + +enum fw_vi_func { + FW_VI_FUNC_ETH, +}; + +struct fw_vi_cmd { + __be32 op_to_vfn; + __be32 alloc_to_len16; + __be16 type_to_viid; + __u8 mac[6]; + __u8 portid_pkd; + __u8 nmac; + __u8 nmac0[6]; + __be16 norss_rsssize; + __u8 nmac1[6]; + __be16 idsiiq_pkd; + __u8 nmac2[6]; + __be16 idseiq_pkd; + __u8 nmac3[6]; + __be64 r9; + __be64 r10; +}; + +#define S_FW_VI_CMD_PFN 8 +#define M_FW_VI_CMD_PFN 0x7 +#define V_FW_VI_CMD_PFN(x) ((x) << S_FW_VI_CMD_PFN) +#define G_FW_VI_CMD_PFN(x) (((x) >> S_FW_VI_CMD_PFN) & M_FW_VI_CMD_PFN) + +#define S_FW_VI_CMD_VFN 0 +#define M_FW_VI_CMD_VFN 0xff +#define V_FW_VI_CMD_VFN(x) ((x) << S_FW_VI_CMD_VFN) +#define G_FW_VI_CMD_VFN(x) (((x) >> S_FW_VI_CMD_VFN) & M_FW_VI_CMD_VFN) + +#define S_FW_VI_CMD_ALLOC 31 +#define M_FW_VI_CMD_ALLOC 0x1 +#define V_FW_VI_CMD_ALLOC(x) ((x) << S_FW_VI_CMD_ALLOC) +#define G_FW_VI_CMD_ALLOC(x) \ + (((x) >> S_FW_VI_CMD_ALLOC) & M_FW_VI_CMD_ALLOC) +#define F_FW_VI_CMD_ALLOC V_FW_VI_CMD_ALLOC(1U) + +#define S_FW_VI_CMD_FREE 30 +#define M_FW_VI_CMD_FREE 0x1 +#define V_FW_VI_CMD_FREE(x) ((x) << S_FW_VI_CMD_FREE) +#define G_FW_VI_CMD_FREE(x) (((x) >> S_FW_VI_CMD_FREE) & M_FW_VI_CMD_FREE) +#define F_FW_VI_CMD_FREE V_FW_VI_CMD_FREE(1U) + +#define S_FW_VI_CMD_TYPE 15 +#define M_FW_VI_CMD_TYPE 0x1 +#define V_FW_VI_CMD_TYPE(x) ((x) << S_FW_VI_CMD_TYPE) +#define G_FW_VI_CMD_TYPE(x) (((x) >> S_FW_VI_CMD_TYPE) & M_FW_VI_CMD_TYPE) +#define F_FW_VI_CMD_TYPE V_FW_VI_CMD_TYPE(1U) + +#define S_FW_VI_CMD_FUNC 12 +#define M_FW_VI_CMD_FUNC 0x7 +#define V_FW_VI_CMD_FUNC(x) ((x) << S_FW_VI_CMD_FUNC) +#define G_FW_VI_CMD_FUNC(x) (((x) >> S_FW_VI_CMD_FUNC) & M_FW_VI_CMD_FUNC) + +#define S_FW_VI_CMD_VIID 0 +#define M_FW_VI_CMD_VIID 0xfff +#define V_FW_VI_CMD_VIID(x) ((x) << S_FW_VI_CMD_VIID) +#define G_FW_VI_CMD_VIID(x) (((x) >> S_FW_VI_CMD_VIID) & M_FW_VI_CMD_VIID) + +#define S_FW_VI_CMD_PORTID 4 +#define M_FW_VI_CMD_PORTID 0xf +#define V_FW_VI_CMD_PORTID(x) ((x) << S_FW_VI_CMD_PORTID) +#define G_FW_VI_CMD_PORTID(x) \ + (((x) >> S_FW_VI_CMD_PORTID) & M_FW_VI_CMD_PORTID) + +#define S_FW_VI_CMD_RSSSIZE 0 +#define M_FW_VI_CMD_RSSSIZE 0x7ff +#define V_FW_VI_CMD_RSSSIZE(x) ((x) << S_FW_VI_CMD_RSSSIZE) +#define G_FW_VI_CMD_RSSSIZE(x) \ + (((x) >> S_FW_VI_CMD_RSSSIZE) & M_FW_VI_CMD_RSSSIZE) + +/* Special VI_MAC command index ids */ +#define FW_VI_MAC_ADD_MAC 0x3FF +#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE + +enum fw_vi_mac_smac { + FW_VI_MAC_MPS_TCAM_ENTRY, + FW_VI_MAC_SMT_AND_MPSTCAM +}; + +struct fw_vi_mac_cmd { + __be32 op_to_viid; + __be32 freemacs_to_len16; + union fw_vi_mac { + struct fw_vi_mac_exact { + __be16 valid_to_idx; + __u8 macaddr[6]; + } exact[7]; + struct fw_vi_mac_hash { + __be64 hashvec; + } hash; + } u; +}; + +#define S_FW_VI_MAC_CMD_VIID 0 +#define M_FW_VI_MAC_CMD_VIID 0xfff +#define V_FW_VI_MAC_CMD_VIID(x) ((x) << S_FW_VI_MAC_CMD_VIID) +#define G_FW_VI_MAC_CMD_VIID(x) \ + (((x) >> S_FW_VI_MAC_CMD_VIID) & M_FW_VI_MAC_CMD_VIID) + +#define S_FW_VI_MAC_CMD_VALID 15 +#define M_FW_VI_MAC_CMD_VALID 0x1 +#define V_FW_VI_MAC_CMD_VALID(x) ((x) << S_FW_VI_MAC_CMD_VALID) +#define G_FW_VI_MAC_CMD_VALID(x) \ + (((x) >> S_FW_VI_MAC_CMD_VALID) & M_FW_VI_MAC_CMD_VALID) +#define F_FW_VI_MAC_CMD_VALID V_FW_VI_MAC_CMD_VALID(1U) + +#define S_FW_VI_MAC_CMD_SMAC_RESULT 10 +#define 
M_FW_VI_MAC_CMD_SMAC_RESULT 0x3 +#define V_FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << S_FW_VI_MAC_CMD_SMAC_RESULT) +#define G_FW_VI_MAC_CMD_SMAC_RESULT(x) \ + (((x) >> S_FW_VI_MAC_CMD_SMAC_RESULT) & M_FW_VI_MAC_CMD_SMAC_RESULT) + +#define S_FW_VI_MAC_CMD_IDX 0 +#define M_FW_VI_MAC_CMD_IDX 0x3ff +#define V_FW_VI_MAC_CMD_IDX(x) ((x) << S_FW_VI_MAC_CMD_IDX) +#define G_FW_VI_MAC_CMD_IDX(x) \ + (((x) >> S_FW_VI_MAC_CMD_IDX) & M_FW_VI_MAC_CMD_IDX) + +struct fw_vi_rxmode_cmd { + __be32 op_to_viid; + __be32 retval_len16; + __be32 mtu_to_vlanexen; + __be32 r4_lo; +}; + +#define S_FW_VI_RXMODE_CMD_VIID 0 +#define M_FW_VI_RXMODE_CMD_VIID 0xfff +#define V_FW_VI_RXMODE_CMD_VIID(x) ((x) << S_FW_VI_RXMODE_CMD_VIID) +#define G_FW_VI_RXMODE_CMD_VIID(x) \ + (((x) >> S_FW_VI_RXMODE_CMD_VIID) & M_FW_VI_RXMODE_CMD_VIID) + +#define S_FW_VI_RXMODE_CMD_MTU 16 +#define M_FW_VI_RXMODE_CMD_MTU 0xffff +#define V_FW_VI_RXMODE_CMD_MTU(x) ((x) << S_FW_VI_RXMODE_CMD_MTU) +#define G_FW_VI_RXMODE_CMD_MTU(x) \ + (((x) >> S_FW_VI_RXMODE_CMD_MTU) & M_FW_VI_RXMODE_CMD_MTU) + +#define S_FW_VI_RXMODE_CMD_PROMISCEN 14 +#define M_FW_VI_RXMODE_CMD_PROMISCEN 0x3 +#define V_FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << S_FW_VI_RXMODE_CMD_PROMISCEN) +#define G_FW_VI_RXMODE_CMD_PROMISCEN(x) \ + (((x) >> S_FW_VI_RXMODE_CMD_PROMISCEN) & M_FW_VI_RXMODE_CMD_PROMISCEN) + +#define S_FW_VI_RXMODE_CMD_ALLMULTIEN 12 +#define M_FW_VI_RXMODE_CMD_ALLMULTIEN 0x3 +#define V_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \ + ((x) << S_FW_VI_RXMODE_CMD_ALLMULTIEN) +#define G_FW_VI_RXMODE_CMD_ALLMULTIEN(x) \ + (((x) >> S_FW_VI_RXMODE_CMD_ALLMULTIEN) & M_FW_VI_RXMODE_CMD_ALLMULTIEN) + +#define S_FW_VI_RXMODE_CMD_BROADCASTEN 10 +#define M_FW_VI_RXMODE_CMD_BROADCASTEN 0x3 +#define V_FW_VI_RXMODE_CMD_BROADCASTEN(x) \ + ((x) << S_FW_VI_RXMODE_CMD_BROADCASTEN) +#define G_FW_VI_RXMODE_CMD_BROADCASTEN(x) \ + (((x) >> S_FW_VI_RXMODE_CMD_BROADCASTEN) & \ + M_FW_VI_RXMODE_CMD_BROADCASTEN) + +#define S_FW_VI_RXMODE_CMD_VLANEXEN 8 +#define M_FW_VI_RXMODE_CMD_VLANEXEN 0x3 +#define V_FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << S_FW_VI_RXMODE_CMD_VLANEXEN) +#define G_FW_VI_RXMODE_CMD_VLANEXEN(x) \ + (((x) >> S_FW_VI_RXMODE_CMD_VLANEXEN) & M_FW_VI_RXMODE_CMD_VLANEXEN) + +struct fw_vi_enable_cmd { + __be32 op_to_viid; + __be32 ien_to_len16; + __be16 blinkdur; + __be16 r3; + __be32 r4; +}; + +#define S_FW_VI_ENABLE_CMD_VIID 0 +#define M_FW_VI_ENABLE_CMD_VIID 0xfff +#define V_FW_VI_ENABLE_CMD_VIID(x) ((x) << S_FW_VI_ENABLE_CMD_VIID) +#define G_FW_VI_ENABLE_CMD_VIID(x) \ + (((x) >> S_FW_VI_ENABLE_CMD_VIID) & M_FW_VI_ENABLE_CMD_VIID) + +#define S_FW_VI_ENABLE_CMD_IEN 31 +#define M_FW_VI_ENABLE_CMD_IEN 0x1 +#define V_FW_VI_ENABLE_CMD_IEN(x) ((x) << S_FW_VI_ENABLE_CMD_IEN) +#define G_FW_VI_ENABLE_CMD_IEN(x) \ + (((x) >> S_FW_VI_ENABLE_CMD_IEN) & M_FW_VI_ENABLE_CMD_IEN) +#define F_FW_VI_ENABLE_CMD_IEN V_FW_VI_ENABLE_CMD_IEN(1U) + +#define S_FW_VI_ENABLE_CMD_EEN 30 +#define M_FW_VI_ENABLE_CMD_EEN 0x1 +#define V_FW_VI_ENABLE_CMD_EEN(x) ((x) << S_FW_VI_ENABLE_CMD_EEN) +#define G_FW_VI_ENABLE_CMD_EEN(x) \ + (((x) >> S_FW_VI_ENABLE_CMD_EEN) & M_FW_VI_ENABLE_CMD_EEN) +#define F_FW_VI_ENABLE_CMD_EEN V_FW_VI_ENABLE_CMD_EEN(1U) + +#define S_FW_VI_ENABLE_CMD_DCB_INFO 28 +#define M_FW_VI_ENABLE_CMD_DCB_INFO 0x1 +#define V_FW_VI_ENABLE_CMD_DCB_INFO(x) ((x) << S_FW_VI_ENABLE_CMD_DCB_INFO) +#define G_FW_VI_ENABLE_CMD_DCB_INFO(x) \ + (((x) >> S_FW_VI_ENABLE_CMD_DCB_INFO) & M_FW_VI_ENABLE_CMD_DCB_INFO) +#define F_FW_VI_ENABLE_CMD_DCB_INFO V_FW_VI_ENABLE_CMD_DCB_INFO(1U) + +/* VI PF stats offset definitions */ +#define 
VI_PF_NUM_STATS 17 +enum fw_vi_stats_pf_index { + FW_VI_PF_STAT_TX_BCAST_BYTES_IX, + FW_VI_PF_STAT_TX_BCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_MCAST_BYTES_IX, + FW_VI_PF_STAT_TX_MCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_UCAST_BYTES_IX, + FW_VI_PF_STAT_TX_UCAST_FRAMES_IX, + FW_VI_PF_STAT_TX_OFLD_BYTES_IX, + FW_VI_PF_STAT_TX_OFLD_FRAMES_IX, + FW_VI_PF_STAT_RX_BYTES_IX, + FW_VI_PF_STAT_RX_FRAMES_IX, + FW_VI_PF_STAT_RX_BCAST_BYTES_IX, + FW_VI_PF_STAT_RX_BCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_MCAST_BYTES_IX, + FW_VI_PF_STAT_RX_MCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_UCAST_BYTES_IX, + FW_VI_PF_STAT_RX_UCAST_FRAMES_IX, + FW_VI_PF_STAT_RX_ERR_FRAMES_IX +}; + +struct fw_vi_stats_cmd { + __be32 op_to_viid; + __be32 retval_len16; + union fw_vi_stats { + struct fw_vi_stats_ctl { + __be16 nstats_ix; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_vi_stats_pf { + __be64 tx_bcast_bytes; + __be64 tx_bcast_frames; + __be64 tx_mcast_bytes; + __be64 tx_mcast_frames; + __be64 tx_ucast_bytes; + __be64 tx_ucast_frames; + __be64 tx_offload_bytes; + __be64 tx_offload_frames; + __be64 rx_pf_bytes; + __be64 rx_pf_frames; + __be64 rx_bcast_bytes; + __be64 rx_bcast_frames; + __be64 rx_mcast_bytes; + __be64 rx_mcast_frames; + __be64 rx_ucast_bytes; + __be64 rx_ucast_frames; + __be64 rx_err_frames; + } pf; + struct fw_vi_stats_vf { + __be64 tx_bcast_bytes; + __be64 tx_bcast_frames; + __be64 tx_mcast_bytes; + __be64 tx_mcast_frames; + __be64 tx_ucast_bytes; + __be64 tx_ucast_frames; + __be64 tx_drop_frames; + __be64 tx_offload_bytes; + __be64 tx_offload_frames; + __be64 rx_bcast_bytes; + __be64 rx_bcast_frames; + __be64 rx_mcast_bytes; + __be64 rx_mcast_frames; + __be64 rx_ucast_bytes; + __be64 rx_ucast_frames; + __be64 rx_err_frames; + } vf; + } u; +}; + +/* port capabilities bitmap */ +enum fw_port_cap { + FW_PORT_CAP_SPEED_100M = 0x0001, + FW_PORT_CAP_SPEED_1G = 0x0002, + FW_PORT_CAP_SPEED_2_5G = 0x0004, + FW_PORT_CAP_SPEED_10G = 0x0008, + FW_PORT_CAP_SPEED_40G = 0x0010, + FW_PORT_CAP_SPEED_100G = 0x0020, + FW_PORT_CAP_FC_RX = 0x0040, + FW_PORT_CAP_FC_TX = 0x0080, + FW_PORT_CAP_ANEG = 0x0100, + FW_PORT_CAP_MDIX = 0x0200, + FW_PORT_CAP_MDIAUTO = 0x0400, + FW_PORT_CAP_FEC = 0x0800, + FW_PORT_CAP_TECHKR = 0x1000, + FW_PORT_CAP_TECHKX4 = 0x2000, + FW_PORT_CAP_802_3_PAUSE = 0x4000, + FW_PORT_CAP_802_3_ASM_DIR = 0x8000, +}; + +enum fw_port_mdi { + FW_PORT_CAP_MDI_AUTO, +}; + +#define S_FW_PORT_CAP_MDI 9 +#define M_FW_PORT_CAP_MDI 3 +#define V_FW_PORT_CAP_MDI(x) ((x) << S_FW_PORT_CAP_MDI) +#define G_FW_PORT_CAP_MDI(x) (((x) >> S_FW_PORT_CAP_MDI) & M_FW_PORT_CAP_MDI) + +enum fw_port_action { + FW_PORT_ACTION_L1_CFG = 0x0001, + FW_PORT_ACTION_GET_PORT_INFO = 0x0003, +}; + +struct fw_port_cmd { + __be32 op_to_portid; + __be32 action_to_len16; + union fw_port { + struct fw_port_l1cfg { + __be32 rcap; + __be32 r; + } l1cfg; + struct fw_port_l2cfg { + __u8 ctlbf; + __u8 ovlan3_to_ivlan0; + __be16 ivlantype; + __be16 txipg_force_pinfo; + __be16 mtu; + __be16 ovlan0mask; + __be16 ovlan0type; + __be16 ovlan1mask; + __be16 ovlan1type; + __be16 ovlan2mask; + __be16 ovlan2type; + __be16 ovlan3mask; + __be16 ovlan3type; + } l2cfg; + struct fw_port_info { + __be32 lstatus_to_modtype; + __be16 pcap; + __be16 acap; + __be16 mtu; + __u8 cbllen; + __u8 auxlinfo; + __u8 dcbxdis_pkd; + __u8 r8_lo; + __be16 lpacap; + __be64 r9; + } info; + struct fw_port_diags { + __u8 diagop; + __u8 r[3]; + __be32 diagval; + } diags; + union fw_port_dcb { + struct fw_port_dcb_pgid { + 
__u8 type; + __u8 apply_pkd; + __u8 r10_lo[2]; + __be32 pgid; + __be64 r11; + } pgid; + struct fw_port_dcb_pgrate { + __u8 type; + __u8 apply_pkd; + __u8 r10_lo[5]; + __u8 num_tcs_supported; + __u8 pgrate[8]; + __u8 tsa[8]; + } pgrate; + struct fw_port_dcb_priorate { + __u8 type; + __u8 apply_pkd; + __u8 r10_lo[6]; + __u8 strict_priorate[8]; + } priorate; + struct fw_port_dcb_pfc { + __u8 type; + __u8 pfcen; + __u8 r10[5]; + __u8 max_pfc_tcs; + __be64 r11; + } pfc; + struct fw_port_app_priority { + __u8 type; + __u8 r10[2]; + __u8 idx; + __u8 user_prio_map; + __u8 sel_field; + __be16 protocolid; + __be64 r12; + } app_priority; + struct fw_port_dcb_control { + __u8 type; + __u8 all_syncd_pkd; + __be16 dcb_version_to_app_state; + __be32 r11; + __be64 r12; + } control; + } dcb; + } u; +}; + +#define S_FW_PORT_CMD_PORTID 0 +#define M_FW_PORT_CMD_PORTID 0xf +#define V_FW_PORT_CMD_PORTID(x) ((x) << S_FW_PORT_CMD_PORTID) +#define G_FW_PORT_CMD_PORTID(x) \ + (((x) >> S_FW_PORT_CMD_PORTID) & M_FW_PORT_CMD_PORTID) + +#define S_FW_PORT_CMD_ACTION 16 +#define M_FW_PORT_CMD_ACTION 0xffff +#define V_FW_PORT_CMD_ACTION(x) ((x) << S_FW_PORT_CMD_ACTION) +#define G_FW_PORT_CMD_ACTION(x) \ + (((x) >> S_FW_PORT_CMD_ACTION) & M_FW_PORT_CMD_ACTION) + +#define S_FW_PORT_CMD_LSTATUS 31 +#define M_FW_PORT_CMD_LSTATUS 0x1 +#define V_FW_PORT_CMD_LSTATUS(x) ((x) << S_FW_PORT_CMD_LSTATUS) +#define G_FW_PORT_CMD_LSTATUS(x) \ + (((x) >> S_FW_PORT_CMD_LSTATUS) & M_FW_PORT_CMD_LSTATUS) +#define F_FW_PORT_CMD_LSTATUS V_FW_PORT_CMD_LSTATUS(1U) + +#define S_FW_PORT_CMD_LSPEED 24 +#define M_FW_PORT_CMD_LSPEED 0x3f +#define V_FW_PORT_CMD_LSPEED(x) ((x) << S_FW_PORT_CMD_LSPEED) +#define G_FW_PORT_CMD_LSPEED(x) \ + (((x) >> S_FW_PORT_CMD_LSPEED) & M_FW_PORT_CMD_LSPEED) + +#define S_FW_PORT_CMD_TXPAUSE 23 +#define M_FW_PORT_CMD_TXPAUSE 0x1 +#define V_FW_PORT_CMD_TXPAUSE(x) ((x) << S_FW_PORT_CMD_TXPAUSE) +#define G_FW_PORT_CMD_TXPAUSE(x) \ + (((x) >> S_FW_PORT_CMD_TXPAUSE) & M_FW_PORT_CMD_TXPAUSE) +#define F_FW_PORT_CMD_TXPAUSE V_FW_PORT_CMD_TXPAUSE(1U) + +#define S_FW_PORT_CMD_RXPAUSE 22 +#define M_FW_PORT_CMD_RXPAUSE 0x1 +#define V_FW_PORT_CMD_RXPAUSE(x) ((x) << S_FW_PORT_CMD_RXPAUSE) +#define G_FW_PORT_CMD_RXPAUSE(x) \ + (((x) >> S_FW_PORT_CMD_RXPAUSE) & M_FW_PORT_CMD_RXPAUSE) +#define F_FW_PORT_CMD_RXPAUSE V_FW_PORT_CMD_RXPAUSE(1U) + +#define S_FW_PORT_CMD_MDIOCAP 21 +#define M_FW_PORT_CMD_MDIOCAP 0x1 +#define V_FW_PORT_CMD_MDIOCAP(x) ((x) << S_FW_PORT_CMD_MDIOCAP) +#define G_FW_PORT_CMD_MDIOCAP(x) \ + (((x) >> S_FW_PORT_CMD_MDIOCAP) & M_FW_PORT_CMD_MDIOCAP) +#define F_FW_PORT_CMD_MDIOCAP V_FW_PORT_CMD_MDIOCAP(1U) + +#define S_FW_PORT_CMD_MDIOADDR 16 +#define M_FW_PORT_CMD_MDIOADDR 0x1f +#define V_FW_PORT_CMD_MDIOADDR(x) ((x) << S_FW_PORT_CMD_MDIOADDR) +#define G_FW_PORT_CMD_MDIOADDR(x) \ + (((x) >> S_FW_PORT_CMD_MDIOADDR) & M_FW_PORT_CMD_MDIOADDR) + +#define S_FW_PORT_CMD_PTYPE 8 +#define M_FW_PORT_CMD_PTYPE 0x1f +#define V_FW_PORT_CMD_PTYPE(x) ((x) << S_FW_PORT_CMD_PTYPE) +#define G_FW_PORT_CMD_PTYPE(x) \ + (((x) >> S_FW_PORT_CMD_PTYPE) & M_FW_PORT_CMD_PTYPE) + +#define S_FW_PORT_CMD_LINKDNRC 5 +#define M_FW_PORT_CMD_LINKDNRC 0x7 +#define V_FW_PORT_CMD_LINKDNRC(x) ((x) << S_FW_PORT_CMD_LINKDNRC) +#define G_FW_PORT_CMD_LINKDNRC(x) \ + (((x) >> S_FW_PORT_CMD_LINKDNRC) & M_FW_PORT_CMD_LINKDNRC) + +#define S_FW_PORT_CMD_MODTYPE 0 +#define M_FW_PORT_CMD_MODTYPE 0x1f +#define V_FW_PORT_CMD_MODTYPE(x) ((x) << S_FW_PORT_CMD_MODTYPE) +#define G_FW_PORT_CMD_MODTYPE(x) \ + (((x) >> S_FW_PORT_CMD_MODTYPE) & M_FW_PORT_CMD_MODTYPE) +
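Together these accessors decode the lstatus_to_modtype word of a FW_PORT_ACTION_GET_PORT_INFO reply. A minimal sketch (hypothetical function, not part of the patch; the mailbox exchange and error handling are omitted, and be32_to_cpu() is assumed from the compat layer):

static void decode_port_info(const struct fw_port_cmd *cmd)
{
	unsigned int stat = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
	int link_up = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
	/* the lspeed field carries FW_PORT_CAP_SPEED_* bits */
	unsigned int speeds = G_FW_PORT_CMD_LSPEED(stat);
	unsigned int ptype = G_FW_PORT_CMD_PTYPE(stat);	/* enum fw_port_type */
	unsigned int mod = G_FW_PORT_CMD_MODTYPE(stat);	/* enum fw_port_module_type */

	if (link_up && (speeds & FW_PORT_CAP_SPEED_40G)) {
		/* e.g. report a 40G link to the stack */
	}
	(void)ptype;
	(void)mod;
}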
+/* + * These are configured into the VPD and hence tools that generate + * VPD may use this enumeration. + * extPHY #lanes T4_I2C extI2C BP_Eq BP_ANEG Speed + * + * REMEMBER: + * Update the Common Code t4_hw.c:t4_get_port_type_description() + * with any new Firmware Port Technology Types! + */ +enum fw_port_type { + FW_PORT_TYPE_FIBER_XFI = 0, /* Y, 1, N, Y, N, N, 10G */ + FW_PORT_TYPE_FIBER_XAUI = 1, /* Y, 4, N, Y, N, N, 10G */ + FW_PORT_TYPE_BT_SGMII = 2, /* Y, 1, No, No, No, No, 1G/100M */ + FW_PORT_TYPE_BT_XFI = 3, /* Y, 1, No, No, No, No, 10G */ + FW_PORT_TYPE_BT_XAUI = 4, /* Y, 4, No, No, No, No, 10G/1G/100M? */ + FW_PORT_TYPE_KX4 = 5, /* No, 4, No, No, Yes, Yes, 10G */ + FW_PORT_TYPE_CX4 = 6, /* No, 4, No, No, No, No, 10G */ + FW_PORT_TYPE_KX = 7, /* No, 1, No, No, Yes, No, 1G */ + FW_PORT_TYPE_KR = 8, /* No, 1, No, No, Yes, Yes, 10G */ + FW_PORT_TYPE_SFP = 9, /* No, 1, Yes, No, No, No, 10G */ + FW_PORT_TYPE_BP_AP = 10, + /* No, 1, No, No, Yes, Yes, 10G, BP ANEG */ + FW_PORT_TYPE_BP4_AP = 11, + /* No, 4, No, No, Yes, Yes, 10G, BP ANEG */ + FW_PORT_TYPE_QSFP_10G = 12, /* No, 1, Yes, No, No, No, 10G */ + FW_PORT_TYPE_QSA = 13, /* No, 1, Yes, No, No, No, 10G */ + FW_PORT_TYPE_QSFP = 14, /* No, 4, Yes, No, No, No, 40G */ + FW_PORT_TYPE_BP40_BA = 15, + /* No, 4, No, No, Yes, Yes, 40G/10G/1G, BP ANEG */ + + FW_PORT_TYPE_NONE = M_FW_PORT_CMD_PTYPE +}; + +/* These are read from the module's EEPROM and determined once the + * module is inserted. + */ +enum fw_port_module_type { + FW_PORT_MOD_TYPE_NA = 0x0, + FW_PORT_MOD_TYPE_LR = 0x1, + FW_PORT_MOD_TYPE_SR = 0x2, + FW_PORT_MOD_TYPE_ER = 0x3, + FW_PORT_MOD_TYPE_TWINAX_PASSIVE = 0x4, + FW_PORT_MOD_TYPE_TWINAX_ACTIVE = 0x5, + FW_PORT_MOD_TYPE_LRM = 0x6, + FW_PORT_MOD_TYPE_ERROR = M_FW_PORT_CMD_MODTYPE - 3, + FW_PORT_MOD_TYPE_UNKNOWN = M_FW_PORT_CMD_MODTYPE - 2, + FW_PORT_MOD_TYPE_NOTSUPPORTED = M_FW_PORT_CMD_MODTYPE - 1, + FW_PORT_MOD_TYPE_NONE = M_FW_PORT_CMD_MODTYPE +}; + +/* used by FW; tools may use this to generate VPD */ +enum fw_port_mod_sub_type { + FW_PORT_MOD_SUB_TYPE_NA, + FW_PORT_MOD_SUB_TYPE_MV88E114X = 0x1, + FW_PORT_MOD_SUB_TYPE_TN8022 = 0x2, + FW_PORT_MOD_SUB_TYPE_AQ1202 = 0x3, + FW_PORT_MOD_SUB_TYPE_88x3120 = 0x4, + FW_PORT_MOD_SUB_TYPE_BCM84834 = 0x5, + FW_PORT_MOD_SUB_TYPE_BCM5482 = 0x6, + FW_PORT_MOD_SUB_TYPE_BCM84856 = 0x7, + FW_PORT_MOD_SUB_TYPE_BT_VSC8634 = 0x8, + + /* + * The following will never be in the VPD. They are TWINAX cable + * lengths decoded from SFP+ module i2c PROMs. These should almost + * certainly go somewhere else ... 
+ */ + FW_PORT_MOD_SUB_TYPE_TWINAX_1 = 0x9, + FW_PORT_MOD_SUB_TYPE_TWINAX_3 = 0xA, + FW_PORT_MOD_SUB_TYPE_TWINAX_5 = 0xB, + FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC, +}; + +/* link down reason codes (3b) */ +enum fw_port_link_dn_rc { + FW_PORT_LINK_DN_RC_NONE, + FW_PORT_LINK_DN_RC_REMFLT, /* Remote fault detected */ + FW_PORT_LINK_DN_ANEG_F, /* Auto-negotiation fault */ + FW_PORT_LINK_DN_RESERVED3, + FW_PORT_LINK_DN_OVERHEAT, /* Port overheated */ + FW_PORT_LINK_DN_UNKNOWN, /* Unable to determine reason */ + FW_PORT_LINK_DN_RX_LOS, /* No RX signal detected */ + FW_PORT_LINK_DN_RESERVED7 +}; + +/* port stats */ +#define FW_NUM_PORT_STATS 50 +#define FW_NUM_PORT_TX_STATS 23 +#define FW_NUM_PORT_RX_STATS 27 + +enum fw_port_stats_tx_index { + FW_STAT_TX_PORT_BYTES_IX, + FW_STAT_TX_PORT_FRAMES_IX, + FW_STAT_TX_PORT_BCAST_IX, + FW_STAT_TX_PORT_MCAST_IX, + FW_STAT_TX_PORT_UCAST_IX, + FW_STAT_TX_PORT_ERROR_IX, + FW_STAT_TX_PORT_64B_IX, + FW_STAT_TX_PORT_65B_127B_IX, + FW_STAT_TX_PORT_128B_255B_IX, + FW_STAT_TX_PORT_256B_511B_IX, + FW_STAT_TX_PORT_512B_1023B_IX, + FW_STAT_TX_PORT_1024B_1518B_IX, + FW_STAT_TX_PORT_1519B_MAX_IX, + FW_STAT_TX_PORT_DROP_IX, + FW_STAT_TX_PORT_PAUSE_IX, + FW_STAT_TX_PORT_PPP0_IX, + FW_STAT_TX_PORT_PPP1_IX, + FW_STAT_TX_PORT_PPP2_IX, + FW_STAT_TX_PORT_PPP3_IX, + FW_STAT_TX_PORT_PPP4_IX, + FW_STAT_TX_PORT_PPP5_IX, + FW_STAT_TX_PORT_PPP6_IX, + FW_STAT_TX_PORT_PPP7_IX +}; + +enum fw_port_stat_rx_index { + FW_STAT_RX_PORT_BYTES_IX, + FW_STAT_RX_PORT_FRAMES_IX, + FW_STAT_RX_PORT_BCAST_IX, + FW_STAT_RX_PORT_MCAST_IX, + FW_STAT_RX_PORT_UCAST_IX, + FW_STAT_RX_PORT_MTU_ERROR_IX, + FW_STAT_RX_PORT_MTU_CRC_ERROR_IX, + FW_STAT_RX_PORT_CRC_ERROR_IX, + FW_STAT_RX_PORT_LEN_ERROR_IX, + FW_STAT_RX_PORT_SYM_ERROR_IX, + FW_STAT_RX_PORT_64B_IX, + FW_STAT_RX_PORT_65B_127B_IX, + FW_STAT_RX_PORT_128B_255B_IX, + FW_STAT_RX_PORT_256B_511B_IX, + FW_STAT_RX_PORT_512B_1023B_IX, + FW_STAT_RX_PORT_1024B_1518B_IX, + FW_STAT_RX_PORT_1519B_MAX_IX, + FW_STAT_RX_PORT_PAUSE_IX, + FW_STAT_RX_PORT_PPP0_IX, + FW_STAT_RX_PORT_PPP1_IX, + FW_STAT_RX_PORT_PPP2_IX, + FW_STAT_RX_PORT_PPP3_IX, + FW_STAT_RX_PORT_PPP4_IX, + FW_STAT_RX_PORT_PPP5_IX, + FW_STAT_RX_PORT_PPP6_IX, + FW_STAT_RX_PORT_PPP7_IX, + FW_STAT_RX_PORT_LESS_64B_IX +}; + +struct fw_port_stats_cmd { + __be32 op_to_portid; + __be32 retval_len16; + union fw_port_stats { + struct fw_port_stats_ctl { + __u8 nstats_bg_bm; + __u8 tx_ix; + __be16 r6; + __be32 r7; + __be64 stat0; + __be64 stat1; + __be64 stat2; + __be64 stat3; + __be64 stat4; + __be64 stat5; + } ctl; + struct fw_port_stats_all { + __be64 tx_bytes; + __be64 tx_frames; + __be64 tx_bcast; + __be64 tx_mcast; + __be64 tx_ucast; + __be64 tx_error; + __be64 tx_64b; + __be64 tx_65b_127b; + __be64 tx_128b_255b; + __be64 tx_256b_511b; + __be64 tx_512b_1023b; + __be64 tx_1024b_1518b; + __be64 tx_1519b_max; + __be64 tx_drop; + __be64 tx_pause; + __be64 tx_ppp0; + __be64 tx_ppp1; + __be64 tx_ppp2; + __be64 tx_ppp3; + __be64 tx_ppp4; + __be64 tx_ppp5; + __be64 tx_ppp6; + __be64 tx_ppp7; + __be64 rx_bytes; + __be64 rx_frames; + __be64 rx_bcast; + __be64 rx_mcast; + __be64 rx_ucast; + __be64 rx_mtu_error; + __be64 rx_mtu_crc_error; + __be64 rx_crc_error; + __be64 rx_len_error; + __be64 rx_sym_error; + __be64 rx_64b; + __be64 rx_65b_127b; + __be64 rx_128b_255b; + __be64 rx_256b_511b; + __be64 rx_512b_1023b; + __be64 rx_1024b_1518b; + __be64 rx_1519b_max; + __be64 rx_pause; + __be64 rx_ppp0; + __be64 rx_ppp1; + __be64 rx_ppp2; + __be64 rx_ppp3; + __be64 rx_ppp4; + __be64 rx_ppp5; + __be64 rx_ppp6; + __be64 rx_ppp7; + 
__be64 rx_less_64b; + __be64 rx_bg_drop; + __be64 rx_bg_trunc; + } all; + } u; +}; + +struct fw_rss_ind_tbl_cmd { + __be32 op_to_viid; + __be32 retval_len16; + __be16 niqid; + __be16 startidx; + __be32 r3; + __be32 iq0_to_iq2; + __be32 iq3_to_iq5; + __be32 iq6_to_iq8; + __be32 iq9_to_iq11; + __be32 iq12_to_iq14; + __be32 iq15_to_iq17; + __be32 iq18_to_iq20; + __be32 iq21_to_iq23; + __be32 iq24_to_iq26; + __be32 iq27_to_iq29; + __be32 iq30_iq31; + __be32 r15_lo; +}; + +#define S_FW_RSS_IND_TBL_CMD_VIID 0 +#define M_FW_RSS_IND_TBL_CMD_VIID 0xfff +#define V_FW_RSS_IND_TBL_CMD_VIID(x) ((x) << S_FW_RSS_IND_TBL_CMD_VIID) +#define G_FW_RSS_IND_TBL_CMD_VIID(x) \ + (((x) >> S_FW_RSS_IND_TBL_CMD_VIID) & M_FW_RSS_IND_TBL_CMD_VIID) + +#define S_FW_RSS_IND_TBL_CMD_IQ0 20 +#define M_FW_RSS_IND_TBL_CMD_IQ0 0x3ff +#define V_FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ0) +#define G_FW_RSS_IND_TBL_CMD_IQ0(x) \ + (((x) >> S_FW_RSS_IND_TBL_CMD_IQ0) & M_FW_RSS_IND_TBL_CMD_IQ0) + +#define S_FW_RSS_IND_TBL_CMD_IQ1 10 +#define M_FW_RSS_IND_TBL_CMD_IQ1 0x3ff +#define V_FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ1) +#define G_FW_RSS_IND_TBL_CMD_IQ1(x) \ + (((x) >> S_FW_RSS_IND_TBL_CMD_IQ1) & M_FW_RSS_IND_TBL_CMD_IQ1) + +#define S_FW_RSS_IND_TBL_CMD_IQ2 0 +#define M_FW_RSS_IND_TBL_CMD_IQ2 0x3ff +#define V_FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << S_FW_RSS_IND_TBL_CMD_IQ2) +#define G_FW_RSS_IND_TBL_CMD_IQ2(x) \ + (((x) >> S_FW_RSS_IND_TBL_CMD_IQ2) & M_FW_RSS_IND_TBL_CMD_IQ2) + +struct fw_rss_vi_config_cmd { + __be32 op_to_viid; + __be32 retval_len16; + union fw_rss_vi_config { + struct fw_rss_vi_config_manual { + __be64 r3; + __be64 r4; + __be64 r5; + } manual; + struct fw_rss_vi_config_basicvirtual { + __be32 r6; + __be32 defaultq_to_udpen; + __be64 r9; + __be64 r10; + } basicvirtual; + } u; +}; + +#define S_FW_RSS_VI_CONFIG_CMD_VIID 0 +#define M_FW_RSS_VI_CONFIG_CMD_VIID 0xfff +#define V_FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << S_FW_RSS_VI_CONFIG_CMD_VIID) +#define G_FW_RSS_VI_CONFIG_CMD_VIID(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_VIID) & M_FW_RSS_VI_CONFIG_CMD_VIID) + +#define S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ 16 +#define M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ 0x3ff +#define V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \ + ((x) << S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) +#define G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) & \ + M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ) + +#define S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 4 +#define M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN 0x1 +#define V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \ + ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) +#define G_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) & \ + M_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) +#define F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN \ + V_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN(1U) + +#define S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 3 +#define M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN 0x1 +#define V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \ + ((x) << S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) +#define G_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) & \ + M_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) +#define F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN \ + V_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN(1U) + +#define S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 2 +#define M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN 0x1 +#define V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \ + ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) +#define G_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) & \ + 
M_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) +#define F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN \ + V_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN(1U) + +#define S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 1 +#define M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN 0x1 +#define V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \ + ((x) << S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) +#define G_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) & \ + M_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) +#define F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN \ + V_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN(1U) + +#define S_FW_RSS_VI_CONFIG_CMD_UDPEN 0 +#define M_FW_RSS_VI_CONFIG_CMD_UDPEN 0x1 +#define V_FW_RSS_VI_CONFIG_CMD_UDPEN(x) ((x) << S_FW_RSS_VI_CONFIG_CMD_UDPEN) +#define G_FW_RSS_VI_CONFIG_CMD_UDPEN(x) \ + (((x) >> S_FW_RSS_VI_CONFIG_CMD_UDPEN) & M_FW_RSS_VI_CONFIG_CMD_UDPEN) +#define F_FW_RSS_VI_CONFIG_CMD_UDPEN V_FW_RSS_VI_CONFIG_CMD_UDPEN(1U) + +/****************************************************************************** + * D E B U G C O M M A N D s + ******************************************************/ + +struct fw_debug_cmd { + __be32 op_type; + __be32 len16_pkd; + union fw_debug { + struct fw_debug_assert { + __be32 fcid; + __be32 line; + __be32 x; + __be32 y; + __u8 filename_0_7[8]; + __u8 filename_8_15[8]; + __be64 r3; + } assert; + struct fw_debug_prt { + __be16 dprtstridx; + __be16 r3[3]; + __be32 dprtstrparam0; + __be32 dprtstrparam1; + __be32 dprtstrparam2; + __be32 dprtstrparam3; + } prt; + } u; +}; + +#define S_FW_DEBUG_CMD_TYPE 0 +#define M_FW_DEBUG_CMD_TYPE 0xff +#define V_FW_DEBUG_CMD_TYPE(x) ((x) << S_FW_DEBUG_CMD_TYPE) +#define G_FW_DEBUG_CMD_TYPE(x) \ + (((x) >> S_FW_DEBUG_CMD_TYPE) & M_FW_DEBUG_CMD_TYPE) + +/****************************************************************************** + * P C I E F W R E G I S T E R + **************************************/ + +/* + * Register definitions for the PCIE_FW register which the firmware uses + * to retain status across RESETs. This register should be considered + * as a READ-ONLY register for Host Software and only to be used to + * track firmware initialization/error state, etc. 
+ */ +#define S_PCIE_FW_ERR 31 +#define M_PCIE_FW_ERR 0x1 +#define V_PCIE_FW_ERR(x) ((x) << S_PCIE_FW_ERR) +#define G_PCIE_FW_ERR(x) (((x) >> S_PCIE_FW_ERR) & M_PCIE_FW_ERR) +#define F_PCIE_FW_ERR V_PCIE_FW_ERR(1U) + +#define S_PCIE_FW_INIT 30 +#define M_PCIE_FW_INIT 0x1 +#define V_PCIE_FW_INIT(x) ((x) << S_PCIE_FW_INIT) +#define G_PCIE_FW_INIT(x) (((x) >> S_PCIE_FW_INIT) & M_PCIE_FW_INIT) +#define F_PCIE_FW_INIT V_PCIE_FW_INIT(1U) + +#define S_PCIE_FW_HALT 29 +#define M_PCIE_FW_HALT 0x1 +#define V_PCIE_FW_HALT(x) ((x) << S_PCIE_FW_HALT) +#define G_PCIE_FW_HALT(x) (((x) >> S_PCIE_FW_HALT) & M_PCIE_FW_HALT) +#define F_PCIE_FW_HALT V_PCIE_FW_HALT(1U) + +#define S_PCIE_FW_EVAL 24 +#define M_PCIE_FW_EVAL 0x7 +#define V_PCIE_FW_EVAL(x) ((x) << S_PCIE_FW_EVAL) +#define G_PCIE_FW_EVAL(x) (((x) >> S_PCIE_FW_EVAL) & M_PCIE_FW_EVAL) + +#define S_PCIE_FW_MASTER_VLD 15 +#define M_PCIE_FW_MASTER_VLD 0x1 +#define V_PCIE_FW_MASTER_VLD(x) ((x) << S_PCIE_FW_MASTER_VLD) +#define G_PCIE_FW_MASTER_VLD(x) \ + (((x) >> S_PCIE_FW_MASTER_VLD) & M_PCIE_FW_MASTER_VLD) +#define F_PCIE_FW_MASTER_VLD V_PCIE_FW_MASTER_VLD(1U) + +#define S_PCIE_FW_MASTER 12 +#define M_PCIE_FW_MASTER 0x7 +#define V_PCIE_FW_MASTER(x) ((x) << S_PCIE_FW_MASTER) +#define G_PCIE_FW_MASTER(x) (((x) >> S_PCIE_FW_MASTER) & M_PCIE_FW_MASTER) + +/****************************************************************************** + * B I N A R Y H E A D E R F O R M A T + **********************************************/ + +/* + * firmware binary header format + */ +struct fw_hdr { + __u8 ver; + __u8 chip; /* terminator chip family */ + __be16 len512; /* bin length in units of 512 bytes */ + __be32 fw_ver; /* firmware version */ + __be32 tp_microcode_ver; /* tcp processor microcode version */ + __u8 intfver_nic; + __u8 intfver_vnic; + __u8 intfver_ofld; + __u8 intfver_ri; + __u8 intfver_iscsipdu; + __u8 intfver_iscsi; + __u8 intfver_fcoepdu; + __u8 intfver_fcoe; + __u32 reserved2; + __u32 reserved3; + __u32 magic; /* runtime or bootstrap fw */ + __be32 flags; + __be32 reserved6[23]; +}; + +#define S_FW_HDR_FW_VER_MAJOR 24 +#define M_FW_HDR_FW_VER_MAJOR 0xff +#define V_FW_HDR_FW_VER_MAJOR(x) \ + ((x) << S_FW_HDR_FW_VER_MAJOR) +#define G_FW_HDR_FW_VER_MAJOR(x) \ + (((x) >> S_FW_HDR_FW_VER_MAJOR) & M_FW_HDR_FW_VER_MAJOR) + +#define S_FW_HDR_FW_VER_MINOR 16 +#define M_FW_HDR_FW_VER_MINOR 0xff +#define V_FW_HDR_FW_VER_MINOR(x) \ + ((x) << S_FW_HDR_FW_VER_MINOR) +#define G_FW_HDR_FW_VER_MINOR(x) \ + (((x) >> S_FW_HDR_FW_VER_MINOR) & M_FW_HDR_FW_VER_MINOR) + +#define S_FW_HDR_FW_VER_MICRO 8 +#define M_FW_HDR_FW_VER_MICRO 0xff +#define V_FW_HDR_FW_VER_MICRO(x) \ + ((x) << S_FW_HDR_FW_VER_MICRO) +#define G_FW_HDR_FW_VER_MICRO(x) \ + (((x) >> S_FW_HDR_FW_VER_MICRO) & M_FW_HDR_FW_VER_MICRO) + +#define S_FW_HDR_FW_VER_BUILD 0 +#define M_FW_HDR_FW_VER_BUILD 0xff +#define V_FW_HDR_FW_VER_BUILD(x) \ + ((x) << S_FW_HDR_FW_VER_BUILD) +#define G_FW_HDR_FW_VER_BUILD(x) \ + (((x) >> S_FW_HDR_FW_VER_BUILD) & M_FW_HDR_FW_VER_BUILD) + +#endif /* _T4FW_INTERFACE_H_ */ -- 2.20.1
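As a closing illustration of the version accessors above, a sketch of unpacking the fw_ver word from a firmware image header (hypothetical helper, not part of the patch; be32_to_cpu() and printf() are assumed available):

static void print_fw_version(const struct fw_hdr *hdr)
{
	unsigned int ver = be32_to_cpu(hdr->fw_ver);

	/* major.minor.micro.build are packed into one big-endian word */
	printf("firmware %u.%u.%u.%u\n",
	       G_FW_HDR_FW_VER_MAJOR(ver), G_FW_HDR_FW_VER_MINOR(ver),
	       G_FW_HDR_FW_VER_MICRO(ver), G_FW_HDR_FW_VER_BUILD(ver));
}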