1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Chelsio Communications.
6 #include <netinet/in.h>
8 #include <rte_interrupts.h>
10 #include <rte_debug.h>
12 #include <rte_atomic.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_memory.h>
15 #include <rte_tailq.h>
17 #include <rte_alarm.h>
18 #include <rte_ether.h>
19 #include <rte_ethdev_driver.h>
20 #include <rte_malloc.h>
21 #include <rte_random.h>
23 #include <rte_byteorder.h>
27 #include "t4_regs_values.h"
28 #include "t4fw_interface.h"
31 * t4_read_mtu_tbl - returns the values in the HW path MTU table
33 * @mtus: where to store the MTU values
34 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
36 * Reads the HW path MTU table.
38 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
43 for (i = 0; i < NMTUS; ++i) {
44 t4_write_reg(adap, A_TP_MTU_TABLE,
45 V_MTUINDEX(0xff) | V_MTUVALUE(i));
46 v = t4_read_reg(adap, A_TP_MTU_TABLE);
47 mtus[i] = G_MTUVALUE(v);
49 mtu_log[i] = G_MTUWIDTH(v);
54 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
56 * @addr: the indirect TP register address
57 * @mask: specifies the field within the register to modify
58 * @val: new value for the field
60 * Sets a field of an indirect TP register to the given value.
62 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
63 unsigned int mask, unsigned int val)
65 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
66 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
67 t4_write_reg(adap, A_TP_PIO_DATA, val);
70 /* The minimum additive increment value for the congestion control table */
71 #define CC_MIN_INCR 2U
74 * t4_load_mtus - write the MTU and congestion control HW tables
76 * @mtus: the values for the MTU table
77 * @alpha: the values for the congestion control alpha parameter
78 * @beta: the values for the congestion control beta parameter
80 * Write the HW MTU table with the supplied MTUs and the high-speed
81 * congestion control table with the supplied alpha, beta, and MTUs.
82 * We write the two tables together because the additive increments
85 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
86 const unsigned short *alpha, const unsigned short *beta)
88 static const unsigned int avg_pkts[NCCTRL_WIN] = {
89 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
90 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
91 28672, 40960, 57344, 81920, 114688, 163840, 229376
96 for (i = 0; i < NMTUS; ++i) {
97 unsigned int mtu = mtus[i];
98 unsigned int log2 = cxgbe_fls(mtu);
100 if (!(mtu & ((1 << log2) >> 2))) /* round */
102 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
103 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
105 for (w = 0; w < NCCTRL_WIN; ++w) {
108 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
111 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
112 (w << 16) | (beta[w] << 13) | inc);
118 * t4_wait_op_done_val - wait until an operation is completed
119 * @adapter: the adapter performing the operation
120 * @reg: the register to check for completion
121 * @mask: a single-bit field within @reg that indicates completion
122 * @polarity: the value of the field when the operation is completed
123 * @attempts: number of check iterations
124 * @delay: delay in usecs between iterations
125 * @valp: where to store the value of the register at completion time
127 * Wait until an operation is completed by checking a bit in a register
128 * up to @attempts times. If @valp is not NULL the value of the register
129 * at the time it indicated completion is stored there. Returns 0 if the
130 * operation completes and -EAGAIN otherwise.
132 int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
133 int polarity, int attempts, int delay, u32 *valp)
136 u32 val = t4_read_reg(adapter, reg);
138 if (!!(val & mask) == polarity) {
151 * t4_set_reg_field - set a register field to a value
152 * @adapter: the adapter to program
153 * @addr: the register address
154 * @mask: specifies the portion of the register to modify
155 * @val: the new value for the register field
157 * Sets a register field specified by the supplied mask to the
160 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
163 u32 v = t4_read_reg(adapter, addr) & ~mask;
165 t4_write_reg(adapter, addr, v | val);
166 (void)t4_read_reg(adapter, addr); /* flush */
170 * t4_read_indirect - read indirectly addressed registers
172 * @addr_reg: register holding the indirect address
173 * @data_reg: register holding the value of the indirect register
174 * @vals: where the read register values are stored
175 * @nregs: how many indirect registers to read
176 * @start_idx: index of first indirect register to read
178 * Reads registers that are accessed indirectly through an address/data
181 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
182 unsigned int data_reg, u32 *vals, unsigned int nregs,
183 unsigned int start_idx)
186 t4_write_reg(adap, addr_reg, start_idx);
187 *vals++ = t4_read_reg(adap, data_reg);
193 * t4_write_indirect - write indirectly addressed registers
195 * @addr_reg: register holding the indirect addresses
196 * @data_reg: register holding the value for the indirect registers
197 * @vals: values to write
198 * @nregs: how many indirect registers to write
199 * @start_idx: address of first indirect register to write
201 * Writes a sequential block of registers that are accessed indirectly
202 * through an address/data register pair.
204 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
205 unsigned int data_reg, const u32 *vals,
206 unsigned int nregs, unsigned int start_idx)
209 t4_write_reg(adap, addr_reg, start_idx++);
210 t4_write_reg(adap, data_reg, *vals++);
215 * t4_report_fw_error - report firmware error
218 * The adapter firmware can indicate error conditions to the host.
219 * If the firmware has indicated an error, print out the reason for
220 * the firmware error.
222 static void t4_report_fw_error(struct adapter *adap)
224 static const char * const reason[] = {
225 "Crash", /* PCIE_FW_EVAL_CRASH */
226 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
227 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
228 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
229 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
230 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
231 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
232 "Reserved", /* reserved */
236 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
237 if (pcie_fw & F_PCIE_FW_ERR)
238 pr_err("%s: Firmware reports adapter error: %s\n",
239 __func__, reason[G_PCIE_FW_EVAL(pcie_fw)]);
243 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
245 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
248 for ( ; nflit; nflit--, mbox_addr += 8)
249 *rpl++ = htobe64(t4_read_reg64(adap, mbox_addr));
253 * Handle a FW assertion reported in a mailbox.
255 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
257 struct fw_debug_cmd asrt;
259 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
260 pr_warn("FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
261 asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
262 be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
265 #define X_CIM_PF_NOACCESS 0xeeeeeeee
268 * If the Host OS Driver needs locking around accesses to the mailbox, this
269 * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
271 /* makes single-statement usage a bit cleaner ... */
272 #ifdef T4_OS_NEEDS_MBOX_LOCKING
273 #define T4_OS_MBOX_LOCKING(x) x
275 #define T4_OS_MBOX_LOCKING(x) do {} while (0)
279 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
281 * @mbox: index of the mailbox to use
282 * @cmd: the command to write
283 * @size: command length in bytes
284 * @rpl: where to optionally store the reply
285 * @sleep_ok: if true we may sleep while awaiting command completion
286 * @timeout: time to wait for command to finish before timing out
287 * (negative implies @sleep_ok=false)
289 * Sends the given command to FW through the selected mailbox and waits
290 * for the FW to execute the command. If @rpl is not %NULL it is used to
291 * store the FW's reply to the command. The command and its optional
292 * reply are of the same length. Some FW commands like RESET and
293 * INITIALIZE can take a considerable amount of time to execute.
294 * @sleep_ok determines whether we may sleep while awaiting the response.
295 * If sleeping is allowed we use progressive backoff otherwise we spin.
296 * Note that passing in a negative @timeout is an alternate mechanism
297 * for specifying @sleep_ok=false. This is useful when a higher level
298 * interface allows for specification of @timeout but not @sleep_ok ...
300 * Returns 0 on success or a negative errno on failure. A
301 * failure can happen either because we are not able to execute the
302 * command or FW executes it but signals an error. In the latter case
303 * the return value is the error code indicated by FW (negated).
/* NOTE(review): this function is an elided extract (original line numbers
 * retained as prefixes); many statements are missing from this listing.
 * Code below is byte-preserved; only comments have been added.
 */
305 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
306 const void __attribute__((__may_alias__)) *cmd,
307 int size, void *rpl, bool sleep_ok, int timeout)
310 * We delay in small increments at first in an effort to maintain
311 * responsiveness for simple, fast executing commands but then back
312 * off to larger delays to a maximum retry delay.
314 static const int delay[] = {
315 1, 1, 3, 5, 10, 10, 20, 50, 100
321 unsigned int delay_idx;
/* NOTE(review): malloc() result below does not appear to be checked for
 * NULL before the memcpy() at line 339 — verify against the full source
 * and add an -ENOMEM bail-out if it is indeed unchecked.
 */
322 __be64 *temp = (__be64 *)malloc(size * sizeof(char));
324 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
325 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
327 struct mbox_entry entry;
/* mailbox commands must be a multiple of 16 bytes and fit the mailbox */
333 if ((size & 15) || size > MBOX_LEN) {
339 memcpy(p, (const __be64 *)cmd, size);
342 * If we have a negative timeout, that implies that we can't sleep.
349 #ifdef T4_OS_NEEDS_MBOX_LOCKING
351 * Queue ourselves onto the mailbox access list. When our entry is at
352 * the front of the list, we have rights to access the mailbox. So we
353 * wait [for a while] till we're at the front [or bail out with an
356 t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);
361 for (i = 0; ; i += ms) {
363 * If we've waited too long, return a busy indication. This
364 * really ought to be based on our initial position in the
365 * mailbox access list but this is a start. We very rarely
366 * contend on access to the mailbox ... Also check for a
367 * firmware error which we'll report as a device error.
369 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
370 if (i > 4 * timeout || (pcie_fw & F_PCIE_FW_ERR)) {
371 t4_os_atomic_list_del(&entry, &adap->mbox_list,
373 t4_report_fw_error(adap);
375 return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
379 * If we're at the head, break out and start the mailbox
382 if (t4_os_list_first_entry(&adap->mbox_list) == &entry)
386 * Delay for a bit before checking again ...
389 ms = delay[delay_idx]; /* last element may repeat */
390 if (delay_idx < ARRAY_SIZE(delay) - 1)
397 #endif /* T4_OS_NEEDS_MBOX_LOCKING */
400 * Attempt to gain access to the mailbox.
402 for (i = 0; i < 4; i++) {
403 ctl = t4_read_reg(adap, ctl_reg);
405 if (v != X_MBOWNER_NONE)
410 * If we were unable to gain access, dequeue ourselves from the
411 * mailbox atomic access list and report the error to our caller.
413 if (v != X_MBOWNER_PL) {
414 T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
417 t4_report_fw_error(adap);
419 return (v == X_MBOWNER_FW ? -EBUSY : -ETIMEDOUT);
423 * If we gain ownership of the mailbox and there's a "valid" message
424 * in it, this is likely an asynchronous error message from the
425 * firmware. So we'll report that and then proceed on with attempting
426 * to issue our own command ... which may well fail if the error
427 * presaged the firmware crashing ...
429 if (ctl & F_MBMSGVALID) {
430 dev_err(adap, "found VALID command in mbox %u: "
431 "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
432 (unsigned long long)t4_read_reg64(adap, data_reg),
433 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
434 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
435 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
436 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
437 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
438 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
439 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
443 * Copy in the new mailbox command and send it on its way ...
445 for (i = 0; i < size; i += 8, p++)
446 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
448 CXGBE_DEBUG_MBOX(adap, "%s: mbox %u: %016llx %016llx %016llx %016llx "
449 "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
450 (unsigned long long)t4_read_reg64(adap, data_reg),
451 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
452 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
453 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
454 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
455 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
456 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
457 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
/* handing ownership to the firmware kicks off command execution */
459 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
460 t4_read_reg(adap, ctl_reg); /* flush write */
466 * Loop waiting for the reply; bail out if we time out or the firmware
469 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
470 for (i = 0; i < timeout && !(pcie_fw & F_PCIE_FW_ERR); i += ms) {
472 ms = delay[delay_idx]; /* last element may repeat */
473 if (delay_idx < ARRAY_SIZE(delay) - 1)
480 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
481 v = t4_read_reg(adap, ctl_reg);
482 if (v == X_CIM_PF_NOACCESS)
/* ownership back at PL with MSGVALID set means the reply is ready */
484 if (G_MBOWNER(v) == X_MBOWNER_PL) {
485 if (!(v & F_MBMSGVALID)) {
486 t4_write_reg(adap, ctl_reg,
487 V_MBOWNER(X_MBOWNER_NONE));
491 CXGBE_DEBUG_MBOX(adap,
492 "%s: mbox %u: %016llx %016llx %016llx %016llx "
493 "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
494 (unsigned long long)t4_read_reg64(adap, data_reg),
495 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
496 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
497 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
498 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
499 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
500 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
501 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
503 CXGBE_DEBUG_MBOX(adap,
504 "command %#x completed in %d ms (%ssleeping)\n",
506 i + ms, sleep_ok ? "" : "non-");
/* a FW_DEBUG_CMD reply is an assertion report, not our reply */
508 res = t4_read_reg64(adap, data_reg);
509 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
510 fw_asrt(adap, data_reg);
511 res = V_FW_CMD_RETVAL(EIO);
513 get_mbox_rpl(adap, rpl, size / 8, data_reg);
515 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
517 t4_os_atomic_list_del(&entry, &adap->mbox_list,
520 return -G_FW_CMD_RETVAL((int)res);
525 * We timed out waiting for a reply to our mailbox command. Report
526 * the error and also check to see if the firmware reported any
529 dev_err(adap, "command %#x in mailbox %d timed out\n",
530 *(const u8 *)cmd, mbox);
531 T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
534 t4_report_fw_error(adap);
536 return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
539 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
540 void *rpl, bool sleep_ok)
542 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
547 * t4_get_regs_len - return the size of the chips register set
548 * @adapter: the adapter
550 * Returns the size of the chip's BAR0 register space.
552 unsigned int t4_get_regs_len(struct adapter *adapter)
554 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
556 switch (chip_version) {
559 return T5_REGMAP_SIZE;
563 "Unsupported chip version %d\n", chip_version);
568 * t4_get_regs - read chip registers into provided buffer
570 * @buf: register buffer
571 * @buf_size: size (in bytes) of register buffer
573 * If the provided register buffer isn't large enough for the chip's
574 * full register range, the register dump will be truncated to the
575 * register buffer's size.
577 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
/* NOTE(review): the t5/t6 register-range tables below span hundreds of
 * original lines that were elided from this extract (579-1353 and
 * 1354-1914); only the declarations remain.  Code is byte-preserved.
 */
579 static const unsigned int t5_reg_ranges[] = {
1354 static const unsigned int t6_reg_ranges[] = {
1915 u32 *buf_end = (u32 *)((char *)buf + buf_size);
1916 const unsigned int *reg_ranges;
1917 int reg_ranges_size, range;
1918 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
1920 /* Select the right set of register ranges to dump depending on the
1921 * adapter chip type.
1923 switch (chip_version) {
1925 reg_ranges = t5_reg_ranges;
1926 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
1930 reg_ranges = t6_reg_ranges;
1931 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
1936 "Unsupported chip version %d\n", chip_version);
1940 /* Clear the register buffer and insert the appropriate register
1941 * values selected by the above register ranges.
1943 memset(buf, 0, buf_size);
/* ranges are stored as (first_reg, last_reg) pairs, hence the += 2 */
1944 for (range = 0; range < reg_ranges_size; range += 2) {
1945 unsigned int reg = reg_ranges[range];
1946 unsigned int last_reg = reg_ranges[range + 1];
1947 u32 *bufp = (u32 *)((char *)buf + reg);
1949 /* Iterate across the register range filling in the register
1950 * buffer but don't write past the end of the register buffer.
1952 while (reg <= last_reg && bufp < buf_end) {
1953 *bufp++ = t4_read_reg(adap, reg);
1959 /* EEPROM reads take a few tens of us while writes can take a bit over 5 ms. */
1960 #define EEPROM_DELAY 10 /* 10us per poll spin */
1961 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
1963 #define EEPROM_STAT_ADDR 0x7bfc
1966 * Small utility function to wait till any outstanding VPD Access is complete.
1967 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
1968 * VPD Access in flight. This allows us to handle the problem of having a
1969 * previous VPD Access time out and prevent an attempt to inject a new VPD
1970 * Request before any in-flight VPD request has completed.
1972 static int t4_seeprom_wait(struct adapter *adapter)
1974 unsigned int base = adapter->params.pci.vpd_cap_addr;
1977 /* If no VPD Access is in flight, we can just return success right
1980 if (!adapter->vpd_busy)
1983 /* Poll the VPD Capability Address/Flag register waiting for it
1984 * to indicate that the operation is complete.
1986 max_poll = EEPROM_MAX_POLL;
1990 udelay(EEPROM_DELAY);
1991 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
1993 /* If the operation is complete, mark the VPD as no longer
1994 * busy and return success.
1996 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
1997 adapter->vpd_busy = 0;
2000 } while (--max_poll);
2002 /* Failure! Note that we leave the VPD Busy status set in order to
2003 * avoid pushing a new VPD Access request into the VPD Capability till
2004 * the current operation eventually succeeds. It's a bug to issue a
2005 * new request when an existing request is in flight and will result
2006 * in corrupt hardware state.
2012 * t4_seeprom_read - read a serial EEPROM location
2013 * @adapter: adapter to read
2014 * @addr: EEPROM virtual address
2015 * @data: where to store the read data
2017 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
2018 * VPD capability. Note that this function must be called with a virtual
2021 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
2023 unsigned int base = adapter->params.pci.vpd_cap_addr;
2026 /* VPD Accesses must alway be 4-byte aligned!
2028 if (addr >= EEPROMVSIZE || (addr & 3))
2031 /* Wait for any previous operation which may still be in flight to
2034 ret = t4_seeprom_wait(adapter);
2036 dev_err(adapter, "VPD still busy from previous operation\n");
2040 /* Issue our new VPD Read request, mark the VPD as being busy and wait
2041 * for our request to complete. If it doesn't complete, note the
2042 * error and return it to our caller. Note that we do not reset the
2045 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
2046 adapter->vpd_busy = 1;
2047 adapter->vpd_flag = PCI_VPD_ADDR_F;
2048 ret = t4_seeprom_wait(adapter);
2050 dev_err(adapter, "VPD read of address %#x failed\n", addr);
2054 /* Grab the returned data, swizzle it into our endianness and
2057 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
2058 *data = le32_to_cpu(*data);
2063 * t4_seeprom_write - write a serial EEPROM location
2064 * @adapter: adapter to write
2065 * @addr: virtual EEPROM address
2066 * @data: value to write
2068 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
2069 * VPD capability. Note that this function must be called with a virtual
2072 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
2074 unsigned int base = adapter->params.pci.vpd_cap_addr;
2079 /* VPD Accesses must alway be 4-byte aligned!
2081 if (addr >= EEPROMVSIZE || (addr & 3))
2084 /* Wait for any previous operation which may still be in flight to
2087 ret = t4_seeprom_wait(adapter);
2089 dev_err(adapter, "VPD still busy from previous operation\n");
2093 /* Issue our new VPD Read request, mark the VPD as being busy and wait
2094 * for our request to complete. If it doesn't complete, note the
2095 * error and return it to our caller. Note that we do not reset the
2098 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
2100 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
2101 (u16)addr | PCI_VPD_ADDR_F);
2102 adapter->vpd_busy = 1;
2103 adapter->vpd_flag = 0;
2104 ret = t4_seeprom_wait(adapter);
2106 dev_err(adapter, "VPD write of address %#x failed\n", addr);
2110 /* Reset PCI_VPD_DATA register after a transaction and wait for our
2111 * request to complete. If it doesn't complete, return error.
2113 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
2114 max_poll = EEPROM_MAX_POLL;
2116 udelay(EEPROM_DELAY);
2117 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
2118 } while ((stats_reg & 0x1) && --max_poll);
2122 /* Return success! */
2127 * t4_seeprom_wp - enable/disable EEPROM write protection
2128 * @adapter: the adapter
2129 * @enable: whether to enable or disable write protection
2131 * Enables or disables write protection on the serial EEPROM.
2133 int t4_seeprom_wp(struct adapter *adapter, int enable)
2135 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
2139 * t4_fw_tp_pio_rw - Access TP PIO through LDST
2140 * @adap: the adapter
2141 * @vals: where the indirect register values are stored/written
2142 * @nregs: how many indirect registers to read/write
2143 * @start_idx: index of first indirect register to read/write
2144 * @rw: Read (1) or Write (0)
2146 * Access TP PIO registers through LDST
2148 void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
2149 unsigned int start_index, unsigned int rw)
2151 int cmd = FW_LDST_ADDRSPC_TP_PIO;
2152 struct fw_ldst_cmd c;
2156 for (i = 0 ; i < nregs; i++) {
2157 memset(&c, 0, sizeof(c));
2158 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
2160 (rw ? F_FW_CMD_READ :
2162 V_FW_LDST_CMD_ADDRSPACE(cmd));
2163 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
2165 c.u.addrval.addr = cpu_to_be32(start_index + i);
2166 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
2167 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2170 vals[i] = be32_to_cpu(c.u.addrval.val);
2176 * t4_read_rss_key - read the global RSS key
2177 * @adap: the adapter
2178 * @key: 10-entry array holding the 320-bit RSS key
2180 * Reads the global 320-bit RSS key.
2182 void t4_read_rss_key(struct adapter *adap, u32 *key)
2184 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1);
2188 * t4_write_rss_key - program one of the RSS keys
2189 * @adap: the adapter
2190 * @key: 10-entry array holding the 320-bit RSS key
2191 * @idx: which RSS key to write
2193 * Writes one of the RSS keys with the given 320-bit value. If @idx is
2194 * 0..15 the corresponding entry in the RSS key table is written,
2195 * otherwise the global RSS key is written.
2197 void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
2199 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
2200 u8 rss_key_addr_cnt = 16;
2202 /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
2203 * allows access to key addresses 16-63 by using KeyWrAddrX
2204 * as index[5:4](upper 2) into key table
2206 if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
2207 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
2208 rss_key_addr_cnt = 32;
2210 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);
2212 if (idx >= 0 && idx < rss_key_addr_cnt) {
2213 if (rss_key_addr_cnt > 16)
2214 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2215 V_KEYWRADDRX(idx >> 4) |
2216 V_T6_VFWRADDR(idx) | F_KEYWREN);
2218 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2219 V_KEYWRADDR(idx) | F_KEYWREN);
2224 * t4_config_rss_range - configure a portion of the RSS mapping table
2225 * @adapter: the adapter
2226 * @mbox: mbox to use for the FW command
2227 * @viid: virtual interface whose RSS subtable is to be written
2228 * @start: start entry in the table to write
2229 * @n: how many table entries to write
2230 * @rspq: values for the "response queue" (Ingress Queue) lookup table
2231 * @nrspq: number of values in @rspq
2233 * Programs the selected part of the VI's RSS mapping table with the
2234 * provided values. If @nrspq < @n the supplied values are used repeatedly
2235 * until the full table range is populated.
2237 * The caller must ensure the values in @rspq are in the range allowed for
/* NOTE(review): this function is an elided extract (original line numbers
 * retained); the loop scaffolding and qbuf packing lines are missing.
 * Code is byte-preserved; only comments added/corrected.
 */
2240 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2241 int start, int n, const u16 *rspq, unsigned int nrspq)
2244 const u16 *rsp = rspq;
2245 const u16 *rsp_end = rspq + nrspq;
2246 struct fw_rss_ind_tbl_cmd cmd;
2248 memset(&cmd, 0, sizeof(cmd));
2249 cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2250 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2251 V_FW_RSS_IND_TBL_CMD_VIID(viid));
2252 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
2255 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2256 * Queue Identifiers. These Ingress Queue IDs are packed three to
2257 * a 32-bit word as 10-bit values with the upper remaining 2 bits
2261 int nq = min(n, 32);
2263 __be32 *qp = &cmd.iq0_to_iq2;
2266 * Set up the firmware RSS command header to send the next
2267 * "nq" Ingress Queue IDs to the firmware.
2269 cmd.niqid = cpu_to_be16(nq);
2270 cmd.startidx = cpu_to_be16(start);
2273 * "nq" more done for the start of the next loop.
2279 * While there are still Ingress Queue IDs to stuff into the
2280 * current firmware RSS command, retrieve them from the
2281 * Ingress Queue ID array and insert them into the command.
2285 * Grab up to the next 3 Ingress Queue IDs (wrapping
2286 * around the Ingress Queue ID array if necessary) and
2287 * insert them into the firmware RSS command at the
2288 * current 3-tuple position within the command.
2292 int nqbuf = min(3, nq);
2298 while (nqbuf && nq_packed < 32) {
2305 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2306 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2307 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2311 * Send this portion of the RSS table update to the firmware;
2312 * bail out on any errors.
2314 if (is_pf4(adapter))
2315 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd),
2318 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
2327 * t4_config_vi_rss - configure per VI RSS settings
2328 * @adapter: the adapter
2329 * @mbox: mbox to use for the FW command
2332 * @defq: id of the default RSS queue for the VI.
2334 * Configures VI-specific RSS properties.
2336 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2337 unsigned int flags, unsigned int defq)
2339 struct fw_rss_vi_config_cmd c;
2341 memset(&c, 0, sizeof(c));
2342 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2343 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2344 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2345 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
2346 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
2347 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2348 if (is_pf4(adapter))
2349 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2351 return t4vf_wr_mbox(adapter, &c, sizeof(c), NULL);
2355 * t4_read_config_vi_rss - read the configured per VI RSS settings
2356 * @adapter: the adapter
2357 * @mbox: mbox to use for the FW command
2359 * @flags: where to place the configured flags
2360 * @defq: where to place the id of the default RSS queue for the VI.
2362 * Read configured VI-specific RSS properties.
2364 int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2365 u64 *flags, unsigned int *defq)
2367 struct fw_rss_vi_config_cmd c;
2368 unsigned int result;
2371 memset(&c, 0, sizeof(c));
2372 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2373 F_FW_CMD_REQUEST | F_FW_CMD_READ |
2374 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2375 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
2376 ret = t4_wr_mbox(adapter, mbox, &c, sizeof(c), &c);
2378 result = be32_to_cpu(c.u.basicvirtual.defaultq_to_udpen);
2380 *defq = G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(result);
2382 *flags = result & M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ;
2389 * init_cong_ctrl - initialize congestion control parameters
2390 * @a: the alpha values for congestion control
2391 * @b: the beta values for congestion control
2393 * Initialize the congestion control parameters.
2395 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
/* NOTE(review): the body of this function (the alpha/beta fill tables,
 * original lines ~2396-2450) was elided from this extract; only the loop
 * header below remains.  Code is byte-preserved.
 */
2399 for (i = 0; i < 9; i++) {
2453 #define INIT_CMD(var, cmd, rd_wr) do { \
2454 (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
2455 F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
2456 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
2459 int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p)
2461 u32 cclk_param, cclk_val;
2465 * Ask firmware for the Core Clock since it knows how to translate the
2466 * Reference Clock ('V2') VPD field into a Core Clock value ...
2468 cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
2469 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
2470 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
2471 1, &cclk_param, &cclk_val);
2473 dev_err(adapter, "%s: error in fetching from coreclock - %d\n",
2479 dev_debug(adapter, "%s: p->cclk = %u\n", __func__, p->cclk);
2483 /* serial flash and firmware constants and flash config file constants */
2485 SF_ATTEMPTS = 10, /* max retries for SF operations */
2487 /* flash command opcodes */
2488 SF_PROG_PAGE = 2, /* program page */
2489 SF_WR_DISABLE = 4, /* disable writes */
2490 SF_RD_STATUS = 5, /* read status register */
2491 SF_WR_ENABLE = 6, /* enable writes */
2492 SF_RD_DATA_FAST = 0xb, /* read flash */
2493 SF_RD_ID = 0x9f, /* read ID */
2494 SF_ERASE_SECTOR = 0xd8, /* erase sector */
2498 * sf1_read - read data from the serial flash
2499 * @adapter: the adapter
2500 * @byte_cnt: number of bytes to read
2501 * @cont: whether another operation will be chained
2502 * @lock: whether to lock SF for PL access only
2503 * @valp: where to store the read data
2505 * Reads up to 4 bytes of data from the serial flash. The location of
2506 * the read needs to be specified prior to calling this by issuing the
2507 * appropriate commands to the serial flash.
2509 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2510 int lock, u32 *valp)
2514 if (!byte_cnt || byte_cnt > 4)
2516 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
2518 t4_write_reg(adapter, A_SF_OP,
2519 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
2520 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
2522 *valp = t4_read_reg(adapter, A_SF_DATA);
2527 * sf1_write - write data to the serial flash
2528 * @adapter: the adapter
2529 * @byte_cnt: number of bytes to write
2530 * @cont: whether another operation will be chained
2531 * @lock: whether to lock SF for PL access only
2532 * @val: value to write
2534 * Writes up to 4 bytes of data to the serial flash. The location of
2535 * the write needs to be specified prior to calling this by issuing the
2536 * appropriate commands to the serial flash.
2538 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
/* Same 1..4 byte limit and busy check as sf1_read(); error returns are
 * elided in this view.
 */
2541 if (!byte_cnt || byte_cnt > 4)
2543 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
/* Stage the data first, then start the write (V_OP(1) selects write). */
2545 t4_write_reg(adapter, A_SF_DATA, val);
2546 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
2547 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
2548 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
2552 * t4_read_flash - read words from serial flash
2553 * @adapter: the adapter
2554 * @addr: the start address for the read
2555 * @nwords: how many 32-bit words to read
2556 * @data: where to store the read data
2557 * @byte_oriented: whether to store data as bytes or as words
2559 * Read the specified number of 32-bit words from the serial flash.
2560 * If @byte_oriented is set the read data is stored as a byte array
2561 * (i.e., big-endian), otherwise as 32-bit words in the platform's
2562 * natural endianness.
2564 int t4_read_flash(struct adapter *adapter, unsigned int addr,
2565 unsigned int nwords, u32 *data, int byte_oriented)
/* Reject reads that run past the flash part or are misaligned (second
 * half of the condition elided in this view).
 */
2569 if (((addr + nwords * sizeof(u32)) > adapter->params.sf_size) ||
/* Build the SPI fast-read command: byte-swapped 24-bit address with the
 * SF_RD_DATA_FAST opcode in the low byte.
 */
2573 addr = rte_constant_bswap32(addr) | SF_RD_DATA_FAST;
2575 ret = sf1_write(adapter, 4, 1, 0, addr);
/* One dummy byte is clocked out after the fast-read command. */
2579 ret = sf1_read(adapter, 1, 1, 0, data);
2583 for ( ; nwords; nwords--, data++) {
/* Keep CONT set until the last word; lock SF on the final transfer. */
2584 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
2586 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
/* Flash data is big-endian; convert only when byte-oriented output
 * was requested (surrounding if is elided in this view).
 */
2590 *data = cpu_to_be32(*data);
2596 * t4_get_exprom_version - return the Expansion ROM version (if any)
2597 * @adapter: the adapter
2598 * @vers: where to place the version
2600 * Reads the Expansion ROM header from FLASH and returns the version
2601 * number (if present) through the @vers return value pointer. We return
2602 * this in the Firmware Version Format since it's convenient. Return
2603 * 0 on success, -ENOENT if no Expansion ROM is present.
2605 static int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
2607 struct exprom_header {
2608 unsigned char hdr_arr[16]; /* must start with 0x55aa */
2609 unsigned char hdr_ver[4]; /* Expansion ROM version */
/* Word-sized buffer so t4_read_flash() can fill it 32 bits at a time. */
2611 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
2615 ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
2616 ARRAY_SIZE(exprom_header_buf),
2617 exprom_header_buf, 0);
/* Validate the standard Expansion ROM signature (0x55 0xaa). */
2621 hdr = (struct exprom_header *)exprom_header_buf;
2622 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
/* Pack the four version bytes into Firmware Version Format. */
2625 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
2626 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
2627 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
2628 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
2633 * t4_get_fw_version - read the firmware version
2634 * @adapter: the adapter
2635 * @vers: where to place the version
2637 * Reads the FW version from flash.
2639 static int t4_get_fw_version(struct adapter *adapter, u32 *vers)
/* Single-word read of the fw_ver field of the firmware header in flash. */
2641 return t4_read_flash(adapter, FLASH_FW_START +
2642 offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
2646 * t4_get_bs_version - read the firmware bootstrap version
2647 * @adapter: the adapter
2648 * @vers: where to place the version
2650 * Reads the FW Bootstrap version from flash.
2652 static int t4_get_bs_version(struct adapter *adapter, u32 *vers)
/* Same layout as the main firmware header, but in the bootstrap region. */
2654 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
2655 offsetof(struct fw_hdr, fw_ver), 1,
2660 * t4_get_tp_version - read the TP microcode version
2661 * @adapter: the adapter
2662 * @vers: where to place the version
2664 * Reads the TP microcode version from flash.
2666 static int t4_get_tp_version(struct adapter *adapter, u32 *vers)
/* The TP microcode version lives inside the firmware header in flash. */
2668 return t4_read_flash(adapter, FLASH_FW_START +
2669 offsetof(struct fw_hdr, tp_microcode_ver),
2674 * t4_get_version_info - extract various chip/firmware version information
2675 * @adapter: the adapter
2677 * Reads various chip/firmware version numbers and stores them into the
2678 * adapter Adapter Parameters structure. If any of the efforts fails
2679 * the first failure will be returned, but all of the version numbers
2682 int t4_get_version_info(struct adapter *adapter)
/* FIRST_RET runs each fetch unconditionally but remembers only the first
 * non-zero return, so all version fields get a chance to populate.
 * (Macro continuation lines are elided in this view.)
 */
2686 #define FIRST_RET(__getvinfo) \
2688 int __ret = __getvinfo; \
2689 if (__ret && !ret) \
2693 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
2694 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
2695 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
2696 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
2704 * t4_dump_version_info - dump all of the adapter configuration IDs
2705 * @adapter: the adapter
2707 * Dumps all of the various bits of adapter configuration version/revision
2708 * IDs information. This is typically called at some point after
2709 * t4_get_version_info() has been called.
2711 void t4_dump_version_info(struct adapter *adapter)
2714 * Device information.
2716 dev_info(adapter, "Chelsio rev %d\n",
2717 CHELSIO_CHIP_RELEASE(adapter->params.chip));
/* Each section below: a zero version field means the component was never
 * read/loaded, otherwise decode it from Firmware Version Format.
 */
2722 if (!adapter->params.fw_vers)
2723 dev_warn(adapter, "No firmware loaded\n");
2725 dev_info(adapter, "Firmware version: %u.%u.%u.%u\n",
2726 G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
2727 G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
2728 G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
2729 G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
2732 * Bootstrap Firmware Version.
2734 if (!adapter->params.bs_vers)
2735 dev_warn(adapter, "No bootstrap loaded\n");
2737 dev_info(adapter, "Bootstrap version: %u.%u.%u.%u\n",
2738 G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers),
2739 G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers),
2740 G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers),
2741 G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers));
2744 * TP Microcode Version.
2746 if (!adapter->params.tp_vers)
2747 dev_warn(adapter, "No TP Microcode loaded\n");
2749 dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n",
2750 G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
2751 G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
2752 G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
2753 G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));
2756 * Expansion ROM version.
2758 if (!adapter->params.er_vers)
2759 dev_info(adapter, "No Expansion ROM loaded\n")
2761 dev_info(adapter, "Expansion ROM version: %u.%u.%u.%u\n",
2762 G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers),
2763 G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers),
2764 G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers),
2765 G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));
2768 #define ADVERT_MASK (V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED) | \
2771 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
2772 * @caps16: a 16-bit Port Capabilities value
2774 * Returns the equivalent 32-bit Port Capabilities value.
2776 fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
2778 fw_port_cap32_t caps32 = 0;
/* Per-flag translation: if the 16-bit capability bit is set, set the
 * corresponding 32-bit capability bit. (Macro continuation lines are
 * elided in this view.)
 */
2780 #define CAP16_TO_CAP32(__cap) \
2782 if (caps16 & FW_PORT_CAP_##__cap) \
2783 caps32 |= FW_PORT_CAP32_##__cap; \
2786 CAP16_TO_CAP32(SPEED_100M);
2787 CAP16_TO_CAP32(SPEED_1G);
2788 CAP16_TO_CAP32(SPEED_25G);
2789 CAP16_TO_CAP32(SPEED_10G);
2790 CAP16_TO_CAP32(SPEED_40G);
2791 CAP16_TO_CAP32(SPEED_100G);
2792 CAP16_TO_CAP32(FC_RX);
2793 CAP16_TO_CAP32(FC_TX);
2794 CAP16_TO_CAP32(ANEG);
2795 CAP16_TO_CAP32(MDIX);
2796 CAP16_TO_CAP32(MDIAUTO);
2797 CAP16_TO_CAP32(FEC_RS);
2798 CAP16_TO_CAP32(FEC_BASER_RS);
2799 CAP16_TO_CAP32(802_3_PAUSE);
2800 CAP16_TO_CAP32(802_3_ASM_DIR);
2802 #undef CAP16_TO_CAP32
2808 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
2809 * @caps32: a 32-bit Port Capabilities value
2811 * Returns the equivalent 16-bit Port Capabilities value. Note that
2812 * not all 32-bit Port Capabilities can be represented in the 16-bit
2813 * Port Capabilities and some fields/values may not make it.
2815 static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
2817 fw_port_cap16_t caps16 = 0;
/* Inverse of CAP16_TO_CAP32: only capabilities that exist in the 16-bit
 * encoding are carried over; anything else is dropped.
 */
2819 #define CAP32_TO_CAP16(__cap) \
2821 if (caps32 & FW_PORT_CAP32_##__cap) \
2822 caps16 |= FW_PORT_CAP_##__cap; \
2825 CAP32_TO_CAP16(SPEED_100M);
2826 CAP32_TO_CAP16(SPEED_1G);
2827 CAP32_TO_CAP16(SPEED_10G);
2828 CAP32_TO_CAP16(SPEED_25G);
2829 CAP32_TO_CAP16(SPEED_40G);
2830 CAP32_TO_CAP16(SPEED_100G);
2831 CAP32_TO_CAP16(FC_RX);
2832 CAP32_TO_CAP16(FC_TX);
2833 CAP32_TO_CAP16(802_3_PAUSE);
2834 CAP32_TO_CAP16(802_3_ASM_DIR);
2835 CAP32_TO_CAP16(ANEG);
2836 CAP32_TO_CAP16(MDIX);
2837 CAP32_TO_CAP16(MDIAUTO);
2838 CAP32_TO_CAP16(FEC_RS);
2839 CAP32_TO_CAP16(FEC_BASER_RS);
2841 #undef CAP32_TO_CAP16
2846 /* Translate Firmware Pause specification to Common Code */
2847 static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
2849 enum cc_pause cc_pause = 0;
2851 if (fw_pause & FW_PORT_CAP32_FC_RX)
2852 cc_pause |= PAUSE_RX;
2853 if (fw_pause & FW_PORT_CAP32_FC_TX)
2854 cc_pause |= PAUSE_TX;
2859 /* Translate Common Code Pause Frame specification into Firmware */
2860 static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
2862 fw_port_cap32_t fw_pause = 0;
2864 if (cc_pause & PAUSE_RX)
2865 fw_pause |= FW_PORT_CAP32_FC_RX;
2866 if (cc_pause & PAUSE_TX)
2867 fw_pause |= FW_PORT_CAP32_FC_TX;
2872 /* Translate Firmware Forward Error Correction specification to Common Code */
2873 static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
2875 enum cc_fec cc_fec = 0;
2877 if (fw_fec & FW_PORT_CAP32_FEC_RS)
2879 if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
2880 cc_fec |= FEC_BASER_RS;
2885 /* Translate Common Code Forward Error Correction specification to Firmware */
2886 static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
2888 fw_port_cap32_t fw_fec = 0;
2890 if (cc_fec & FEC_RS)
2891 fw_fec |= FW_PORT_CAP32_FEC_RS;
2892 if (cc_fec & FEC_BASER_RS)
2893 fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
2899 * t4_link_l1cfg - apply link configuration to MAC/PHY
2900 * @adapter: the adapter
2901 * @mbox: the Firmware Mailbox to use
2902 * @port: the Port ID
2903 * @lc: the Port's Link Configuration
2905 * Set up a port's MAC and PHY according to a desired link configuration.
2906 * - If the PHY can auto-negotiate first decide what to advertise, then
2907 * enable/disable auto-negotiation as desired, and reset.
2908 * - If the PHY does not auto-negotiate just reset it.
2909 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
2910 * otherwise do it later based on the outcome of auto-negotiation.
2912 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
2913 struct link_config *lc)
2915 unsigned int fw_mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
2916 unsigned int fw_caps = adap->params.fw_caps_support;
/* NOTE(review): cc_fec is declared fw_port_cap32_t here although it holds
 * an enum cc_fec value — works because both are integral, but worth a
 * separate declaration for type clarity.
 */
2917 fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;
2918 struct fw_port_cmd cmd;
2922 fw_fc = cc_to_fwcap_pause(lc->requested_fc);
2924 /* Convert Common Code Forward Error Control settings into the
2925 * Firmware's API. If the current Requested FEC has "Automatic"
2926 * (IEEE 802.3) specified, then we use whatever the Firmware
2927 * sent us as part of it's IEEE 802.3-based interpratation of
2928 * the Transceiver Module EPROM FEC parameters. Otherwise we
2929 * use whatever is in the current Requested FEC settings.
2931 if (lc->requested_fec & FEC_AUTO)
2932 cc_fec = lc->auto_fec;
2934 cc_fec = lc->requested_fec;
2935 fw_fec = cc_to_fwcap_fec(cc_fec);
2937 /* Figure out what our Requested Port Capabilities are going to be.
/* Three cases: no autoneg support at all, autoneg explicitly disabled,
 * or autoneg enabled (advertise full acaps).
 */
2939 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
2940 rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
2941 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
2943 } else if (lc->autoneg == AUTONEG_DISABLE) {
2944 rcap = lc->requested_speed | fw_fc | fw_fec | fw_mdi;
2945 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
2948 rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
2951 /* And send that on to the Firmware ...
2953 memset(&cmd, 0, sizeof(cmd));
2954 cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
2955 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
2956 V_FW_PORT_CMD_PORTID(port));
2957 cmd.action_to_len16 =
2958 cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ?
2959 FW_PORT_ACTION_L1_CFG :
2960 FW_PORT_ACTION_L1_CFG32) |
/* Old (16-bit caps) firmware gets the down-converted capability word. */
2963 if (fw_caps == FW_CAPS16)
2964 cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
2966 cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
2968 return t4_wr_mbox(adap, mbox, &cmd, sizeof(cmd), NULL);
2972 * t4_flash_cfg_addr - return the address of the flash configuration file
2973 * @adapter: the adapter
2975 * Return the address within the flash where the Firmware Configuration
2976 * File is stored, or an error if the device FLASH is too small to contain
2977 * a Firmware Configuration File.
2979 int t4_flash_cfg_addr(struct adapter *adapter)
2982 * If the device FLASH isn't large enough to hold a Firmware
2983 * Configuration File, return an error.
/* Error-return statement for the too-small case is elided in this view. */
2985 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
2988 return FLASH_CFG_START;
/* PF-level interrupt causes we enable: PF switch and CIM events. */
2991 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
2994 * t4_intr_enable - enable interrupts
2995 * @adapter: the adapter whose interrupts should be enabled
2997 * Enable PF-specific interrupts for the calling function and the top-level
2998 * interrupt concentrator for global interrupts. Interrupts are already
2999 * enabled at each module, here we just enable the roots of the interrupt
3002 * Note: this function should be called only when the driver manages
3003 * non PF-specific interrupts from the various HW modules. Only one PCI
3004 * function at a time should be doing this.
3006 void t4_intr_enable(struct adapter *adapter)
/* T6 moved the SOURCEPF field within PL_WHOAMI, hence the split decode. */
3009 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
3010 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
3011 G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
/* NOTE(review): the declaration of 'val' is elided in this view; extra
 * T4/T5-only SGE error bits are folded into the enable mask below.
 */
3013 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
3014 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
3015 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
3016 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
3017 F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
3018 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
3019 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
3020 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
3021 F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
3022 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
/* Route this PF's interrupts through the top-level concentrator. */
3023 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
3027 * t4_intr_disable - disable interrupts
3028 * @adapter: the adapter whose interrupts should be disabled
3030 * Disable interrupts. We only disable the top-level interrupt
3031 * concentrators. The caller must be a PCI function managing global
3034 void t4_intr_disable(struct adapter *adapter)
/* Mirror of t4_intr_enable(): decode this PF, then clear its PF-level
 * enable mask and its bit in the top-level interrupt map.
 */
3036 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
3037 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
3038 G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
3040 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
3041 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
3045 * t4_get_port_type_description - return Port Type string description
3046 * @port_type: firmware Port Type enumeration
3048 const char *t4_get_port_type_description(enum fw_port_type port_type)
/* Table entries (one string per fw_port_type value) are elided in this
 * view; out-of-range values fall through to the return after the if
 * (also elided).
 */
3050 static const char * const port_type_description[] = {
3075 if (port_type < ARRAY_SIZE(port_type_description))
3076 return port_type_description[port_type];
3081 * t4_get_mps_bg_map - return the buffer groups associated with a port
3082 * @adap: the adapter
3083 * @pidx: the port index
3085 * Returns a bitmap indicating which MPS buffer groups are associated
3086 * with the given port. Bit i is set if buffer group i is used by the
3089 unsigned int t4_get_mps_bg_map(struct adapter *adap, unsigned int pidx)
3091 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
/* NUMPORTS is a log2 field; nports is the actual port count. */
3092 unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adap,
3095 if (pidx >= nports) {
3096 dev_warn(adap, "MPS Port Index %d >= Nports %d\n",
/* Mapping depends on both chip generation and port count; case labels
 * for chip version and nports==1 are elided in this view.
 */
3101 switch (chip_version) {
3106 case 2: return 3 << (2 * pidx);
3107 case 4: return 1 << pidx;
3113 case 2: return 1 << (2 * pidx);
3118 dev_err(adap, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
3119 chip_version, nports);
3124 * t4_get_tp_ch_map - return TP ingress channels associated with a port
3125 * @adapter: the adapter
3126 * @pidx: the port index
3128 * Returns a bitmap indicating which TP Ingress Channels are associated with
3129 * a given Port. Bit i is set if TP Ingress Channel i is used by the Port.
3131 unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx)
3133 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
3134 unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter,
3137 if (pidx >= nports) {
3138 dev_warn(adap, "TP Port Index %d >= Nports %d\n",
3143 switch (chip_version) {
3146 /* Note that this happens to be the same values as the MPS
3147 * Buffer Group Map for these Chips. But we replicate the code
3148 * here because they're really separate concepts.
3152 case 2: return 3 << (2 * pidx);
3153 case 4: return 1 << pidx;
3159 case 2: return 1 << pidx;
3164 dev_err(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n",
3165 chip_version, nports);
3170 * t4_get_port_stats - collect port statistics
3171 * @adap: the adapter
3172 * @idx: the port index
3173 * @p: the stats structure to fill
3175 * Collect statistics related to the given port from HW.
3177 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3179 u32 bgmap = t4_get_mps_bg_map(adap, idx);
3180 u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
/* GET_STAT reads one 64-bit per-port counter; the register block base
 * moved between T4 and T5+, hence the is_t4() split.  GET_STAT_COM reads
 * a common (non-per-port) 64-bit counter.
 */
3182 #define GET_STAT(name) \
3183 t4_read_reg64(adap, \
3184 (is_t4(adap->params.chip) ? \
3185 PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) :\
3186 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
3187 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3189 p->tx_octets = GET_STAT(TX_PORT_BYTES);
3190 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
3191 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
3192 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
3193 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
3194 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
3195 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
3196 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
3197 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
3198 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
3199 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
3200 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3201 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
3202 p->tx_drop = GET_STAT(TX_PORT_DROP);
3203 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
3204 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
3205 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
3206 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
3207 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
3208 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
3209 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
3210 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
3211 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
/* On T5+ the HW may fold PAUSE frames into the frame/octet counters;
 * subtract them back out when the corresponding STAT_CTL bits are set.
 * (64 here is presumably the PAUSE frame wire size — TODO confirm.)
 */
3213 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
3214 if (stat_ctl & F_COUNTPAUSESTATTX) {
3215 p->tx_frames -= p->tx_pause;
3216 p->tx_octets -= p->tx_pause * 64;
3218 if (stat_ctl & F_COUNTPAUSEMCTX)
3219 p->tx_mcast_frames -= p->tx_pause;
3222 p->rx_octets = GET_STAT(RX_PORT_BYTES);
3223 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
3224 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
3225 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
3226 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
3227 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
3228 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3229 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
3230 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
3231 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
3232 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
3233 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
3234 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
3235 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
3236 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
3237 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
3238 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3239 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
3240 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
3241 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
3242 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
3243 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
3244 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
3245 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
3246 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
3247 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
3248 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
/* Same PAUSE-frame compensation on the receive side. */
3250 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
3251 if (stat_ctl & F_COUNTPAUSESTATRX) {
3252 p->rx_frames -= p->rx_pause;
3253 p->rx_octets -= p->rx_pause * 64;
3255 if (stat_ctl & F_COUNTPAUSEMCRX)
3256 p->rx_mcast_frames -= p->rx_pause;
/* Buffer-group drop/truncate counters only apply to the groups this
 * port owns (bgmap from t4_get_mps_bg_map()).
 */
3259 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3260 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3261 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3262 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3263 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3264 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3265 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3266 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3273 * t4_get_port_stats_offset - collect port stats relative to a previous snapshot
3274 * @adap: The adapter
3276 * @stats: Current stats to fill
3277 * @offset: Previous stats snapshot
3279 void t4_get_port_stats_offset(struct adapter *adap, int idx,
3280 struct port_stats *stats,
3281 struct port_stats *offset)
/* Read the live counters, then subtract the snapshot field by field by
 * treating both structs as flat u64 arrays (loop body elided in this
 * view; this presumably requires port_stats to contain only u64s —
 * TODO confirm against the struct definition).
 */
3286 t4_get_port_stats(adap, idx, stats);
3287 for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
3288 i < (sizeof(struct port_stats) / sizeof(u64));
3294 * t4_clr_port_stats - clear port statistics
3295 * @adap: the adapter
3296 * @idx: the port index
3298 * Clear HW statistics for the given port.
3300 void t4_clr_port_stats(struct adapter *adap, int idx)
3303 u32 bgmap = t4_get_mps_bg_map(adap, idx);
/* The per-port stats block base moved between T4 and T5+. */
3306 if (is_t4(adap->params.chip))
3307 port_base_addr = PORT_BASE(idx);
3309 port_base_addr = T5_PORT_BASE(idx);
/* Counters are 64-bit register pairs, hence the stride of 8. */
3311 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3312 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3313 t4_write_reg(adap, port_base_addr + i, 0);
3314 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3315 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3316 t4_write_reg(adap, port_base_addr + i, 0);
/* Also clear the drop/truncate counters of the buffer groups this port
 * owns (write statements elided in this view).
 */
3317 for (i = 0; i < 4; i++)
3318 if (bgmap & (1 << i)) {
3320 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
3323 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
3329 * t4_fw_hello - establish communication with FW
3330 * @adap: the adapter
3331 * @mbox: mailbox to use for the FW command
3332 * @evt_mbox: mailbox to receive async FW events
3333 * @master: specifies the caller's willingness to be the device master
3334 * @state: returns the current device state (if non-NULL)
3336 * Issues a command to establish communication with FW. Returns either
3337 * an error (negative integer) or the mailbox of the Master PF.
3339 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
3340 enum dev_master master, enum dev_state *state)
3343 struct fw_hello_cmd c;
3345 unsigned int master_mbox;
3346 int retries = FW_CMD_HELLO_RETRIES;
/* Build the HELLO: declare our master willingness, the async-event
 * mailbox, and request a CLEARINIT.
 */
3349 memset(&c, 0, sizeof(c));
3350 INIT_CMD(c, HELLO, WRITE);
3351 c.err_to_clearinit = cpu_to_be32(
3352 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
3353 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
3354 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
3355 M_FW_HELLO_CMD_MBMASTER) |
3356 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
3357 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
3358 F_FW_HELLO_CMD_CLEARINIT);
3361 * Issue the HELLO command to the firmware. If it's not successful
3362 * but indicates that we got a "busy" or "timeout" condition, retry
3363 * the HELLO until we exhaust our retry limit. If we do exceed our
3364 * retry limit, check to see if the firmware left us any error
3365 * information and report that if so ...
3367 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3368 if (ret != FW_SUCCESS) {
/* Retry target (goto/label elided in this view). */
3369 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
3371 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
3372 t4_report_fw_error(adap);
/* Decode the response: who is master, and what device state FW reports. */
3376 v = be32_to_cpu(c.err_to_clearinit);
3377 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
3379 if (v & F_FW_HELLO_CMD_ERR)
3380 *state = DEV_STATE_ERR;
3381 else if (v & F_FW_HELLO_CMD_INIT)
3382 *state = DEV_STATE_INIT;
3384 *state = DEV_STATE_UNINIT;
3388 * If we're not the Master PF then we need to wait around for the
3389 * Master PF Driver to finish setting up the adapter.
3391 * Note that we also do this wait if we're a non-Master-capable PF and
3392 * there is no current Master PF; a Master PF may show up momentarily
3393 * and we wouldn't want to fail pointlessly. (This can happen when an
3394 * OS loads lots of different drivers rapidly at the same time). In
3395 * this case, the Master PF returned by the firmware will be
3396 * M_PCIE_FW_MASTER so the test below will work ...
3398 if ((v & (F_FW_HELLO_CMD_ERR | F_FW_HELLO_CMD_INIT)) == 0 &&
3399 master_mbox != mbox) {
3400 int waiting = FW_CMD_HELLO_TIMEOUT;
3403 * Wait for the firmware to either indicate an error or
3404 * initialized state. If we see either of these we bail out
3405 * and report the issue to the caller. If we exhaust the
3406 * "hello timeout" and we haven't exhausted our retries, try
3407 * again. Otherwise bail with a timeout error.
3416 * If neither Error nor Initialialized are indicated
3417 * by the firmware keep waiting till we exaust our
3418 * timeout ... and then retry if we haven't exhausted
/* Poll PCIE_FW for ERR/INIT (sleep/retry bookkeeping elided in this
 * view).
 */
3421 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
3422 if (!(pcie_fw & (F_PCIE_FW_ERR | F_PCIE_FW_INIT))) {
3433 * We either have an Error or Initialized condition
3434 * report errors preferentially.
3437 if (pcie_fw & F_PCIE_FW_ERR)
3438 *state = DEV_STATE_ERR;
3439 else if (pcie_fw & F_PCIE_FW_INIT)
3440 *state = DEV_STATE_INIT;
3444 * If we arrived before a Master PF was selected and
3445 * there's not a valid Master PF, grab its identity
3448 if (master_mbox == M_PCIE_FW_MASTER &&
3449 (pcie_fw & F_PCIE_FW_MASTER_VLD))
3450 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
3459 * t4_fw_bye - end communication with FW
3460 * @adap: the adapter
3461 * @mbox: mailbox to use for the FW command
3463 * Issues a command to terminate communication with FW.
3465 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
3467 struct fw_bye_cmd c;
/* Simple fire-and-wait BYE mailbox command; no response payload needed. */
3469 memset(&c, 0, sizeof(c));
3470 INIT_CMD(c, BYE, WRITE);
3471 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3475 * t4_fw_reset - issue a reset to FW
3476 * @adap: the adapter
3477 * @mbox: mailbox to use for the FW command
3478 * @reset: specifies the type of reset to perform
3480 * Issues a reset command of the specified type to FW.
3482 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
3484 struct fw_reset_cmd c;
/* @reset carries PL_RST-style bits (e.g. F_PIORST | F_PIORSTMODE). */
3486 memset(&c, 0, sizeof(c));
3487 INIT_CMD(c, RESET, WRITE);
3488 c.val = cpu_to_be32(reset);
3489 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3493 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
3494 * @adap: the adapter
3495 * @mbox: mailbox to use for the FW RESET command (if desired)
3496 * @force: force uP into RESET even if FW RESET command fails
3498 * Issues a RESET command to firmware (if desired) with a HALT indication
3499 * and then puts the microprocessor into RESET state. The RESET command
3500 * will only be issued if a legitimate mailbox is provided (mbox <=
3501 * M_PCIE_FW_MASTER).
3503 * This is generally used in order for the host to safely manipulate the
3504 * adapter without fear of conflicting with whatever the firmware might
3505 * be doing. The only way out of this state is to RESTART the firmware
3508 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
3513 * If a legitimate mailbox is provided, issue a RESET command
3514 * with a HALT indication.
3516 if (mbox <= M_PCIE_FW_MASTER) {
3517 struct fw_reset_cmd c;
3519 memset(&c, 0, sizeof(c));
3520 INIT_CMD(c, RESET, WRITE);
3521 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
3522 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
3523 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3527 * Normally we won't complete the operation if the firmware RESET
3528 * command fails but if our caller insists we'll go ahead and put the
3529 * uP into RESET. This can be useful if the firmware is hung or even
3530 * missing ... We'll have to take the risk of putting the uP into
3531 * RESET without the cooperation of firmware in that case.
3533 * We also force the firmware's HALT flag to be on in case we bypassed
3534 * the firmware RESET command above or we're dealing with old firmware
3535 * which doesn't have the HALT capability. This will serve as a flag
3536 * for the incoming firmware to know that it's coming out of a HALT
3537 * rather than a RESET ... if it's new enough to understand that ...
3539 if (ret == 0 || force) {
/* Assert uP reset, then latch the HALT flag in PCIE_FW (second argument
 * of the set_reg_field continues on an elided line).
 */
3540 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
3541 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
3546 * And we always return the result of the firmware RESET command
3547 * even when we force the uP into RESET ...
3553 * t4_fw_restart - restart the firmware by taking the uP out of RESET
3554 * @adap: the adapter
3555 * @mbox: mailbox to use for the FW RESET command (if desired)
3556 * @reset: if we want to do a RESET to restart things
3558 * Restart firmware previously halted by t4_fw_halt(). On successful
3559 * return the previous PF Master remains as the new PF Master and there
3560 * is no need to issue a new HELLO command, etc.
3562 * We do this in two ways:
3564 * 1. If we're dealing with newer firmware we'll simply want to take
3565 * the chip's microprocessor out of RESET. This will cause the
3566 * firmware to start up from its start vector. And then we'll loop
3567 * until the firmware indicates it's started again (PCIE_FW.HALT
3568 * reset to 0) or we timeout.
3570 * 2. If we're dealing with older firmware then we'll need to RESET
3571 * the chip since older firmware won't recognize the PCIE_FW.HALT
3572 * flag and automatically RESET itself on startup.
3574 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
3578 * Since we're directing the RESET instead of the firmware
3579 * doing it automatically, we need to clear the PCIE_FW.HALT
3582 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
3585 * If we've been given a valid mailbox, first try to get the
3586 * firmware to do the RESET. If that works, great and we can
3587 * return success. Otherwise, if we haven't been given a
3588 * valid mailbox or the RESET command failed, fall back to
3589 * hitting the chip with a hammer.
3591 if (mbox <= M_PCIE_FW_MASTER) {
3592 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
3594 if (t4_fw_reset(adap, mbox,
3595 F_PIORST | F_PIORSTMODE) == 0)
/* Fallback "hammer": PL-level PIO reset of the whole chip. */
3599 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
/* Case 1: release the uP and poll PCIE_FW.HALT until the firmware
 * reports it has restarted (delay/return statements elided in this
 * view).
 */
3604 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
3605 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
3606 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
3617 * t4_fl_pkt_align - return the fl packet alignment
3618 * @adap: the adapter
3620 * T4 has a single field to specify the packing and padding boundary.
3621 * T5 onwards has separate fields for this and hence the alignment for
3622 * next packet offset is maximum of these two.
3624 int t4_fl_pkt_align(struct adapter *adap)
3626 u32 sge_control, sge_control2;
3627 unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
3629 sge_control = t4_read_reg(adap, A_SGE_CONTROL);
3631 /* T4 uses a single control field to specify both the PCIe Padding and
3632 * Packing Boundary. T5 introduced the ability to specify these
3633 * separately. The actual Ingress Packet Data alignment boundary
3634 * within Packed Buffer Mode is the maximum of these two
/* T6 re-encodes the INGPADBOUNDARY field, hence a different shift. */
3637 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
3638 ingpad_shift = X_INGPADBOUNDARY_SHIFT;
3640 ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
3642 ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
3644 fl_align = ingpadboundary;
3645 if (!is_t4(adap->params.chip)) {
/* T5+: decode the separate packing boundary; the encoded value 0 means
 * a special 16-byte boundary rather than 1 << SHIFT.
 */
3646 sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
3647 ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
3648 if (ingpackboundary == X_INGPACKBOUNDARY_16B)
3649 ingpackboundary = 16;
3651 ingpackboundary = 1 << (ingpackboundary +
3652 X_INGPACKBOUNDARY_SHIFT);
3654 fl_align = max(ingpadboundary, ingpackboundary);
3660 * t4_fixup_host_params_compat - fix up host-dependent parameters
3661 * @adap: the adapter
3662 * @page_size: the host's Base Page Size
3663 * @cache_line_size: the host's Cache Line Size
3664 * @chip_compat: maintain compatibility with designated chip
3666 * Various registers in the chip contain values which are dependent on the
3667 * host's Base Page and Cache Line Sizes. This function will fix all of
3668 * those registers with the appropriate values as passed in ...
3670 * @chip_compat is used to limit the set of changes that are made
3671 * to be compatible with the indicated chip release. This is used by
3672 * drivers to maintain compatibility with chip register settings when
3673 * the drivers haven't [yet] been updated with new chip support.
3675 int t4_fixup_host_params_compat(struct adapter *adap,
3676 unsigned int page_size,
3677 unsigned int cache_line_size,
3678 enum chip_type chip_compat)
3680 unsigned int page_shift = cxgbe_fls(page_size) - 1;
3681 unsigned int sge_hps = page_shift - 10;
3682 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
3683 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
3684 unsigned int fl_align_log = cxgbe_fls(fl_align) - 1;
3686 t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
3687 V_HOSTPAGESIZEPF0(sge_hps) |
3688 V_HOSTPAGESIZEPF1(sge_hps) |
3689 V_HOSTPAGESIZEPF2(sge_hps) |
3690 V_HOSTPAGESIZEPF3(sge_hps) |
3691 V_HOSTPAGESIZEPF4(sge_hps) |
3692 V_HOSTPAGESIZEPF5(sge_hps) |
3693 V_HOSTPAGESIZEPF6(sge_hps) |
3694 V_HOSTPAGESIZEPF7(sge_hps));
3696 if (is_t4(adap->params.chip) || is_t4(chip_compat))
3697 t4_set_reg_field(adap, A_SGE_CONTROL,
3698 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
3699 F_EGRSTATUSPAGESIZE,
3700 V_INGPADBOUNDARY(fl_align_log -
3701 X_INGPADBOUNDARY_SHIFT) |
3702 V_EGRSTATUSPAGESIZE(stat_len != 64));
3704 unsigned int pack_align;
3705 unsigned int ingpad, ingpack;
3706 unsigned int pcie_cap;
3709 * T5 introduced the separation of the Free List Padding and
3710 * Packing Boundaries. Thus, we can select a smaller Padding
3711 * Boundary to avoid uselessly chewing up PCIe Link and Memory
3712 * Bandwidth, and use a Packing Boundary which is large enough
3713 * to avoid false sharing between CPUs, etc.
3715 * For the PCI Link, the smaller the Padding Boundary the
3716 * better. For the Memory Controller, a smaller Padding
3717 * Boundary is better until we cross under the Memory Line
3718 * Size (the minimum unit of transfer to/from Memory). If we
3719 * have a Padding Boundary which is smaller than the Memory
3720 * Line Size, that'll involve a Read-Modify-Write cycle on the
3721 * Memory Controller which is never good.
3724 /* We want the Packing Boundary to be based on the Cache Line
3725 * Size in order to help avoid False Sharing performance
3726 * issues between CPUs, etc. We also want the Packing
3727 * Boundary to incorporate the PCI-E Maximum Payload Size. We
3728 * get best performance when the Packing Boundary is a
3729 * multiple of the Maximum Payload Size.
3731 pack_align = fl_align;
3732 pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
3734 unsigned int mps, mps_log;
3737 /* The PCIe Device Control Maximum Payload Size field
3738 * [bits 7:5] encodes sizes as powers of 2 starting at
3741 t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
3743 mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
3745 if (mps > pack_align)
3750 * N.B. T5 has a different interpretation of the "0" value for
3751 * the Packing Boundary. This corresponds to 16 bytes instead
3752 * of the expected 32 bytes. We never have a Packing Boundary
3753 * less than 32 bytes so we can't use that special value but
3754 * on the other hand, if we wanted 32 bytes, the best we can
3755 * really do is 64 bytes ...
3757 if (pack_align <= 16) {
3758 ingpack = X_INGPACKBOUNDARY_16B;
3760 } else if (pack_align == 32) {
3761 ingpack = X_INGPACKBOUNDARY_64B;
3764 unsigned int pack_align_log = cxgbe_fls(pack_align) - 1;
3766 ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
3767 fl_align = pack_align;
3770 /* Use the smallest Ingress Padding which isn't smaller than
3771 * the Memory Controller Read/Write Size. We'll take that as
3772 * being 8 bytes since we don't know of any system with a
3773 * wider Memory Controller Bus Width.
3775 if (is_t5(adap->params.chip))
3776 ingpad = X_INGPADBOUNDARY_32B;
3778 ingpad = X_T6_INGPADBOUNDARY_8B;
3779 t4_set_reg_field(adap, A_SGE_CONTROL,
3780 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
3781 F_EGRSTATUSPAGESIZE,
3782 V_INGPADBOUNDARY(ingpad) |
3783 V_EGRSTATUSPAGESIZE(stat_len != 64));
3784 t4_set_reg_field(adap, A_SGE_CONTROL2,
3785 V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
3786 V_INGPACKBOUNDARY(ingpack));
3790 * Adjust various SGE Free List Host Buffer Sizes.
3792 * The first four entries are:
3796 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
3797 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
3799 * For the single-MTU buffers in unpacked mode we need to include
3800 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
3801 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
3802 * Padding boundary. All of these are accommodated in the Factory
3803 * Default Firmware Configuration File but we need to adjust it for
3804 * this host's cache line size.
3806 t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
3807 t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
3808 (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align - 1)
3810 t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
3811 (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align - 1)
3814 t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));
3820 * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
3821 * @adap: the adapter
3822 * @page_size: the host's Base Page Size
3823 * @cache_line_size: the host's Cache Line Size
3825 * Various registers in T4 contain values which are dependent on the
3826 * host's Base Page and Cache Line Sizes. This function will fix all of
3827 * those registers with the appropriate values as passed in ...
3829 * This routine makes changes which are compatible with T4 chips.
3831 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3832 unsigned int cache_line_size)
3834 return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
3839 * t4_fw_initialize - ask FW to initialize the device
3840 * @adap: the adapter
3841 * @mbox: mailbox to use for the FW command
3843 * Issues a command to FW to partially initialize the device. This
3844 * performs initialization that generally doesn't depend on user input.
3846 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3848 struct fw_initialize_cmd c;
3850 memset(&c, 0, sizeof(c));
3851 INIT_CMD(c, INITIALIZE, WRITE);
3852 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3856 * t4_query_params_rw - query FW or device parameters
3857 * @adap: the adapter
3858 * @mbox: mailbox to use for the FW command
3861 * @nparams: the number of parameters
3862 * @params: the parameter names
3863 * @val: the parameter values
3864 * @rw: Write and read flag
3866 * Reads the value of FW or device parameters. Up to 7 parameters can be
3869 static int t4_query_params_rw(struct adapter *adap, unsigned int mbox,
3870 unsigned int pf, unsigned int vf,
3871 unsigned int nparams, const u32 *params,
3876 struct fw_params_cmd c;
3877 __be32 *p = &c.param[0].mnem;
3882 memset(&c, 0, sizeof(c));
3883 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3884 F_FW_CMD_REQUEST | F_FW_CMD_READ |
3885 V_FW_PARAMS_CMD_PFN(pf) |
3886 V_FW_PARAMS_CMD_VFN(vf));
3887 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3889 for (i = 0; i < nparams; i++) {
3890 *p++ = cpu_to_be32(*params++);
3892 *p = cpu_to_be32(*(val + i));
3896 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3898 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3899 *val++ = be32_to_cpu(*p);
3903 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3904 unsigned int vf, unsigned int nparams, const u32 *params,
3907 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
3911 * t4_set_params_timeout - sets FW or device parameters
3912 * @adap: the adapter
3913 * @mbox: mailbox to use for the FW command
3916 * @nparams: the number of parameters
3917 * @params: the parameter names
3918 * @val: the parameter values
3919 * @timeout: the timeout time
3921 * Sets the value of FW or device parameters. Up to 7 parameters can be
3922 * specified at once.
3924 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
3925 unsigned int pf, unsigned int vf,
3926 unsigned int nparams, const u32 *params,
3927 const u32 *val, int timeout)
3929 struct fw_params_cmd c;
3930 __be32 *p = &c.param[0].mnem;
3935 memset(&c, 0, sizeof(c));
3936 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3937 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3938 V_FW_PARAMS_CMD_PFN(pf) |
3939 V_FW_PARAMS_CMD_VFN(vf));
3940 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3943 *p++ = cpu_to_be32(*params++);
3944 *p++ = cpu_to_be32(*val++);
3947 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
3950 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3951 unsigned int vf, unsigned int nparams, const u32 *params,
3954 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
3955 FW_CMD_MAX_TIMEOUT);
3959 * t4_alloc_vi_func - allocate a virtual interface
3960 * @adap: the adapter
3961 * @mbox: mailbox to use for the FW command
3962 * @port: physical port associated with the VI
3963 * @pf: the PF owning the VI
3964 * @vf: the VF owning the VI
3965 * @nmac: number of MAC addresses needed (1 to 5)
3966 * @mac: the MAC addresses of the VI
3967 * @rss_size: size of RSS table slice associated with this VI
3968 * @portfunc: which Port Application Function MAC Address is desired
3969 * @idstype: Intrusion Detection Type
3971 * Allocates a virtual interface for the given physical port. If @mac is
3972 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
3973 * @mac should be large enough to hold @nmac Ethernet addresses, they are
3974 * stored consecutively so the space needed is @nmac * 6 bytes.
3975 * Returns a negative error number or the non-negative VI id.
3977 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
3978 unsigned int port, unsigned int pf, unsigned int vf,
3979 unsigned int nmac, u8 *mac, unsigned int *rss_size,
3980 unsigned int portfunc, unsigned int idstype)
3985 memset(&c, 0, sizeof(c));
3986 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
3987 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
3988 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
3989 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
3990 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
3991 V_FW_VI_CMD_FUNC(portfunc));
3992 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
3995 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4000 memcpy(mac, c.mac, sizeof(c.mac));
4003 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
4006 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
4009 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
4012 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
4017 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
4018 return G_FW_VI_CMD_VIID(cpu_to_be16(c.type_to_viid));
4022 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
4023 * @adap: the adapter
4024 * @mbox: mailbox to use for the FW command
4025 * @port: physical port associated with the VI
4026 * @pf: the PF owning the VI
4027 * @vf: the VF owning the VI
4028 * @nmac: number of MAC addresses needed (1 to 5)
4029 * @mac: the MAC addresses of the VI
4030 * @rss_size: size of RSS table slice associated with this VI
4032 * Backwards compatible and convieniance routine to allocate a Virtual
4033 * Interface with a Ethernet Port Application Function and Intrustion
4034 * Detection System disabled.
4036 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4037 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4038 unsigned int *rss_size)
4040 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
4045 * t4_free_vi - free a virtual interface
4046 * @adap: the adapter
4047 * @mbox: mailbox to use for the FW command
4048 * @pf: the PF owning the VI
4049 * @vf: the VF owning the VI
4050 * @viid: virtual interface identifiler
4052 * Free a previously allocated virtual interface.
4054 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4055 unsigned int vf, unsigned int viid)
4059 memset(&c, 0, sizeof(c));
4060 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4063 c.op_to_vfn |= cpu_to_be32(V_FW_VI_CMD_PFN(pf) |
4064 V_FW_VI_CMD_VFN(vf));
4065 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
4066 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
4069 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4071 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4075 * t4_set_rxmode - set Rx properties of a virtual interface
4076 * @adap: the adapter
4077 * @mbox: mailbox to use for the FW command
4079 * @mtu: the new MTU or -1
4080 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4081 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4082 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4083 * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
4085 * @sleep_ok: if true we may sleep while awaiting command completion
4087 * Sets Rx properties of a virtual interface.
4089 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4090 int mtu, int promisc, int all_multi, int bcast, int vlanex,
4093 struct fw_vi_rxmode_cmd c;
4095 /* convert to FW values */
4097 mtu = M_FW_VI_RXMODE_CMD_MTU;
4099 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
4101 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
4103 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
4105 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
4107 memset(&c, 0, sizeof(c));
4108 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
4109 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4110 V_FW_VI_RXMODE_CMD_VIID(viid));
4111 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4112 c.mtu_to_vlanexen = cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4113 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4114 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4115 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4116 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
4118 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL,
4121 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4125 * t4_change_mac - modifies the exact-match filter for a MAC address
4126 * @adap: the adapter
4127 * @mbox: mailbox to use for the FW command
4129 * @idx: index of existing filter for old value of MAC address, or -1
4130 * @addr: the new MAC address value
4131 * @persist: whether a new MAC allocation should be persistent
4132 * @add_smt: if true also add the address to the HW SMT
4134 * Modifies an exact-match filter and sets it to the new MAC address if
4135 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
4136 * latter case the address is added persistently if @persist is %true.
4138 * Note that in general it is not possible to modify the value of a given
4139 * filter so the generic way to modify an address filter is to free the one
4140 * being used by the old address value and allocate a new filter for the
4141 * new address value.
4143 * Returns a negative error number or the index of the filter with the new
4144 * MAC value. Note that this index may differ from @idx.
4146 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
4147 int idx, const u8 *addr, bool persist, bool add_smt)
4150 struct fw_vi_mac_cmd c;
4151 struct fw_vi_mac_exact *p = c.u.exact;
4152 int max_mac_addr = adap->params.arch.mps_tcam_size;
4154 if (idx < 0) /* new allocation */
4155 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
4156 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
4158 memset(&c, 0, sizeof(c));
4159 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4160 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4161 V_FW_VI_MAC_CMD_VIID(viid));
4162 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
4163 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
4164 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
4165 V_FW_VI_MAC_CMD_IDX(idx));
4166 memcpy(p->macaddr, addr, sizeof(p->macaddr));
4169 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4171 ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
4173 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
4174 if (ret >= max_mac_addr)
4181 * t4_enable_vi_params - enable/disable a virtual interface
4182 * @adap: the adapter
4183 * @mbox: mailbox to use for the FW command
4185 * @rx_en: 1=enable Rx, 0=disable Rx
4186 * @tx_en: 1=enable Tx, 0=disable Tx
4187 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
4189 * Enables/disables a virtual interface. Note that setting DCB Enable
4190 * only makes sense when enabling a Virtual Interface ...
4192 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
4193 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
4195 struct fw_vi_enable_cmd c;
4197 memset(&c, 0, sizeof(c));
4198 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
4199 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
4200 V_FW_VI_ENABLE_CMD_VIID(viid));
4201 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
4202 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
4203 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
4206 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
4208 return t4vf_wr_mbox_ns(adap, &c, sizeof(c), NULL);
4212 * t4_enable_vi - enable/disable a virtual interface
4213 * @adap: the adapter
4214 * @mbox: mailbox to use for the FW command
4216 * @rx_en: 1=enable Rx, 0=disable Rx
4217 * @tx_en: 1=enable Tx, 0=disable Tx
4219 * Enables/disables a virtual interface. Note that setting DCB Enable
4220 * only makes sense when enabling a Virtual Interface ...
4222 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
4223 bool rx_en, bool tx_en)
4225 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
4229 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
4230 * @adap: the adapter
4231 * @mbox: mailbox to use for the FW command
4232 * @start: %true to enable the queues, %false to disable them
4233 * @pf: the PF owning the queues
4234 * @vf: the VF owning the queues
4235 * @iqid: ingress queue id
4236 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4237 * @fl1id: FL1 queue id or 0xffff if no attached FL1
4239 * Starts or stops an ingress queue and its associated FLs, if any.
4241 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
4242 unsigned int pf, unsigned int vf, unsigned int iqid,
4243 unsigned int fl0id, unsigned int fl1id)
4247 memset(&c, 0, sizeof(c));
4248 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4250 c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) |
4251 V_FW_IQ_CMD_IQSTOP(!start) |
4253 c.iqid = cpu_to_be16(iqid);
4254 c.fl0id = cpu_to_be16(fl0id);
4255 c.fl1id = cpu_to_be16(fl1id);
4257 c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
4258 V_FW_IQ_CMD_VFN(vf));
4259 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4261 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4266 * t4_iq_free - free an ingress queue and its FLs
4267 * @adap: the adapter
4268 * @mbox: mailbox to use for the FW command
4269 * @pf: the PF owning the queues
4270 * @vf: the VF owning the queues
4271 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
4272 * @iqid: ingress queue id
4273 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4274 * @fl1id: FL1 queue id or 0xffff if no attached FL1
4276 * Frees an ingress queue and its associated FLs, if any.
4278 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4279 unsigned int vf, unsigned int iqtype, unsigned int iqid,
4280 unsigned int fl0id, unsigned int fl1id)
4284 memset(&c, 0, sizeof(c));
4285 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
4288 c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
4289 V_FW_IQ_CMD_VFN(vf));
4290 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
4291 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
4292 c.iqid = cpu_to_be16(iqid);
4293 c.fl0id = cpu_to_be16(fl0id);
4294 c.fl1id = cpu_to_be16(fl1id);
4296 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4298 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4302 * t4_eth_eq_free - free an Ethernet egress queue
4303 * @adap: the adapter
4304 * @mbox: mailbox to use for the FW command
4305 * @pf: the PF owning the queue
4306 * @vf: the VF owning the queue
4307 * @eqid: egress queue id
4309 * Frees an Ethernet egress queue.
4311 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4312 unsigned int vf, unsigned int eqid)
4314 struct fw_eq_eth_cmd c;
4316 memset(&c, 0, sizeof(c));
4317 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
4318 F_FW_CMD_REQUEST | F_FW_CMD_EXEC);
4320 c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
4321 V_FW_IQ_CMD_VFN(vf));
4322 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
4323 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
4325 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4327 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
/**
 * t4_link_down_rc_str - return a string for a Link Down Reason Code
 * @link_down_rc: Link Down Reason Code
 *
 * Returns a string representation of the Link Down Reason Code.
 */
static const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	static const char * const reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved",
	};

	/* Element-count form keeps this lookup self-contained */
	if (link_down_rc >= sizeof(reason) / sizeof(reason[0]))
		return "Bad Reason Code";

	return reason[link_down_rc];
}
4355 /* Return the highest speed set in the port capabilities, in Mb/s. */
4356 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
4358 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
4360 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
4364 TEST_SPEED_RETURN(100G, 100000);
4365 TEST_SPEED_RETURN(50G, 50000);
4366 TEST_SPEED_RETURN(40G, 40000);
4367 TEST_SPEED_RETURN(25G, 25000);
4368 TEST_SPEED_RETURN(10G, 10000);
4369 TEST_SPEED_RETURN(1G, 1000);
4370 TEST_SPEED_RETURN(100M, 100);
4372 #undef TEST_SPEED_RETURN
4378 * t4_handle_get_port_info - process a FW reply message
4379 * @pi: the port info
4380 * @rpl: start of the FW message
4382 * Processes a GET_PORT_INFO FW reply message.
4384 static void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
4386 const struct fw_port_cmd *cmd = (const void *)rpl;
4387 int action = G_FW_PORT_CMD_ACTION(be32_to_cpu(cmd->action_to_len16));
4388 fw_port_cap32_t pcaps, acaps, linkattr;
4389 struct link_config *lc = &pi->link_cfg;
4390 struct adapter *adapter = pi->adapter;
4391 enum fw_port_module_type mod_type;
4392 enum fw_port_type port_type;
4393 unsigned int speed, fc, fec;
4394 int link_ok, linkdnrc;
4396 /* Extract the various fields from the Port Information message.
4399 case FW_PORT_ACTION_GET_PORT_INFO: {
4400 u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
4402 link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS) != 0;
4403 linkdnrc = G_FW_PORT_CMD_LINKDNRC(lstatus);
4404 port_type = G_FW_PORT_CMD_PTYPE(lstatus);
4405 mod_type = G_FW_PORT_CMD_MODTYPE(lstatus);
4406 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
4407 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
4409 /* Unfortunately the format of the Link Status in the old
4410 * 16-bit Port Information message isn't the same as the
4411 * 16-bit Port Capabilities bitfield used everywhere else ...
4414 if (lstatus & F_FW_PORT_CMD_RXPAUSE)
4415 linkattr |= FW_PORT_CAP32_FC_RX;
4416 if (lstatus & F_FW_PORT_CMD_TXPAUSE)
4417 linkattr |= FW_PORT_CAP32_FC_TX;
4418 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
4419 linkattr |= FW_PORT_CAP32_SPEED_100M;
4420 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
4421 linkattr |= FW_PORT_CAP32_SPEED_1G;
4422 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
4423 linkattr |= FW_PORT_CAP32_SPEED_10G;
4424 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
4425 linkattr |= FW_PORT_CAP32_SPEED_25G;
4426 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
4427 linkattr |= FW_PORT_CAP32_SPEED_40G;
4428 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
4429 linkattr |= FW_PORT_CAP32_SPEED_100G;
4434 case FW_PORT_ACTION_GET_PORT_INFO32: {
4436 be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
4438 link_ok = (lstatus32 & F_FW_PORT_CMD_LSTATUS32) != 0;
4439 linkdnrc = G_FW_PORT_CMD_LINKDNRC32(lstatus32);
4440 port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
4441 mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus32);
4442 pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
4443 acaps = be32_to_cpu(cmd->u.info32.acaps32);
4444 linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
4449 dev_warn(adapter, "Handle Port Information: Bad Command/Action %#x\n",
4450 be32_to_cpu(cmd->action_to_len16));
4454 fec = fwcap_to_cc_fec(acaps);
4456 fc = fwcap_to_cc_pause(linkattr);
4457 speed = fwcap_to_speed(linkattr);
4459 if (mod_type != pi->mod_type) {
4461 pi->port_type = port_type;
4462 pi->mod_type = mod_type;
4463 t4_os_portmod_changed(adapter, pi->pidx);
4465 if (link_ok != lc->link_ok || speed != lc->speed ||
4466 fc != lc->fc || fec != lc->fec) { /* something changed */
4467 if (!link_ok && lc->link_ok) {
4468 lc->link_down_rc = linkdnrc;
4469 dev_warn(adap, "Port %d link down, reason: %s\n",
4470 pi->tx_chan, t4_link_down_rc_str(linkdnrc));
4472 lc->link_ok = link_ok;
4477 lc->acaps = acaps & ADVERT_MASK;
4479 if (lc->acaps & FW_PORT_CAP32_ANEG) {
4480 lc->autoneg = AUTONEG_ENABLE;
4482 /* When Autoneg is disabled, user needs to set
4484 * Similar to cxgb4_ethtool.c: set_link_ksettings
4487 lc->requested_speed = fwcap_to_speed(acaps);
4488 lc->autoneg = AUTONEG_DISABLE;
4494 * t4_handle_fw_rpl - process a FW reply message
4495 * @adap: the adapter
4496 * @rpl: start of the FW message
4498 * Processes a FW message, such as link state change messages.
4500 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
4502 u8 opcode = *(const u8 *)rpl;
4505 * This might be a port command ... this simplifies the following
4506 * conditionals ... We can get away with pre-dereferencing
4507 * action_to_len16 because it's in the first 16 bytes and all messages
4508 * will be at least that long.
4510 const struct fw_port_cmd *p = (const void *)rpl;
4511 unsigned int action =
4512 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
4514 if (opcode == FW_PORT_CMD &&
4515 (action == FW_PORT_ACTION_GET_PORT_INFO ||
4516 action == FW_PORT_ACTION_GET_PORT_INFO32)) {
4517 /* link/module state change message */
4518 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
4519 struct port_info *pi = NULL;
4522 for_each_port(adap, i) {
4523 pi = adap2pinfo(adap, i);
4524 if (pi->tx_chan == chan)
4528 t4_handle_get_port_info(pi, rpl);
4530 dev_warn(adap, "Unknown firmware reply %d\n", opcode);
4536 void t4_reset_link_config(struct adapter *adap, int idx)
4538 struct port_info *pi = adap2pinfo(adap, idx);
4539 struct link_config *lc = &pi->link_cfg;
4542 lc->requested_speed = 0;
4543 lc->requested_fc = 0;
4549 * init_link_config - initialize a link's SW state
4550 * @lc: structure holding the link state
4551 * @pcaps: link Port Capabilities
4552 * @acaps: link current Advertised Port Capabilities
4554 * Initializes the SW state maintained for each link, including the link's
4555 * capabilities and default speed/flow-control/autonegotiation settings.
4557 void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
4558 fw_port_cap32_t acaps)
4561 lc->requested_speed = 0;
4563 lc->requested_fc = 0;
4567 * For Forward Error Control, we default to whatever the Firmware
4568 * tells us the Link is currently advertising.
4570 lc->auto_fec = fwcap_to_cc_fec(acaps);
4571 lc->requested_fec = FEC_AUTO;
4572 lc->fec = lc->auto_fec;
4574 if (lc->pcaps & FW_PORT_CAP32_ANEG) {
4575 lc->acaps = lc->pcaps & ADVERT_MASK;
4576 lc->autoneg = AUTONEG_ENABLE;
4577 lc->requested_fc |= PAUSE_AUTONEG;
4580 lc->autoneg = AUTONEG_DISABLE;
4585 * t4_wait_dev_ready - wait till to reads of registers work
4587 * Right after the device is RESET is can take a small amount of time
4588 * for it to respond to register reads. Until then, all reads will
4589 * return either 0xff...ff or 0xee...ee. Return an error if reads
4590 * don't work within a reasonable time frame.
4592 static int t4_wait_dev_ready(struct adapter *adapter)
4596 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4598 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
4602 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4603 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
4606 dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
4612 u32 vendor_and_model_id;
4616 int t4_get_flash_params(struct adapter *adapter)
4619 * Table for non-Numonix supported flash parts. Numonix parts are left
4620 * to the preexisting well-tested code. All flash parts have 64KB
4623 static struct flash_desc supported_flash[] = {
4624 { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
4629 unsigned int part, manufacturer;
4630 unsigned int density, size;
4633 * Issue a Read ID Command to the Flash part. We decode supported
4634 * Flash parts and their sizes from this. There's a newer Query
4635 * Command which can retrieve detailed geometry information but
4636 * many Flash parts don't support it.
4638 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
4640 ret = sf1_read(adapter, 3, 0, 1, &flashid);
4641 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
4645 for (part = 0; part < ARRAY_SIZE(supported_flash); part++) {
4646 if (supported_flash[part].vendor_and_model_id == flashid) {
4647 adapter->params.sf_size =
4648 supported_flash[part].size_mb;
4649 adapter->params.sf_nsec =
4650 adapter->params.sf_size / SF_SEC_SIZE;
4655 manufacturer = flashid & 0xff;
4656 switch (manufacturer) {
4657 case 0x20: { /* Micron/Numonix */
4659 * This Density -> Size decoding table is taken from Micron
4662 density = (flashid >> 16) & 0xff;
4665 size = 1 << 20; /* 1MB */
4668 size = 1 << 21; /* 2MB */
4671 size = 1 << 22; /* 4MB */
4674 size = 1 << 23; /* 8MB */
4677 size = 1 << 24; /* 16MB */
4680 size = 1 << 25; /* 32MB */
4683 size = 1 << 26; /* 64MB */
4686 size = 1 << 27; /* 128MB */
4689 size = 1 << 28; /* 256MB */
4692 dev_err(adapter, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
4697 adapter->params.sf_size = size;
4698 adapter->params.sf_nsec = size / SF_SEC_SIZE;
4702 dev_err(adapter, "Unsupported Flash Part, ID = %#x\n", flashid);
4708 * We should reject adapters with FLASHes which are too small. So, emit
4711 if (adapter->params.sf_size < FLASH_MIN_SIZE)
4712 dev_warn(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
4713 flashid, adapter->params.sf_size, FLASH_MIN_SIZE);
/*
 * set_pcie_completion_timeout - set the PCIe Completion Timeout range
 * @adapter: the adapter
 *
 * Read-modify-write of the PCIe Device Control 2 config register located
 * via the PCI Express capability. NOTE(review): the lines that check the
 * capability lookup result and mask the new timeout value into @val are
 * not visible in this excerpt — confirm against the full source.
 */
4718 static void set_pcie_completion_timeout(struct adapter *adapter,
4724 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4726 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
4729 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
4734 * t4_get_chip_type - Determine chip type from device ID
4735 * @adap: the adapter
4736 * @ver: adapter version
/* Returns a CHELSIO_CHIP_CODE()-encoded chip type combining the adapter
 * family (T5/T6) with the PL_REV hardware revision, or logs an error for
 * unsupported versions. NOTE(review): the switch/case labels mapping
 * @ver to each branch are elided from this excerpt. */
4738 int t4_get_chip_type(struct adapter *adap, int ver)
4740 enum chip_type chip = 0;
4741 u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV));
4743 /* Retrieve adapter's device ID */
4746 chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
4749 chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
4752 dev_err(adap, "Device %d is not supported\n",
4753 adap->params.pci.device_id);
4761 * t4_prep_adapter - prepare SW and HW for operation
4762 * @adapter: the adapter
4764 * Initialize adapter SW state for the various HW modules, set initial
4765 * values for some adapter tunables, take PHYs out of reset, and
4766 * initialize the MDIO interface.
4768 int t4_prep_adapter(struct adapter *adapter)
/* Wait for the device to become ready before touching any registers. */
4773 ret = t4_wait_dev_ready(adapter);
/* Cache hardware revision and PCI IDs for later decoding. */
4777 pl_rev = G_REV(t4_read_reg(adapter, A_PL_REV));
4778 adapter->params.pci.device_id = adapter->pdev->id.device_id;
4779 adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;
4782 * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
4783 * ADAPTER (VERSION << 4 | REVISION)
4785 ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
4786 adapter->params.chip = 0;
/* T5 architecture parameters (the case label selecting this branch on
 * @ver is elided from this excerpt). */
4789 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
4790 adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
4791 adapter->params.arch.mps_tcam_size =
4792 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
4793 adapter->params.arch.mps_rplc_size = 128;
4794 adapter->params.arch.nchan = NCHAN;
4795 adapter->params.arch.vfcount = 128;
/* T6 architecture parameters: no FL doorbell priority bits, larger
 * replication table, fewer channels, more VFs. */
4798 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
4799 adapter->params.arch.sge_fl_db = 0;
4800 adapter->params.arch.mps_tcam_size =
4801 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
4802 adapter->params.arch.mps_rplc_size = 256;
4803 adapter->params.arch.nchan = 2;
4804 adapter->params.arch.vfcount = 256;
4807 dev_err(adapter, "%s: Device %d is not supported\n",
4808 __func__, adapter->params.pci.device_id);
/* Locate the VPD capability and read the flash geometry. */
4812 adapter->params.pci.vpd_cap_addr =
4813 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
4815 ret = t4_get_flash_params(adapter);
4817 dev_err(adapter, "Unable to retrieve Flash Parameters, ret = %d\n",
4822 adapter->params.cim_la_size = CIMLA_SIZE;
4824 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
4827 * Default port and clock for debugging in case we can't reach FW.
4829 adapter->params.nports = 1;
4830 adapter->params.portvec = 1;
4831 adapter->params.vpd.cclk = 50000;
4833 /* Set pci completion timeout value to 4 seconds. */
4834 set_pcie_completion_timeout(adapter, 0xd);
4839 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
4840 * @adapter: the adapter
4841 * @qid: the Queue ID
4842 * @qtype: the Ingress or Egress type for @qid
4843 * @pbar2_qoffset: BAR2 Queue Offset
4844 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
4846 * Returns the BAR2 SGE Queue Registers information associated with the
4847 * indicated Absolute Queue ID. These are passed back in return value
4848 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
4849 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
4851 * This may return an error which indicates that BAR2 SGE Queue
4852 * registers aren't available. If an error is not returned, then the
4853 * following values are returned:
4855 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
4856 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
4858 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
4859 * require the "Inferred Queue ID" ability may be used. E.g. the
4860 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
4861 * then these "Inferred Queue ID" register may not be used.
4863 int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
4864 enum t4_bar2_qtype qtype, u64 *pbar2_qoffset,
4865 unsigned int *pbar2_qid)
4867 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
4868 u64 bar2_page_offset, bar2_qoffset;
4869 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
4872 * T4 doesn't support BAR2 SGE Queue registers.
4874 if (is_t4(adapter->params.chip))
4878 * Get our SGE Page Size parameters.
/* sge.hps encodes the host page size as log2(size) - 10 (see
 * t4_init_sge_params), hence the +10 to recover the full shift. */
4880 page_shift = adapter->params.sge.hps + 10;
4881 page_size = 1 << page_shift;
4884 * Get the right Queues per Page parameters for our Queue.
4886 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS ?
4887 adapter->params.sge.eq_qpp :
4888 adapter->params.sge.iq_qpp);
4889 qpp_mask = (1 << qpp_shift) - 1;
4892 * Calculate the basics of the BAR2 SGE Queue register area:
4893 * o The BAR2 page the Queue registers will be in.
4894 * o The BAR2 Queue ID.
4895 * o The BAR2 Queue ID Offset into the BAR2 page.
4897 bar2_page_offset = ((qid >> qpp_shift) << page_shift);
4898 bar2_qid = qid & qpp_mask;
4899 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
4902 * If the BAR2 Queue ID Offset is less than the Page Size, then the
4903 * hardware will infer the Absolute Queue ID simply from the writes to
4904 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
4905 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
4906 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
4907 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
4908 * from the BAR2 Page and BAR2 Queue ID.
4910 * One important consequence of this is that some BAR2 SGE registers
4911 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
4912 * there. But other registers synthesize the SGE Queue ID purely
4913 * from the writes to the registers -- the Write Combined Doorbell
4914 * Buffer is a good example. These BAR2 SGE Registers are only
4915 * available for those BAR2 SGE Register areas where the SGE Absolute
4916 * Queue ID can be inferred from simple writes.
4918 bar2_qoffset = bar2_page_offset;
4919 bar2_qinferred = (bar2_qid_offset < page_size);
4920 if (bar2_qinferred) {
4921 bar2_qoffset += bar2_qid_offset;
/* Pass the computed offset and (possibly zeroed) BAR2 QID back. */
4925 *pbar2_qoffset = bar2_qoffset;
4926 *pbar2_qid = bar2_qid;
4931 * t4_init_sge_params - initialize adap->params.sge
4932 * @adapter: the adapter
4934 * Initialize various fields of the adapter's SGE Parameters structure.
4936 int t4_init_sge_params(struct adapter *adapter)
4938 struct sge_params *sge_params = &adapter->params.sge;
4940 unsigned int s_hps, s_qpp;
4943 * Extract the SGE Page Size for our PF.
/* Each PF has its own field in HOST_PAGE_SIZE; compute this PF's shift
 * from the stride between the PF0 and PF1 field positions. */
4945 hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
4946 s_hps = (S_HOSTPAGESIZEPF0 + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) *
4948 sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
4951 * Extract the SGE Egress and Ingress Queues Per Page for our PF.
4953 s_qpp = (S_QUEUESPERPAGEPF0 +
4954 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
4955 qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
4956 sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
4957 qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
4958 sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
4964 * t4_init_tp_params - initialize adap->params.tp
4965 * @adap: the adapter
4967 * Initialize various fields of the adapter's TP Parameters structure.
4969 int t4_init_tp_params(struct adapter *adap)
/* Cache the TP timer and delayed-ACK resolutions. */
4974 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
4975 adap->params.tp.tre = G_TIMERRESOLUTION(v);
4976 adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
4978 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
4979 for (chan = 0; chan < NCHAN; chan++)
4980 adap->params.tp.tx_modq[chan] = chan;
4983 * Cache the adapter's Compressed Filter Mode and global Ingress
4986 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4987 &adap->params.tp.vlan_pri_map, 1, A_TP_VLAN_PRI_MAP);
4988 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
4989 &adap->params.tp.ingress_config, 1,
4990 A_TP_INGRESS_CONFIG);
4992 /* For T6, cache the adapter's compressed error vector
4993 * and passing outer header info for encapsulated packets.
4995 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
4996 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
4997 adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
5001 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
5002 * shift positions of several elements of the Compressed Filter Tuple
5003 * for this adapter which we need frequently ...
5005 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
5006 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
5007 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
5008 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
5012 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
5013 * represents the presence of an Outer VLAN instead of a VNIC ID.
/* -1 marks the VNIC shift as unusable in that case. */
5015 if ((adap->params.tp.ingress_config & F_VNIC) == 0)
5016 adap->params.tp.vnic_shift = -1;
5022 * t4_filter_field_shift - calculate filter field shift
5023 * @adap: the adapter
5024 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
5026 * Return the shift position of a filter field within the Compressed
5027 * Filter Tuple. The filter field is specified via its selection bit
5028 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
5030 int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel)
5032 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
/* A field that is not enabled in the filter mode has no position. */
5036 if ((filter_mode & filter_sel) == 0)
/* Walk every enabled selection bit below @filter_sel, accumulating each
 * enabled field's width to get the target field's shift. NOTE(review):
 * the case labels preceding most of the width additions below are
 * elided from this excerpt. */
5039 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
5040 switch (filter_mode & sel) {
5042 field_shift += W_FT_FCOE;
5045 field_shift += W_FT_PORT;
5048 field_shift += W_FT_VNIC_ID;
5051 field_shift += W_FT_VLAN;
5054 field_shift += W_FT_TOS;
5057 field_shift += W_FT_PROTOCOL;
5060 field_shift += W_FT_ETHERTYPE;
5063 field_shift += W_FT_MACMATCH;
5066 field_shift += W_FT_MPSHITTYPE;
5068 case F_FRAGMENTATION:
5069 field_shift += W_FT_FRAGMENTATION;
/*
 * t4_init_rss_mode - read and cache each port's RSS Virtual Interface mode
 * @adap: the adapter
 * @mbox: mailbox to use for firmware commands
 *
 * Issues a read-only FW_RSS_VI_CONFIG_CMD for every port's VI and caches
 * the returned basic-virtual RSS mode word in port_info::rss_mode.
 */
5076 int t4_init_rss_mode(struct adapter *adap, int mbox)
5079 struct fw_rss_vi_config_cmd rvc;
5081 memset(&rvc, 0, sizeof(rvc));
5083 for_each_port(adap, i) {
5084 struct port_info *p = adap2pinfo(adap, i);
/* Build a READ request addressed to this port's VI ID. */
5086 rvc.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
5087 F_FW_CMD_REQUEST | F_FW_CMD_READ |
5088 V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
5089 rvc.retval_len16 = htonl(FW_LEN16(rvc));
5090 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
5093 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
/*
 * t4_port_init - initialize per-port SW state from firmware
 * @adap: the adapter
 * @mbox: mailbox to use for firmware commands
 * @pf: the PF to use
 * @vf: the VF to use
 *
 * For each port: probes (once) whether firmware supports 32-bit Port
 * Capabilities, issues a GET_PORT_INFO(32) command, decodes port type /
 * MDIO address / capabilities in either the 16-bit or 32-bit format,
 * allocates a Virtual Interface, and initializes the port's link config.
 * NOTE(review): this function continues past the end of this excerpt and
 * several interleaved lines are missing; comments cover visible code only.
 */
5098 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
5100 unsigned int fw_caps = adap->params.fw_caps_support;
5101 fw_port_cap32_t pcaps, acaps;
5102 enum fw_port_type port_type;
5103 struct fw_port_cmd cmd;
5109 memset(&cmd, 0, sizeof(cmd));
5111 for_each_port(adap, i) {
5112 struct port_info *pi = adap2pinfo(adap, i);
5113 unsigned int rss_size = 0;
/* Advance j to the next physical port present in portvec. */
5115 while ((adap->params.portvec & (1 << j)) == 0)
5118 /* If we haven't yet determined whether we're talking to
5119 * Firmware which knows the new 32-bit Port Capabilities, it's
5120 * time to find out now. This will also tell new Firmware to
5121 * send us Port Status Updates using the new 32-bit Port
5122 * Capabilities version of the Port Information message.
5124 if (fw_caps == FW_CAPS_UNKNOWN) {
5125 u32 param, val, caps;
5127 caps = FW_PARAMS_PARAM_PFVF_PORT_CAPS32;
5128 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
5129 V_FW_PARAMS_PARAM_X(caps));
/* Success of the set-params probe implies 32-bit capability support. */
5131 ret = t4_set_params(adap, mbox, pf, vf, 1, &param,
5133 fw_caps = ret == 0 ? FW_CAPS32 : FW_CAPS16;
5134 adap->params.fw_caps_support = fw_caps;
/* Request port information in the format the firmware supports. */
5137 memset(&cmd, 0, sizeof(cmd));
5138 cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
5141 V_FW_PORT_CMD_PORTID(j));
5142 action = fw_caps == FW_CAPS16 ? FW_PORT_ACTION_GET_PORT_INFO :
5143 FW_PORT_ACTION_GET_PORT_INFO32;
5144 cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
5146 ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
5150 /* Extract the various fields from the Port Information message.
/* 16-bit reply: widen the 16-bit capability words to the common
 * 32-bit representation so later code has a single format. */
5152 if (fw_caps == FW_CAPS16) {
5154 be32_to_cpu(cmd.u.info.lstatus_to_modtype);
5156 port_type = G_FW_PORT_CMD_PTYPE(lstatus);
5157 mdio_addr = (lstatus & F_FW_PORT_CMD_MDIOCAP) ?
5158 (int)G_FW_PORT_CMD_MDIOADDR(lstatus) : -1;
5159 pcaps = be16_to_cpu(cmd.u.info.pcap);
5160 acaps = be16_to_cpu(cmd.u.info.acap);
5161 pcaps = fwcaps16_to_caps32(pcaps);
5162 acaps = fwcaps16_to_caps32(acaps);
/* 32-bit reply: fields can be used directly after byte-swapping. */
5165 be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
5167 port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
5168 mdio_addr = (lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ?
5169 (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) :
5171 pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
5172 acaps = be32_to_cpu(cmd.u.info32.acaps32);
/* Allocate a VI for the port and record its MAC address / RSS size. */
5175 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
5181 pi->rss_size = rss_size;
5182 t4_os_set_hw_addr(adap, i, addr);
5184 pi->port_type = port_type;
5185 pi->mdio_addr = mdio_addr;
5186 pi->mod_type = FW_PORT_MOD_TYPE_NA;
5188 init_link_config(&pi->link_cfg, pcaps, acaps);