4 * Copyright(c) 2014-2017 Chelsio Communications.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Chelsio Communications nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <netinet/in.h>
36 #include <rte_interrupts.h>
38 #include <rte_debug.h>
40 #include <rte_atomic.h>
41 #include <rte_branch_prediction.h>
42 #include <rte_memory.h>
43 #include <rte_tailq.h>
45 #include <rte_alarm.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev_driver.h>
48 #include <rte_malloc.h>
49 #include <rte_random.h>
51 #include <rte_byteorder.h>
55 #include "t4_regs_values.h"
56 #include "t4fw_interface.h"
/**
 * t4_read_mtu_tbl - returns the values in the HW path MTU table
 * @adap: the adapter
 * @mtus: where to store the MTU values
 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 * Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
	/* Select each entry through the index field of A_TP_MTU_TABLE, then
	 * read the same register back to retrieve that entry's value/width.
	 */
	for (i = 0; i < NMTUS; ++i) {
		t4_write_reg(adap, A_TP_MTU_TABLE,
			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
		v = t4_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = G_MTUVALUE(v);
		/* NOTE(review): @mtu_log is documented as possibly NULL; the
		 * guarding "if (mtu_log)" appears to be on a line elided from
		 * this extract -- confirm against the full source.
		 */
			mtu_log[i] = G_MTUWIDTH(v);
/**
 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 * @adap: the adapter
 * @addr: the indirect TP register address
 * @mask: specifies the field within the register to modify
 * @val: new value for the field
 *
 * Sets a field of an indirect TP register to the given value.
 */
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val)
	/* Read-modify-write through the TP PIO address/data register pair:
	 * keep all bits outside @mask, overlay @val inside it.
	 */
	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
	t4_write_reg(adap, A_TP_PIO_DATA, val);
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 * t4_load_mtus - write the MTU and congestion control HW tables
 * @adap: the adapter
 * @mtus: the values for the MTU table
 * @alpha: the values for the congestion control alpha parameter
 * @beta: the values for the congestion control beta parameter
 *
 * Write the HW MTU table with the supplied MTUs and the high-speed
 * congestion control table with the supplied alpha, beta, and MTUs.
 * We write the two tables together because the additive increments
 * depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
	/* Expected average packet counts per congestion-control window. */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = cxgbe_fls(mtu);

		/* Round the base-2 log down unless the MTU is close enough to
		 * the next power of two (the adjustment of log2 on the
		 * following, elided line -- confirm against the full source).
		 */
		if (!(mtu & ((1 << log2) >> 2)))	/* round */
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		/* Derive the congestion-control additive increment for each
		 * window from the MTU and alpha; clamped below by CC_MIN_INCR
		 * (clamp presumably on an elided line).
		 */
		for (w = 0; w < NCCTRL_WIN; ++w) {
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times. If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there. Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
	u32 val = t4_read_reg(adapter, reg);

	/* The operation is done once the masked bit matches the requested
	 * polarity. (The surrounding retry loop is elided from this extract.)
	 */
	if (!!(val & mask) == polarity) {
/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
	/* Read-modify-write: preserve bits outside @mask, insert @val. */
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void)t4_read_reg(adapter, addr); /* flush */
/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
	/* Write the index, then read the value back through the data
	 * register. (The loop over @nregs is elided from this extract.)
	 */
	t4_write_reg(adap, addr_reg, start_idx);
	*vals++ = t4_read_reg(adap, data_reg);
/**
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: address of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
	/* Per register: program the index, then write the value. */
	t4_write_reg(adap, addr_reg, start_idx++);
	t4_write_reg(adap, data_reg, *vals++);
/**
 * t4_report_fw_error - report firmware error
 * @adap: the adapter
 *
 * The adapter firmware can indicate error conditions to the host.
 * If the firmware has indicated an error, print out the reason for
 * the firmware error.
 */
static void t4_report_fw_error(struct adapter *adap)
	/* Human-readable reasons indexed by the PCIE_FW_EVAL field. */
	static const char * const reason[] = {
		"Crash",			/* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",			/* reserved */

	/* Only log when the firmware error flag is actually set. */
	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR)
		pr_err("%s: Firmware reports adapter error: %s\n",
		       __func__, reason[G_PCIE_FW_EVAL(pcie_fw)]);
/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 * Reads @nflit 64-bit flits starting at @mbox_addr, converting each from
 * host to big-endian byte order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = htobe64(t4_read_reg64(adap, mbox_addr));
/*
 * Handle a FW assertion reported in a mailbox: decode the mailbox contents
 * as a FW_DEBUG_CMD assert record and log its file/line/values.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
	struct fw_debug_cmd asrt;

	/* sizeof(asrt) / 8 == number of 64-bit flits in the reply */
	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	pr_warn("FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
		be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
293 #define X_CIM_PF_NOACCESS 0xeeeeeeee
 * If the Host OS Driver needs locking around accesses to the mailbox, this
 * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
299 /* makes single-statement usage a bit cleaner ... */
300 #ifdef T4_OS_NEEDS_MBOX_LOCKING
301 #define T4_OS_MBOX_LOCKING(x) x
303 #define T4_OS_MBOX_LOCKING(x) do {} while (0)
/**
 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 * @timeout: time to wait for command to finish before timing out
 *	(negative implies @sleep_ok=false)
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command. If @rpl is not %NULL it is used to
 * store the FW's reply to the command. The command and its optional
 * reply are of the same length. Some FW commands like RESET and
 * INITIALIZE can take a considerable amount of time to execute.
 * @sleep_ok determines whether we may sleep while awaiting the response.
 * If sleeping is allowed we use progressive backoff otherwise we spin.
 * Note that passing in a negative @timeout is an alternate mechanism
 * for specifying @sleep_ok=false. This is useful when a higher level
 * interface allows for specification of @timeout but not @sleep_ok ...
 *
 * Returns 0 on success or a negative errno on failure. A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error. In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
			    const void __attribute__((__may_alias__)) *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100

	unsigned int delay_idx;
	/* NOTE(review): malloc result is cast (unnecessary in C) and, as far
	 * as visible here, not checked for NULL before use via 'p';
	 * "size * sizeof(char)" is just "size". Confirm against full source.
	 */
	__be64 *temp = (__be64 *)malloc(size * sizeof(char));

	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);

	struct mbox_entry entry;

	/* Commands must be a multiple of 16 bytes and fit the mailbox. */
	if ((size & 15) || size > MBOX_LEN) {
	/* 'p' presumably aliases 'temp' (declaration elided). */
	memcpy(p, (const __be64 *)cmd, size);

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
#ifdef T4_OS_NEEDS_MBOX_LOCKING
	/*
	 * Queue ourselves onto the mailbox access list. When our entry is at
	 * the front of the list, we have rights to access the mailbox. So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);

	for (i = 0; ; i += ms) {
		/*
		 * If we've waited too long, return a busy indication. This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start. We very rarely
		 * contend on access to the mailbox ... Also check for a
		 * firmware error which we'll report as a device error.
		 */
		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		if (i > 4 * timeout || (pcie_fw & F_PCIE_FW_ERR)) {
			t4_os_atomic_list_del(&entry, &adap->mbox_list,
			t4_report_fw_error(adap);
			return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;

		/*
		 * If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (t4_os_list_first_entry(&adap->mbox_list) == &entry)

		/*
		 * Delay for a bit before checking again ...
		 */
		ms = delay[delay_idx];	/* last element may repeat */
		if (delay_idx < ARRAY_SIZE(delay) - 1)
#endif /* T4_OS_NEEDS_MBOX_LOCKING */

	/*
	 * Attempt to gain access to the mailbox (ownership handshake is
	 * retried a few times before giving up).
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		if (v != X_MBOWNER_NONE)

	/*
	 * If we were unable to gain access, dequeue ourselves from the
	 * mailbox atomic access list and report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
		t4_report_fw_error(adap);
		return (v == X_MBOWNER_FW ? -EBUSY : -ETIMEDOUT);

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware. So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		dev_err(adap, "found VALID command in mbox %u: "
			"%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
			(unsigned long long)t4_read_reg64(adap, data_reg),
			(unsigned long long)t4_read_reg64(adap, data_reg + 8),
			(unsigned long long)t4_read_reg64(adap, data_reg + 16),
			(unsigned long long)t4_read_reg64(adap, data_reg + 24),
			(unsigned long long)t4_read_reg64(adap, data_reg + 32),
			(unsigned long long)t4_read_reg64(adap, data_reg + 40),
			(unsigned long long)t4_read_reg64(adap, data_reg + 48),
			(unsigned long long)t4_read_reg64(adap, data_reg + 56));

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	CXGBE_DEBUG_MBOX(adap, "%s: mbox %u: %016llx %016llx %016llx %016llx "
			 "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
			 (unsigned long long)t4_read_reg64(adap, data_reg),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 56));

	/* Hand the mailbox to the firmware and flush the doorbell write. */
	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg); /* flush write */

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	for (i = 0; i < timeout && !(pcie_fw & F_PCIE_FW_ERR); i += ms) {
		ms = delay[delay_idx];	/* last element may repeat */
		if (delay_idx < ARRAY_SIZE(delay) - 1)

		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			/* Ownership came back without a valid message:
			 * release the mailbox and keep waiting.
			 */
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));

			CXGBE_DEBUG_MBOX(adap,
			"%s: mbox %u: %016llx %016llx %016llx %016llx "
			"%016llx %016llx %016llx %016llx\n", __func__, (mbox),
			(unsigned long long)t4_read_reg64(adap, data_reg),
			(unsigned long long)t4_read_reg64(adap, data_reg + 8),
			(unsigned long long)t4_read_reg64(adap, data_reg + 16),
			(unsigned long long)t4_read_reg64(adap, data_reg + 24),
			(unsigned long long)t4_read_reg64(adap, data_reg + 32),
			(unsigned long long)t4_read_reg64(adap, data_reg + 40),
			(unsigned long long)t4_read_reg64(adap, data_reg + 48),
			(unsigned long long)t4_read_reg64(adap, data_reg + 56));

			CXGBE_DEBUG_MBOX(adap,
				"command %#x completed in %d ms (%ssleeping)\n",
				i + ms, sleep_ok ? "" : "non-");

			/* A FW_DEBUG_CMD reply means the firmware asserted:
			 * log the assertion and report -EIO instead.
			 */
			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
				t4_os_atomic_list_del(&entry, &adap->mbox_list,
			/* Negate the firmware's return value for the caller. */
			return -G_FW_CMD_RETVAL((int)res);

	/*
	 * We timed out waiting for a reply to our mailbox command. Report
	 * the error and also check to see if the firmware reported any
	 * errors.
	 */
	dev_err(adap, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
	t4_report_fw_error(adap);
	return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
/* Convenience wrapper: issue a mailbox command with the default timeout. */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
/**
 * t4_get_regs_len - return the size of the chip's register set
 * @adapter: the adapter
 *
 * Returns the size of the chip's BAR0 register space.
 */
unsigned int t4_get_regs_len(struct adapter *adapter)
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);

	/* Map chip version to its register-map size; unknown versions are
	 * reported as an error below.
	 */
	switch (chip_version) {
		return T5_REGMAP_SIZE;

		"Unsupported chip version %d\n", chip_version);
/**
 * t4_get_regs - read chip registers into provided buffer
 * @adap: the adapter
 * @buf: register buffer
 * @buf_size: size (in bytes) of register buffer
 *
 * If the provided register buffer isn't large enough for the chip's
 * full register range, the register dump will be truncated to the
 * register buffer's size.
 */
void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
	/* Tables of (first, last) register-offset pairs per chip generation;
	 * the table contents are elided from this extract.
	 */
	static const unsigned int t5_reg_ranges[] = {
	static const unsigned int t6_reg_ranges[] = {
	u32 *buf_end = (u32 *)((char *)buf + buf_size);
	const unsigned int *reg_ranges;
	int reg_ranges_size, range;
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);

	/* Select the right set of register ranges to dump depending on the
	 * adapter chip type.
	 */
	switch (chip_version) {
		reg_ranges = t5_reg_ranges;
		reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);

		reg_ranges = t6_reg_ranges;
		reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);

		"Unsupported chip version %d\n", chip_version);

	/* Clear the register buffer and insert the appropriate register
	 * values selected by the above register ranges.
	 */
	memset(buf, 0, buf_size);
	for (range = 0; range < reg_ranges_size; range += 2) {
		unsigned int reg = reg_ranges[range];
		unsigned int last_reg = reg_ranges[range + 1];
		u32 *bufp = (u32 *)((char *)buf + reg);

		/* Iterate across the register range filling in the register
		 * buffer but don't write past the end of the register buffer.
		 */
		while (reg <= last_reg && bufp < buf_end) {
			*bufp++ = t4_read_reg(adap, reg);
1987 /* EEPROM reads take a few tens of us while writes can take a bit over 5 ms. */
1988 #define EEPROM_DELAY 10 /* 10us per poll spin */
1989 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
1991 #define EEPROM_STAT_ADDR 0x7bfc
/*
 * Small utility function to wait till any outstanding VPD Access is complete.
 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
 * VPD Access in flight. This allows us to handle the problem of having a
 * previous VPD Access time out and prevent an attempt to inject a new VPD
 * Request before any in-flight VPD request has completed.
 */
static int t4_seeprom_wait(struct adapter *adapter)
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	/* If no VPD Access is in flight, we can just return success right
	 * away.
	 */
	if (!adapter->vpd_busy)

	/* Poll the VPD Capability Address/Flag register waiting for it
	 * to indicate that the operation is complete.
	 */
	max_poll = EEPROM_MAX_POLL;
		udelay(EEPROM_DELAY);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);

		/* If the operation is complete, mark the VPD as no longer
		 * busy and return success.
		 */
		if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
			adapter->vpd_busy = 0;
	} while (--max_poll);

	/* Failure! Note that we leave the VPD Busy status set in order to
	 * avoid pushing a new VPD Access request into the VPD Capability till
	 * the current operation eventually succeeds. It's a bug to issue a
	 * new request when an existing request is in flight and will result
	 * in corrupt hardware state.
	 */
/**
 * t4_seeprom_read - read a serial EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM virtual address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
 * VPD capability. Note that this function must be called with a virtual
 * address.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	/* VPD Accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))

	/* Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
		dev_err(adapter, "VPD still busy from previous operation\n");

	/* Issue our new VPD Read request, mark the VPD as being busy and wait
	 * for our request to complete. If it doesn't complete, note the
	 * error and return it to our caller. Note that we do not reset the
	 * VPD Busy status!
	 */
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = PCI_VPD_ADDR_F;
	ret = t4_seeprom_wait(adapter);
		dev_err(adapter, "VPD read of address %#x failed\n", addr);

	/* Grab the returned data, swizzle it into our endianness and
	 * return it.
	 */
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
/**
 * t4_seeprom_write - write a serial EEPROM location
 * @adapter: adapter to write
 * @addr: virtual EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
 * VPD capability. Note that this function must be called with a virtual
 * address.
 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	/* VPD Accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))

	/* Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
		dev_err(adapter, "VPD still busy from previous operation\n");

	/* Issue our new VPD Write request, mark the VPD as being busy and wait
	 * for our request to complete. If it doesn't complete, note the
	 * error and return it to our caller. Note that we do not reset the
	 * VPD Busy status!
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
			     (u16)addr | PCI_VPD_ADDR_F);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = 0;
	ret = t4_seeprom_wait(adapter);
		dev_err(adapter, "VPD write of address %#x failed\n", addr);

	/* Reset PCI_VPD_DATA register after a transaction and wait for our
	 * request to complete. If it doesn't complete, return error.
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
	max_poll = EEPROM_MAX_POLL;
		udelay(EEPROM_DELAY);
		/* Poll the EEPROM status word until the write-in-progress
		 * bit (bit 0) clears or we exhaust max_poll attempts.
		 */
		t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
	} while ((stats_reg & 0x1) && --max_poll);

	/* Return success! */
/**
 * t4_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: whether to enable or disable write protection
 *
 * Enables or disables write protection on the serial EEPROM by writing
 * the EEPROM status word (0xc sets the protection bits, 0 clears them).
 */
int t4_seeprom_wp(struct adapter *adapter, int enable)
	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
/**
 * t4_fw_tp_pio_rw - Access TP PIO through LDST
 * @adap: the adapter
 * @vals: where the indirect register values are stored/written
 * @nregs: how many indirect registers to read/write
 * @start_index: index of first indirect register to read/write
 * @rw: Read (1) or Write (0)
 *
 * Access TP PIO registers through LDST firmware commands, one register
 * per mailbox command.
 */
void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
		     unsigned int start_index, unsigned int rw)
	int cmd = FW_LDST_ADDRSPC_TP_PIO;
	struct fw_ldst_cmd c;

	for (i = 0 ; i < nregs; i++) {
		/* Build a fresh LDST command for each register access. */
		memset(&c, 0, sizeof(c));
		c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
						(rw ? F_FW_CMD_READ :
						V_FW_LDST_CMD_ADDRSPACE(cmd));
		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));

		c.u.addrval.addr = cpu_to_be32(start_index + i);
		c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
		ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
		/* On a successful read, the value comes back in the reply. */
			vals[i] = be32_to_cpu(c.u.addrval.val);
/**
 * t4_read_rss_key - read the global RSS key
 * @adap: the adapter
 * @key: 10-entry array holding the 320-bit RSS key
 *
 * Reads the global 320-bit RSS key via LDST firmware commands.
 */
void t4_read_rss_key(struct adapter *adap, u32 *key)
	t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1);
/**
 * t4_write_rss_key - program one of the RSS keys
 * @adap: the adapter
 * @key: 10-entry array holding the 320-bit RSS key
 * @idx: which RSS key to write
 *
 * Writes one of the RSS keys with the given 320-bit value. If @idx is
 * 0..15 the corresponding entry in the RSS key table is written,
 * otherwise the global RSS key is written.
 */
void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
	u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
	u8 rss_key_addr_cnt = 16;

	/* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
	 * allows access to key addresses 16-63 by using KeyWrAddrX
	 * as index[5:4](upper 2) into key table
	 */
	if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
	    (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
		rss_key_addr_cnt = 32;

	/* Program the key itself, then latch it into the selected slot. */
	t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);

	if (idx >= 0 && idx < rss_key_addr_cnt) {
		if (rss_key_addr_cnt > 16)
			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     V_KEYWRADDRX(idx >> 4) |
				     V_T6_VFWRADDR(idx) | F_KEYWREN);

			t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
				     V_KEYWRADDR(idx) | F_KEYWREN);
/**
 * t4_config_rss_range - configure a portion of the RSS mapping table
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: virtual interface whose RSS subtable is to be written
 * @start: start entry in the table to write
 * @n: how many table entries to write
 * @rspq: values for the "response queue" (Ingress Queue) lookup table
 * @nrspq: number of values in @rspq
 *
 * Programs the selected part of the VI's RSS mapping table with the
 * provided values. If @nrspq < @n the supplied values are used repeatedly
 * until the full table range is populated.
 *
 * The caller must ensure the values in @rspq are in the range allowed for
 * @viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;	/* wrap point for reuse */
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers. These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
		int nq = min(n, 32);
		__be32 *qp = &cmd.iq0_to_iq2;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			int nqbuf = min(3, nq);

			while (nqbuf && nq_packed < 32) {

			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		if (is_pf4(adapter))
			ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd),

			ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
/**
 * t4_config_vi_rss - configure per VI RSS settings
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: the VI id
 * @flags: RSS flags
 * @defq: id of the default RSS queue for the VI.
 *
 * Configures VI-specific RSS properties.
 */
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
		     unsigned int flags, unsigned int defq)
	struct fw_rss_vi_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
	/* PF uses its own mailbox; VF goes through the VF mailbox path. */
	if (is_pf4(adapter))
		return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);

	return t4vf_wr_mbox(adapter, &c, sizeof(c), NULL);
/**
 * t4_read_config_vi_rss - read the configured per VI RSS settings
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: the VI id
 * @flags: where to place the configured flags
 * @defq: where to place the id of the default RSS queue for the VI.
 *
 * Read configured VI-specific RSS properties.
 */
int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
			  u64 *flags, unsigned int *defq)
	struct fw_rss_vi_config_cmd c;
	unsigned int result;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_READ |
				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	ret = t4_wr_mbox(adapter, mbox, &c, sizeof(c), &c);
		result = be32_to_cpu(c.u.basicvirtual.defaultq_to_udpen);
		*defq = G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(result);
		/* NOTE(review): masking with M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ
		 * keeps only the default-queue field in *flags; verify against
		 * the full source whether the mask should be negated here.
		 */
		*flags = result & M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ;
/**
 * init_cong_ctrl - initialize congestion control parameters
 * @a: the alpha values for congestion control
 * @b: the beta values for congestion control
 *
 * Initialize the congestion control parameters.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
	/* The first nine alpha entries are set here; the remaining table
	 * initialization is elided from this extract.
	 */
	for (i = 0; i < 9; i++) {
/* Fill in the common header of a firmware command struct @var for the
 * FW_<cmd>_CMD opcode with the given READ/WRITE direction flag.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
/* Query the firmware for the adapter's Core Clock and store it in @p->cclk. */
int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p)
	u32 cclk_param, cclk_val;

	/*
	 * Ask firmware for the Core Clock since it knows how to translate the
	 * Reference Clock ('V2') VPD field into a Core Clock value ...
	 */
	cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		      V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      1, &cclk_param, &cclk_val);
		dev_err(adapter, "%s: error in fetching from coreclock - %d\n",

	dev_debug(adapter, "%s: p->cclk = %u\n", __func__, p->cclk);
2511 /* serial flash and firmware constants and flash config file constants */
2513 SF_ATTEMPTS = 10, /* max retries for SF operations */
2515 /* flash command opcodes */
/* These are standard SPI NOR flash opcodes (JEDEC-compatible parts). */
2516 SF_PROG_PAGE = 2, /* program page */
2517 SF_WR_DISABLE = 4, /* disable writes */
2518 SF_RD_STATUS = 5, /* read status register */
2519 SF_WR_ENABLE = 6, /* enable writes */
2520 SF_RD_DATA_FAST = 0xb, /* read flash */
2521 SF_RD_ID = 0x9f, /* read ID */
2522 SF_ERASE_SECTOR = 0xd8, /* erase sector */
2526 * sf1_read - read data from the serial flash
2527 * @adapter: the adapter
2528 * @byte_cnt: number of bytes to read
2529 * @cont: whether another operation will be chained
2530 * @lock: whether to lock SF for PL access only
2531 * @valp: where to store the read data
2533 * Reads up to 4 bytes of data from the serial flash. The location of
2534 * the read needs to be specified prior to calling this by issuing the
2535 * appropriate commands to the serial flash.
/* NOTE(review): the early-return error paths (invalid byte count, SF busy)
 * are elided in this listing — only the condition lines are visible. */
2537 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2538 int lock, u32 *valp)
2542 if (!byte_cnt || byte_cnt > 4)
2544 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
/* Kick off the SF read (BYTECNT is encoded as count-1) and poll BUSY. */
2546 t4_write_reg(adapter, A_SF_OP,
2547 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
2548 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
2550 *valp = t4_read_reg(adapter, A_SF_DATA);
2555 * sf1_write - write data to the serial flash
2556 * @adapter: the adapter
2557 * @byte_cnt: number of bytes to write
2558 * @cont: whether another operation will be chained
2559 * @lock: whether to lock SF for PL access only
2560 * @val: value to write
2562 * Writes up to 4 bytes of data to the serial flash. The location of
2563 * the write needs to be specified prior to calling this by issuing the
2564 * appropriate commands to the serial flash.
/* NOTE(review): the "int lock, u32 val" parameter line and the early
 * error returns are elided in this listing. */
2566 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
2569 if (!byte_cnt || byte_cnt > 4)
2571 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
/* Latch the data, start the write op (V_OP(1) = write), then poll BUSY. */
2573 t4_write_reg(adapter, A_SF_DATA, val);
2574 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
2575 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
2576 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
2580 * t4_read_flash - read words from serial flash
2581 * @adapter: the adapter
2582 * @addr: the start address for the read
2583 * @nwords: how many 32-bit words to read
2584 * @data: where to store the read data
2585 * @byte_oriented: whether to store data as bytes or as words
2587 * Read the specified number of 32-bit words from the serial flash.
2588 * If @byte_oriented is set the read data is stored as a byte array
2589 * (i.e., big-endian), otherwise as 32-bit words in the platform's
2590 * natural endianness.
/* NOTE(review): error-return lines after each sf1_read/sf1_write call and
 * the byte_oriented conditional are elided in this listing. */
2592 int t4_read_flash(struct adapter *adapter, unsigned int addr,
2593 unsigned int nwords, u32 *data, int byte_oriented)
2597 if (((addr + nwords * sizeof(u32)) > adapter->params.sf_size) ||
/* The SF_RD_DATA_FAST opcode goes in the low byte; the 24-bit address is
 * byte-swapped so it is shifted out MSB-first on the SPI bus. */
2601 addr = rte_constant_bswap32(addr) | SF_RD_DATA_FAST;
2603 ret = sf1_write(adapter, 4, 1, 0, addr);
2607 ret = sf1_read(adapter, 1, 1, 0, data);
/* Chain reads (cont=1) until the last word, which also locks SF (lock=1). */
2611 for ( ; nwords; nwords--, data++) {
2612 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
2614 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
2618 *data = cpu_to_be32(*data);
2624 * t4_get_exprom_version - return the Expansion ROM version (if any)
2625 * @adapter: the adapter
2626 * @vers: where to place the version
2628 * Reads the Expansion ROM header from FLASH and returns the version
2629 * number (if present) through the @vers return value pointer. We return
2630 * this in the Firmware Version Format since it's convenient. Return
2631 * 0 on success, -ENOENT if no Expansion ROM is present.
2633 static int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
2635 struct exprom_header {
2636 unsigned char hdr_arr[16]; /* must start with 0x55aa */
2637 unsigned char hdr_ver[4]; /* Expansion ROM version */
/* Word-aligned buffer large enough to hold the header for t4_read_flash(). */
2639 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
2643 ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
2644 ARRAY_SIZE(exprom_header_buf),
2645 exprom_header_buf, 0);
/* PCI Expansion ROM images start with the 0x55 0xAA signature. */
2649 hdr = (struct exprom_header *)exprom_header_buf;
2650 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
2653 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
2654 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
2655 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
2656 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
2661 * t4_get_fw_version - read the firmware version
2662 * @adapter: the adapter
2663 * @vers: where to place the version
2665 * Reads the FW version from flash.
/* Reads the single fw_ver word straight out of the FW image header. */
2667 static int t4_get_fw_version(struct adapter *adapter, u32 *vers)
2669 return t4_read_flash(adapter, FLASH_FW_START +
2670 offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
2674 * t4_get_bs_version - read the firmware bootstrap version
2675 * @adapter: the adapter
2676 * @vers: where to place the version
2678 * Reads the FW Bootstrap version from flash.
/* Same layout as the main FW header, but at the bootstrap flash region. */
2680 static int t4_get_bs_version(struct adapter *adapter, u32 *vers)
2682 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
2683 offsetof(struct fw_hdr, fw_ver), 1,
2688 * t4_get_tp_version - read the TP microcode version
2689 * @adapter: the adapter
2690 * @vers: where to place the version
2692 * Reads the TP microcode version from flash.
/* The TP microcode version lives in the FW image header too. */
2694 static int t4_get_tp_version(struct adapter *adapter, u32 *vers)
2696 return t4_read_flash(adapter, FLASH_FW_START +
2697 offsetof(struct fw_hdr, tp_microcode_ver),
2702 * t4_get_version_info - extract various chip/firmware version information
2703 * @adapter: the adapter
2705 * Reads various chip/firmware version numbers and stores them into the
2706 * adapter Adapter Parameters structure. If any of the efforts fails
2707 * the first failure will be returned, but all of the version numbers
/* ... will still be fetched (the macro records only the first error). */
2710 int t4_get_version_info(struct adapter *adapter)
/* FIRST_RET runs the getter unconditionally but latches only the first
 * non-zero return code into "ret". */
2714 #define FIRST_RET(__getvinfo) \
2716 int __ret = __getvinfo; \
2717 if (__ret && !ret) \
2721 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
2722 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
2723 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
2724 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
2732 * t4_dump_version_info - dump all of the adapter configuration IDs
2733 * @adapter: the adapter
2735 * Dumps all of the various bits of adapter configuration version/revision
2736 * IDs information. This is typically called at some point after
2737 * t4_get_version_info() has been called.
2739 void t4_dump_version_info(struct adapter *adapter)
2742 * Device information.
2744 dev_info(adapter, "Chelsio rev %d\n",
2745 CHELSIO_CHIP_RELEASE(adapter->params.chip));
/* Firmware version: a zero value means no firmware image was found. */
2750 if (!adapter->params.fw_vers)
2751 dev_warn(adapter, "No firmware loaded\n")
2753 dev_info(adapter, "Firmware version: %u.%u.%u.%u\n",
2754 G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
2755 G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
2756 G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
2757 G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
2760 * Bootstrap Firmware Version.
2762 if (!adapter->params.bs_vers)
2763 dev_warn(adapter, "No bootstrap loaded\n");
2765 dev_info(adapter, "Bootstrap version: %u.%u.%u.%u\n",
2766 G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers),
2767 G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers),
2768 G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers),
2769 G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers));
2772 * TP Microcode Version.
2774 if (!adapter->params.tp_vers)
2775 dev_warn(adapter, "No TP Microcode loaded\n");
2777 dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n",
2778 G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
2779 G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
2780 G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
2781 G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));
2784 * Expansion ROM version.
2786 if (!adapter->params.er_vers)
2787 dev_info(adapter, "No Expansion ROM loaded\n");
2789 dev_info(adapter, "Expansion ROM version: %u.%u.%u.%u\n",
2790 G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers),
2791 G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers),
2792 G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers),
2793 G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));
/* Mask of the Port Capabilities bits that may be advertised; the define's
 * continuation line(s) are elided from this listing. */
2796 #define ADVERT_MASK (V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED) | \
2799 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
2800 * @caps16: a 16-bit Port Capabilities value
2802 * Returns the equivalent 32-bit Port Capabilities value.
2804 fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
2806 fw_port_cap32_t caps32 = 0;
/* For every bit set in the 16-bit word, set the same-named 32-bit bit. */
2808 #define CAP16_TO_CAP32(__cap) \
2810 if (caps16 & FW_PORT_CAP_##__cap) \
2811 caps32 |= FW_PORT_CAP32_##__cap; \
2814 CAP16_TO_CAP32(SPEED_100M);
2815 CAP16_TO_CAP32(SPEED_1G);
2816 CAP16_TO_CAP32(SPEED_25G);
2817 CAP16_TO_CAP32(SPEED_10G);
2818 CAP16_TO_CAP32(SPEED_40G);
2819 CAP16_TO_CAP32(SPEED_100G);
2820 CAP16_TO_CAP32(FC_RX);
2821 CAP16_TO_CAP32(FC_TX);
2822 CAP16_TO_CAP32(ANEG);
2823 CAP16_TO_CAP32(MDIX);
2824 CAP16_TO_CAP32(MDIAUTO);
2825 CAP16_TO_CAP32(FEC_RS);
2826 CAP16_TO_CAP32(FEC_BASER_RS);
2827 CAP16_TO_CAP32(802_3_PAUSE);
2828 CAP16_TO_CAP32(802_3_ASM_DIR);
2830 #undef CAP16_TO_CAP32
2836 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
2837 * @caps32: a 32-bit Port Capabilities value
2839 * Returns the equivalent 16-bit Port Capabilities value. Note that
2840 * not all 32-bit Port Capabilities can be represented in the 16-bit
2841 * Port Capabilities and some fields/values may not make it.
2843 static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
2845 fw_port_cap16_t caps16 = 0;
/* Inverse of CAP16_TO_CAP32: copy each same-named bit back down. */
2847 #define CAP32_TO_CAP16(__cap) \
2849 if (caps32 & FW_PORT_CAP32_##__cap) \
2850 caps16 |= FW_PORT_CAP_##__cap; \
2853 CAP32_TO_CAP16(SPEED_100M);
2854 CAP32_TO_CAP16(SPEED_1G);
2855 CAP32_TO_CAP16(SPEED_10G);
2856 CAP32_TO_CAP16(SPEED_25G);
2857 CAP32_TO_CAP16(SPEED_40G);
2858 CAP32_TO_CAP16(SPEED_100G);
2859 CAP32_TO_CAP16(FC_RX);
2860 CAP32_TO_CAP16(FC_TX);
2861 CAP32_TO_CAP16(802_3_PAUSE);
2862 CAP32_TO_CAP16(802_3_ASM_DIR);
2863 CAP32_TO_CAP16(ANEG);
2864 CAP32_TO_CAP16(MDIX);
2865 CAP32_TO_CAP16(MDIAUTO);
2866 CAP32_TO_CAP16(FEC_RS);
2867 CAP32_TO_CAP16(FEC_BASER_RS);
2869 #undef CAP32_TO_CAP16
2874 /* Translate Firmware Pause specification to Common Code */
/* Maps FW_PORT_CAP32_FC_{RX,TX} bits onto the PAUSE_{RX,TX} enum flags;
 * the "return cc_pause;" line is elided from this listing. */
2875 static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
2877 enum cc_pause cc_pause = 0;
2879 if (fw_pause & FW_PORT_CAP32_FC_RX)
2880 cc_pause |= PAUSE_RX;
2881 if (fw_pause & FW_PORT_CAP32_FC_TX)
2882 cc_pause |= PAUSE_TX;
2887 /* Translate Common Code Pause Frame specification into Firmware */
/* Exact inverse of fwcap_to_cc_pause(); the return line is elided here. */
2888 static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
2890 fw_port_cap32_t fw_pause = 0;
2892 if (cc_pause & PAUSE_RX)
2893 fw_pause |= FW_PORT_CAP32_FC_RX;
2894 if (cc_pause & PAUSE_TX)
2895 fw_pause |= FW_PORT_CAP32_FC_TX;
2900 /* Translate Firmware Forward Error Correction specification to Common Code */
/* NOTE(review): the "cc_fec |= FEC_RS;" body of the first if and the return
 * are elided from this listing. */
2901 static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
2903 enum cc_fec cc_fec = 0;
2905 if (fw_fec & FW_PORT_CAP32_FEC_RS)
2907 if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
2908 cc_fec |= FEC_BASER_RS;
2913 /* Translate Common Code Forward Error Correction specification to Firmware */
/* Inverse of fwcap_to_cc_fec(); the return line is elided from this listing. */
2914 static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
2916 fw_port_cap32_t fw_fec = 0;
2918 if (cc_fec & FEC_RS)
2919 fw_fec |= FW_PORT_CAP32_FEC_RS;
2920 if (cc_fec & FEC_BASER_RS)
2921 fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
2927 * t4_link_l1cfg - apply link configuration to MAC/PHY
2928 * @adapter: the adapter
2929 * @mbox: the Firmware Mailbox to use
2930 * @port: the Port ID
2931 * @lc: the Port's Link Configuration
2933 * Set up a port's MAC and PHY according to a desired link configuration.
2934 * - If the PHY can auto-negotiate first decide what to advertise, then
2935 * enable/disable auto-negotiation as desired, and reset.
2936 * - If the PHY does not auto-negotiate just reset it.
2937 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
2938 * otherwise do it later based on the outcome of auto-negotiation.
2940 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
2941 struct link_config *lc)
2943 unsigned int fw_mdi = V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
2944 unsigned int fw_caps = adap->params.fw_caps_support;
2945 fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;
2946 struct fw_port_cmd cmd;
2950 fw_fc = cc_to_fwcap_pause(lc->requested_fc);
2952 /* Convert Common Code Forward Error Control settings into the
2953 * Firmware's API. If the current Requested FEC has "Automatic"
2954 * (IEEE 802.3) specified, then we use whatever the Firmware
2955 * sent us as part of its IEEE 802.3-based interpretation of
2956 * the Transceiver Module EPROM FEC parameters. Otherwise we
2957 * use whatever is in the current Requested FEC settings.
2959 if (lc->requested_fec & FEC_AUTO)
2960 cc_fec = lc->auto_fec;
2962 cc_fec = lc->requested_fec;
2963 fw_fec = cc_to_fwcap_fec(cc_fec);
2965 /* Figure out what our Requested Port Capabilities are going to be.
2967 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
/* Port has no autonegotiation capability: advertise fixed caps only. */
2968 rcap = (lc->pcaps & ADVERT_MASK) | fw_fc | fw_fec;
2969 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
2971 } else if (lc->autoneg == AUTONEG_DISABLE) {
/* Autoneg available but administratively disabled: force speed/FC. */
2972 rcap = lc->requested_speed | fw_fc | fw_fec | fw_mdi;
2973 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
2976 rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
2979 /* And send that on to the Firmware ...
2981 memset(&cmd, 0, sizeof(cmd));
2982 cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
2983 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
2984 V_FW_PORT_CMD_PORTID(port));
2985 cmd.action_to_len16 =
2986 cpu_to_be32(V_FW_PORT_CMD_ACTION(fw_caps == FW_CAPS16 ?
2987 FW_PORT_ACTION_L1_CFG :
2988 FW_PORT_ACTION_L1_CFG32) |
/* Older (16-bit caps) firmware takes the narrowed capability word. */
2991 if (fw_caps == FW_CAPS16)
2992 cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
2994 cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
2996 return t4_wr_mbox(adap, mbox, &cmd, sizeof(cmd), NULL);
3000 * t4_flash_cfg_addr - return the address of the flash configuration file
3001 * @adapter: the adapter
3003 * Return the address within the flash where the Firmware Configuration
3004 * File is stored, or an error if the device FLASH is too small to contain
3005 * a Firmware Configuration File.
3007 int t4_flash_cfg_addr(struct adapter *adapter)
3010 * If the device FLASH isn't large enough to hold a Firmware
3011 * Configuration File, return an error.
/* (The error-return line itself is elided from this listing.) */
3013 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
3016 return FLASH_CFG_START;
/* PF-level interrupt causes we care about: software + CIM interrupts. */
3019 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
3022 * t4_intr_enable - enable interrupts
3023 * @adapter: the adapter whose interrupts should be enabled
3025 * Enable PF-specific interrupts for the calling function and the top-level
3026 * interrupt concentrator for global interrupts. Interrupts are already
3027 * enabled at each module, here we just enable the roots of the interrupt
3030 * Note: this function should be called only when the driver manages
3031 * non PF-specific interrupts from the various HW modules. Only one PCI
3032 * function at a time should be doing this.
3034 void t4_intr_enable(struct adapter *adapter)
/* WHOAMI tells us which PF we are; the field layout moved in T6. */
3037 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
3038 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
3039 G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
/* T4/T5-only SGE error bits; "val" presumably defaults to 0 otherwise
 * (its declaration is elided from this listing — TODO confirm). */
3041 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
3042 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
3043 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
3044 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
3045 F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
3046 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
3047 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
3048 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
3049 F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
3050 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
3051 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
3055 * t4_intr_disable - disable interrupts
3056 * @adapter: the adapter whose interrupts should be disabled
3058 * Disable interrupts. We only disable the top-level interrupt
3059 * concentrators. The caller must be a PCI function managing global
3062 void t4_intr_disable(struct adapter *adapter)
3064 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
3065 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
3066 G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
/* Mirror of t4_intr_enable(): clear the PF enable and our PL_INT_MAP0 bit. */
3068 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
3069 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
3073 * t4_get_port_type_description - return Port Type string description
3074 * @port_type: firmware Port Type enumeration
3076 const char *t4_get_port_type_description(enum fw_port_type port_type)
/* The table entries (and the out-of-range fallback return) are elided
 * from this listing. */
3078 static const char * const port_type_description[] = {
3103 if (port_type < ARRAY_SIZE(port_type_description))
3104 return port_type_description[port_type];
3109 * t4_get_mps_bg_map - return the buffer groups associated with a port
3110 * @adap: the adapter
3111 * @pidx: the port index
3113 * Returns a bitmap indicating which MPS buffer groups are associated
3114 * with the given port. Bit i is set if buffer group i is used by the
3117 unsigned int t4_get_mps_bg_map(struct adapter *adap, unsigned int pidx)
3119 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
3120 unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adap,
3123 if (pidx >= nports) {
3124 dev_warn(adap, "MPS Port Index %d >= Nports %d\n",
/* Per-chip mapping; the switch labels for chip versions and several
 * nports cases are elided from this listing. */
3129 switch (chip_version) {
3134 case 2: return 3 << (2 * pidx);
3135 case 4: return 1 << pidx;
3141 case 2: return 1 << (2 * pidx);
3146 dev_err(adap, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
3147 chip_version, nports);
3152 * t4_get_tp_ch_map - return TP ingress channels associated with a port
3153 * @adapter: the adapter
3154 * @pidx: the port index
3156 * Returns a bitmap indicating which TP Ingress Channels are associated with
3157 * a given Port. Bit i is set if TP Ingress Channel i is used by the Port.
3159 unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx)
3161 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
3162 unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter,
3165 if (pidx >= nports) {
/* NOTE(review): "adap" does not match the parameter name "adapter";
 * this only compiles if dev_warn() ignores its first argument — verify,
 * and prefer dev_warn(adapter, ...) for consistency with dev_err below. */
3166 dev_warn(adap, "TP Port Index %d >= Nports %d\n",
3171 switch (chip_version) {
3174 /* Note that this happens to be the same values as the MPS
3175 * Buffer Group Map for these Chips. But we replicate the code
3176 * here because they're really separate concepts.
3180 case 2: return 3 << (2 * pidx);
3181 case 4: return 1 << pidx;
3187 case 2: return 1 << pidx;
3192 dev_err(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n",
3193 chip_version, nports);
3198 * t4_get_port_stats - collect port statistics
3199 * @adap: the adapter
3200 * @idx: the port index
3201 * @p: the stats structure to fill
3203 * Collect statistics related to the given port from HW.
3205 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3207 u32 bgmap = t4_get_mps_bg_map(adap, idx);
3208 u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
/* Each statistic is a 64-bit counter; the per-port register block base
 * moved between T4 and T5+, hence the chip check in GET_STAT. */
3210 #define GET_STAT(name) \
3211 t4_read_reg64(adap, \
3212 (is_t4(adap->params.chip) ? \
3213 PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) :\
3214 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
3215 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3217 p->tx_octets = GET_STAT(TX_PORT_BYTES);
3218 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
3219 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
3220 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
3221 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
3222 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
3223 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
3224 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
3225 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
3226 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
3227 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
3228 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3229 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
3230 p->tx_drop = GET_STAT(TX_PORT_DROP);
3231 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
3232 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
3233 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
3234 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
3235 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
3236 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
3237 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
3238 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
3239 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
/* T5+: if the hardware counted PAUSE frames in the totals, back them out
 * (a PAUSE frame is 64 bytes on the wire). */
3241 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
3242 if (stat_ctl & F_COUNTPAUSESTATTX) {
3243 p->tx_frames -= p->tx_pause;
3244 p->tx_octets -= p->tx_pause * 64;
3246 if (stat_ctl & F_COUNTPAUSEMCTX)
3247 p->tx_mcast_frames -= p->tx_pause;
3250 p->rx_octets = GET_STAT(RX_PORT_BYTES);
3251 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
3252 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
3253 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
3254 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
3255 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
3256 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3257 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
3258 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
3259 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
3260 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
3261 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
3262 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
3263 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
3264 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
3265 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
3266 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3267 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
3268 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
3269 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
3270 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
3271 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
3272 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
3273 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
3274 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
3275 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
3276 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
/* Same PAUSE-frame adjustment for the RX direction. */
3278 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
3279 if (stat_ctl & F_COUNTPAUSESTATRX) {
3280 p->rx_frames -= p->rx_pause;
3281 p->rx_octets -= p->rx_pause * 64;
3283 if (stat_ctl & F_COUNTPAUSEMCRX)
3284 p->rx_mcast_frames -= p->rx_pause;
/* Buffer-group drop/truncate counters: only read the groups mapped to
 * this port (bgmap), otherwise report 0. */
3287 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3288 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3289 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3290 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3291 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3292 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3293 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3294 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3301 * t4_get_port_stats_offset - collect port stats relative to a previous snapshot
3302 * @adap: The adapter
3304 * @stats: Current stats to fill
3305 * @offset: Previous stats snapshot
/* Fills @stats from HW, then subtracts @offset field-by-field by treating
 * struct port_stats as an array of u64 counters (the subtraction statement
 * itself is elided from this listing). */
3307 void t4_get_port_stats_offset(struct adapter *adap, int idx,
3308 struct port_stats *stats,
3309 struct port_stats *offset)
3314 t4_get_port_stats(adap, idx, stats);
3315 for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
3316 i < (sizeof(struct port_stats) / sizeof(u64));
3322 * t4_clr_port_stats - clear port statistics
3323 * @adap: the adapter
3324 * @idx: the port index
3326 * Clear HW statistics for the given port.
3328 void t4_clr_port_stats(struct adapter *adap, int idx)
3331 u32 bgmap = t4_get_mps_bg_map(adap, idx);
/* Per-port statistic register base differs between T4 and T5+. */
3334 if (is_t4(adap->params.chip))
3335 port_base_addr = PORT_BASE(idx);
3337 port_base_addr = T5_PORT_BASE(idx);
/* Zero every 64-bit TX then RX counter (step 8 = one u64 per counter). */
3339 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3340 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3341 t4_write_reg(adap, port_base_addr + i, 0);
3342 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3343 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3344 t4_write_reg(adap, port_base_addr + i, 0);
/* Clear drop/truncate counters for each buffer group used by the port
 * (the write statements inside this if are partially elided here). */
3345 for (i = 0; i < 4; i++)
3346 if (bgmap & (1 << i)) {
3348 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
3351 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
3357 * t4_fw_hello - establish communication with FW
3358 * @adap: the adapter
3359 * @mbox: mailbox to use for the FW command
3360 * @evt_mbox: mailbox to receive async FW events
3361 * @master: specifies the caller's willingness to be the device master
3362 * @state: returns the current device state (if non-NULL)
3364 * Issues a command to establish communication with FW. Returns either
3365 * an error (negative integer) or the mailbox of the Master PF.
3367 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
3368 enum dev_master master, enum dev_state *state)
3371 struct fw_hello_cmd c;
3373 unsigned int master_mbox;
3374 int retries = FW_CMD_HELLO_RETRIES;
3377 memset(&c, 0, sizeof(c));
3378 INIT_CMD(c, HELLO, WRITE);
3379 c.err_to_clearinit = cpu_to_be32(
3380 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
3381 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
3382 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
3383 M_FW_HELLO_CMD_MBMASTER) |
3384 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
3385 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
3386 F_FW_HELLO_CMD_CLEARINIT);
3389 * Issue the HELLO command to the firmware. If it's not successful
3390 * but indicates that we got a "busy" or "timeout" condition, retry
3391 * the HELLO until we exhaust our retry limit. If we do exceed our
3392 * retry limit, check to see if the firmware left us any error
3393 * information and report that if so ...
3395 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3396 if (ret != FW_SUCCESS) {
/* Retry target label and the final error return are elided here. */
3397 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
3399 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
3400 t4_report_fw_error(adap)
3404 v = be32_to_cpu(c.err_to_clearinit);
3405 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
/* Report current device state back to the caller (state may be NULL;
 * the NULL check is presumably on an elided line — TODO confirm). */
3407 if (v & F_FW_HELLO_CMD_ERR)
3408 *state = DEV_STATE_ERR;
3409 else if (v & F_FW_HELLO_CMD_INIT)
3410 *state = DEV_STATE_INIT;
3412 *state = DEV_STATE_UNINIT;
3416 * If we're not the Master PF then we need to wait around for the
3417 * Master PF Driver to finish setting up the adapter.
3419 * Note that we also do this wait if we're a non-Master-capable PF and
3420 * there is no current Master PF; a Master PF may show up momentarily
3421 * and we wouldn't want to fail pointlessly. (This can happen when an
3422 * OS loads lots of different drivers rapidly at the same time). In
3423 * this case, the Master PF returned by the firmware will be
3424 * M_PCIE_FW_MASTER so the test below will work ...
3426 if ((v & (F_FW_HELLO_CMD_ERR | F_FW_HELLO_CMD_INIT)) == 0 &&
3427 master_mbox != mbox) {
3428 int waiting = FW_CMD_HELLO_TIMEOUT;
3431 * Wait for the firmware to either indicate an error or
3432 * initialized state. If we see either of these we bail out
3433 * and report the issue to the caller. If we exhaust the
3434 * "hello timeout" and we haven't exhausted our retries, try
3435 * again. Otherwise bail with a timeout error.
3444 * If neither Error nor Initialized are indicated
3445 * by the firmware keep waiting till we exhaust our
3446 * timeout ... and then retry if we haven't exhausted
3449 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
3450 if (!(pcie_fw & (F_PCIE_FW_ERR | F_PCIE_FW_INIT))) {
3461 * We either have an Error or Initialized condition
3462 * report errors preferentially.
3465 if (pcie_fw & F_PCIE_FW_ERR)
3466 *state = DEV_STATE_ERR;
3467 else if (pcie_fw & F_PCIE_FW_INIT)
3468 *state = DEV_STATE_INIT;
3472 * If we arrived before a Master PF was selected and
3473 * there's not a valid Master PF, grab its identity
3476 if (master_mbox == M_PCIE_FW_MASTER &&
3477 (pcie_fw & F_PCIE_FW_MASTER_VLD))
3478 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
3487 * t4_fw_bye - end communication with FW
3488 * @adap: the adapter
3489 * @mbox: mailbox to use for the FW command
3491 * Issues a command to terminate communication with FW.
3493 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
3495 struct fw_bye_cmd c;
/* A BYE command carries no payload beyond the standard header. */
3497 memset(&c, 0, sizeof(c));
3498 INIT_CMD(c, BYE, WRITE);
3499 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3503 * t4_fw_reset - issue a reset to FW
3504 * @adap: the adapter
3505 * @mbox: mailbox to use for the FW command
3506 * @reset: specifies the type of reset to perform
3508 * Issues a reset command of the specified type to FW.
3510 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
3512 struct fw_reset_cmd c;
/* @reset is passed through verbatim (e.g. F_PIORST | F_PIORSTMODE). */
3514 memset(&c, 0, sizeof(c));
3515 INIT_CMD(c, RESET, WRITE);
3516 c.val = cpu_to_be32(reset);
3517 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3521 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
3522 * @adap: the adapter
3523 * @mbox: mailbox to use for the FW RESET command (if desired)
3524 * @force: force uP into RESET even if FW RESET command fails
3526 * Issues a RESET command to firmware (if desired) with a HALT indication
3527 * and then puts the microprocessor into RESET state. The RESET command
3528 * will only be issued if a legitimate mailbox is provided (mbox <=
3529 * M_PCIE_FW_MASTER).
3531 * This is generally used in order for the host to safely manipulate the
3532 * adapter without fear of conflicting with whatever the firmware might
3533 * be doing. The only way out of this state is to RESTART the firmware
3536 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
3541 * If a legitimate mailbox is provided, issue a RESET command
3542 * with a HALT indication.
3544 if (mbox <= M_PCIE_FW_MASTER) {
3545 struct fw_reset_cmd c;
3547 memset(&c, 0, sizeof(c));
3548 INIT_CMD(c, RESET, WRITE);
3549 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
3550 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
3551 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3555 * Normally we won't complete the operation if the firmware RESET
3556 * command fails but if our caller insists we'll go ahead and put the
3557 * uP into RESET. This can be useful if the firmware is hung or even
3558 * missing ... We'll have to take the risk of putting the uP into
3559 * RESET without the cooperation of firmware in that case.
3561 * We also force the firmware's HALT flag to be on in case we bypassed
3562 * the firmware RESET command above or we're dealing with old firmware
3563 * which doesn't have the HALT capability. This will serve as a flag
3564 * for the incoming firmware to know that it's coming out of a HALT
3565 * rather than a RESET ... if it's new enough to understand that ...
3567 if (ret == 0 || force) {
3568 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
3569 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
3574 * And we always return the result of the firmware RESET command
3575 * even when we force the uP into RESET ...
3581 * t4_fw_restart - restart the firmware by taking the uP out of RESET
3582 * @adap: the adapter
3583 * @mbox: mailbox to use for the FW RESET command (if desired)
3584 * @reset: if we want to do a RESET to restart things
3586 * Restart firmware previously halted by t4_fw_halt(). On successful
3587 * return the previous PF Master remains as the new PF Master and there
3588 * is no need to issue a new HELLO command, etc.
3590 * We do this in two ways:
3592 * 1. If we're dealing with newer firmware we'll simply want to take
3593 * the chip's microprocessor out of RESET. This will cause the
3594 * firmware to start up from its start vector. And then we'll loop
3595 * until the firmware indicates it's started again (PCIE_FW.HALT
3596 * reset to 0) or we timeout.
3598 * 2. If we're dealing with older firmware then we'll need to RESET
3599 * the chip since older firmware won't recognize the PCIE_FW.HALT
3600 * flag and automatically RESET itself on startup.
3602 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
/* (The "if (reset)" branch structure is partially elided in this listing.) */
3606 * Since we're directing the RESET instead of the firmware
3607 * doing it automatically, we need to clear the PCIE_FW.HALT
3610 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
3613 * If we've been given a valid mailbox, first try to get the
3614 * firmware to do the RESET. If that works, great and we can
3615 * return success. Otherwise, if we haven't been given a
3616 * valid mailbox or the RESET command failed, fall back to
3617 * hitting the chip with a hammer.
3619 if (mbox <= M_PCIE_FW_MASTER) {
3620 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
3622 if (t4_fw_reset(adap, mbox,
3623 F_PIORST | F_PIORSTMODE) == 0)
3627 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
/* Newer-firmware path: release uP from RESET and poll PCIE_FW.HALT
 * until the firmware clears it or we time out. */
3632 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
3633 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
3634 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
3645 * t4_fl_pkt_align - return the fl packet alignment
3646 * @adap: the adapter
3648 * T4 has a single field to specify the packing and padding boundary.
3649 * T5 onwards has separate fields for this and hence the alignment for
3650 * next packet offset is maximum of these two.
3652 int t4_fl_pkt_align(struct adapter *adap)
3654 u32 sge_control, sge_control2;
3655 unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
3657 sge_control = t4_read_reg(adap, A_SGE_CONTROL);
3659 /* T4 uses a single control field to specify both the PCIe Padding and
3660 * Packing Boundary. T5 introduced the ability to specify these
3661 * separately. The actual Ingress Packet Data alignment boundary
3662 * within Packed Buffer Mode is the maximum of these two
/* T6 re-based the INGPADBOUNDARY encoding, hence a different shift. */
3665 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
3666 ingpad_shift = X_INGPADBOUNDARY_SHIFT;
3668 ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
/* Decode the padding boundary: a power of two, field value + shift. */
3670 ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
3672 fl_align = ingpadboundary;
3673 if (!is_t4(adap->params.chip)) {
/* T5 and later: SGE_CONTROL2 carries the separate packing boundary.
 * The special 16B encoding maps to a literal 16 bytes; everything
 * else is a power of two based at X_INGPACKBOUNDARY_SHIFT.
 */
3674 sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
3675 ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
3676 if (ingpackboundary == X_INGPACKBOUNDARY_16B)
3677 ingpackboundary = 16;
3679 ingpackboundary = 1 << (ingpackboundary +
3680 X_INGPACKBOUNDARY_SHIFT);
3682 fl_align = max(ingpadboundary, ingpackboundary);
3688 * t4_fixup_host_params_compat - fix up host-dependent parameters
3689 * @adap: the adapter
3690 * @page_size: the host's Base Page Size
3691 * @cache_line_size: the host's Cache Line Size
3692 * @chip_compat: maintain compatibility with designated chip
3694 * Various registers in the chip contain values which are dependent on the
3695 * host's Base Page and Cache Line Sizes. This function will fix all of
3696 * those registers with the appropriate values as passed in ...
3698 * @chip_compat is used to limit the set of changes that are made
3699 * to be compatible with the indicated chip release. This is used by
3700 * drivers to maintain compatibility with chip register settings when
3701 * the drivers haven't [yet] been updated with new chip support.
3703 int t4_fixup_host_params_compat(struct adapter *adap,
3704 unsigned int page_size,
3705 unsigned int cache_line_size,
3706 enum chip_type chip_compat)
/* sge_hps encodes the host page size relative to 1KB (log2(size) - 10);
 * assumes page_size is a power of two -- TODO confirm callers guarantee it.
 */
3708 unsigned int page_shift = cxgbe_fls(page_size) - 1;
3709 unsigned int sge_hps = page_shift - 10;
/* Egress status page is 64B or 128B depending on host cache line size;
 * free-list alignment is at least 32B.
 */
3710 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
3711 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
3712 unsigned int fl_align_log = cxgbe_fls(fl_align) - 1;
/* Program the same host page size for all eight PFs. */
3714 t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
3715 V_HOSTPAGESIZEPF0(sge_hps) |
3716 V_HOSTPAGESIZEPF1(sge_hps) |
3717 V_HOSTPAGESIZEPF2(sge_hps) |
3718 V_HOSTPAGESIZEPF3(sge_hps) |
3719 V_HOSTPAGESIZEPF4(sge_hps) |
3720 V_HOSTPAGESIZEPF5(sge_hps) |
3721 V_HOSTPAGESIZEPF6(sge_hps) |
3722 V_HOSTPAGESIZEPF7(sge_hps));
/* T4 (or T4-compat mode): single combined padding/packing boundary. */
3724 if (is_t4(adap->params.chip) || is_t4(chip_compat))
3725 t4_set_reg_field(adap, A_SGE_CONTROL,
3726 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
3727 F_EGRSTATUSPAGESIZE,
3728 V_INGPADBOUNDARY(fl_align_log -
3729 X_INGPADBOUNDARY_SHIFT) |
3730 V_EGRSTATUSPAGESIZE(stat_len != 64));
3732 unsigned int pack_align;
3733 unsigned int ingpad, ingpack;
3734 unsigned int pcie_cap;
3737 * T5 introduced the separation of the Free List Padding and
3738 * Packing Boundaries. Thus, we can select a smaller Padding
3739 * Boundary to avoid uselessly chewing up PCIe Link and Memory
3740 * Bandwidth, and use a Packing Boundary which is large enough
3741 * to avoid false sharing between CPUs, etc.
3743 * For the PCI Link, the smaller the Padding Boundary the
3744 * better. For the Memory Controller, a smaller Padding
3745 * Boundary is better until we cross under the Memory Line
3746 * Size (the minimum unit of transfer to/from Memory). If we
3747 * have a Padding Boundary which is smaller than the Memory
3748 * Line Size, that'll involve a Read-Modify-Write cycle on the
3749 * Memory Controller which is never good.
3752 /* We want the Packing Boundary to be based on the Cache Line
3753 * Size in order to help avoid False Sharing performance
3754 * issues between CPUs, etc. We also want the Packing
3755 * Boundary to incorporate the PCI-E Maximum Payload Size. We
3756 * get best performance when the Packing Boundary is a
3757 * multiple of the Maximum Payload Size.
3759 pack_align = fl_align;
3760 pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
3762 unsigned int mps, mps_log;
3765 /* The PCIe Device Control Maximum Payload Size field
3766 * [bits 7:5] encodes sizes as powers of 2 starting at
/* 128 bytes, i.e. mps = 2^(field + 7). */
3769 t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
3771 mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
/* Grow the packing boundary to at least the Max Payload Size. */
3773 if (mps > pack_align)
3778 * N.B. T5 has a different interpretation of the "0" value for
3779 * the Packing Boundary. This corresponds to 16 bytes instead
3780 * of the expected 32 bytes. We never have a Packing Boundary
3781 * less than 32 bytes so we can't use that special value but
3782 * on the other hand, if we wanted 32 bytes, the best we can
3783 * really do is 64 bytes ...
3785 if (pack_align <= 16) {
3786 ingpack = X_INGPACKBOUNDARY_16B;
3788 } else if (pack_align == 32) {
3789 ingpack = X_INGPACKBOUNDARY_64B;
3792 unsigned int pack_align_log = cxgbe_fls(pack_align) - 1;
3794 ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
3795 fl_align = pack_align;
3798 /* Use the smallest Ingress Padding which isn't smaller than
3799 * the Memory Controller Read/Write Size. We'll take that as
3800 * being 8 bytes since we don't know of any system with a
3801 * wider Memory Controller Bus Width.
3803 if (is_t5(adap->params.chip))
3804 ingpad = X_INGPADBOUNDARY_32B;
3806 ingpad = X_T6_INGPADBOUNDARY_8B;
/* Program the chosen padding boundary + status page size, then the
 * separate packing boundary in SGE_CONTROL2 (T5+ only path).
 */
3807 t4_set_reg_field(adap, A_SGE_CONTROL,
3808 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
3809 F_EGRSTATUSPAGESIZE,
3810 V_INGPADBOUNDARY(ingpad) |
3811 V_EGRSTATUSPAGESIZE(stat_len != 64));
3812 t4_set_reg_field(adap, A_SGE_CONTROL2,
3813 V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
3814 V_INGPACKBOUNDARY(ingpack));
3818 * Adjust various SGE Free List Host Buffer Sizes.
3820 * The first four entries are:
3824 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
3825 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
3827 * For the single-MTU buffers in unpacked mode we need to include
3828 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
3829 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
3830 * Padding boundary. All of these are accommodated in the Factory
3831 * Default Firmware Configuration File but we need to adjust it for
3832 * this host's cache line size.
3834 t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
/* Round the existing FL buffer sizes up to the fl_align boundary. */
3835 t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
3836 (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align - 1)
3838 t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
3839 (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align - 1)
/* ULP RX TDDP page size: encoded relative to 4KB (page_shift - 12). */
3842 t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));
3848 * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
3849 * @adap: the adapter
3850 * @page_size: the host's Base Page Size
3851 * @cache_line_size: the host's Cache Line Size
3853 * Various registers in T4 contain values which are dependent on the
3854 * host's Base Page and Cache Line Sizes. This function will fix all of
3855 * those registers with the appropriate values as passed in ...
3857 * This routine makes changes which are compatible with T4 chips.
3859 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3860 unsigned int cache_line_size)
/* Thin wrapper: delegate with a T4 chip-compat argument (elided here). */
3862 return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
3867 * t4_fw_initialize - ask FW to initialize the device
3868 * @adap: the adapter
3869 * @mbox: mailbox to use for the FW command
3871 * Issues a command to FW to partially initialize the device. This
3872 * performs initialization that generally doesn't depend on user input.
3874 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3876 struct fw_initialize_cmd c;
/* Build a zeroed INITIALIZE/WRITE command and issue it synchronously;
 * returns 0 on success or a negative errno from the mailbox layer.
 */
3878 memset(&c, 0, sizeof(c));
3879 INIT_CMD(c, INITIALIZE, WRITE);
3880 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3884 * t4_query_params_rw - query FW or device parameters
3885 * @adap: the adapter
3886 * @mbox: mailbox to use for the FW command
3889 * @nparams: the number of parameters
3890 * @params: the parameter names
3891 * @val: the parameter values
3892 * @rw: Write and read flag
3894 * Reads the value of FW or device parameters. Up to 7 parameters can be
3897 static int t4_query_params_rw(struct adapter *adap, unsigned int mbox,
3898 unsigned int pf, unsigned int vf,
3899 unsigned int nparams, const u32 *params,
3904 struct fw_params_cmd c;
/* p walks the (mnem, val) pairs in the command's param array. */
3905 __be32 *p = &c.param[0].mnem;
3910 memset(&c, 0, sizeof(c));
3911 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3912 F_FW_CMD_REQUEST | F_FW_CMD_READ |
3913 V_FW_PARAMS_CMD_PFN(pf) |
3914 V_FW_PARAMS_CMD_VFN(vf));
3915 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Load each mnemonic; when rw is set, also preload the caller's value
 * so the firmware writes it before reading back.
 */
3917 for (i = 0; i < nparams; i++) {
3918 *p++ = cpu_to_be32(*params++);
3920 *p = cpu_to_be32(*(val + i));
3924 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* On success, copy the returned values back out; p += 2 steps over
 * each (mnem, val) pair to land on the next val.
 */
3926 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3927 *val++ = be32_to_cpu(*p);
/* Read-only convenience wrapper around t4_query_params_rw() (rw = 0). */
3931 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3932 unsigned int vf, unsigned int nparams, const u32 *params,
3935 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
3939 * t4_set_params_timeout - sets FW or device parameters
3940 * @adap: the adapter
3941 * @mbox: mailbox to use for the FW command
3944 * @nparams: the number of parameters
3945 * @params: the parameter names
3946 * @val: the parameter values
3947 * @timeout: the timeout time
3949 * Sets the value of FW or device parameters. Up to 7 parameters can be
3950 * specified at once.
3952 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
3953 unsigned int pf, unsigned int vf,
3954 unsigned int nparams, const u32 *params,
3955 const u32 *val, int timeout)
3957 struct fw_params_cmd c;
/* p walks the (mnem, val) pairs in the command's param array. */
3958 __be32 *p = &c.param[0].mnem;
3963 memset(&c, 0, sizeof(c));
3964 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
3965 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
3966 V_FW_PARAMS_CMD_PFN(pf) |
3967 V_FW_PARAMS_CMD_VFN(vf));
3968 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Pack each (name, value) pair in big-endian firmware byte order. */
3971 *p++ = cpu_to_be32(*params++);
3972 *p++ = cpu_to_be32(*val++);
/* No reply payload needed; honor the caller-supplied timeout. */
3975 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
/* Convenience wrapper: set parameters using the default FW command
 * timeout (FW_CMD_MAX_TIMEOUT).
 */
3978 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3979 unsigned int vf, unsigned int nparams, const u32 *params,
3982 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
3983 FW_CMD_MAX_TIMEOUT);
3987 * t4_alloc_vi_func - allocate a virtual interface
3988 * @adap: the adapter
3989 * @mbox: mailbox to use for the FW command
3990 * @port: physical port associated with the VI
3991 * @pf: the PF owning the VI
3992 * @vf: the VF owning the VI
3993 * @nmac: number of MAC addresses needed (1 to 5)
3994 * @mac: the MAC addresses of the VI
3995 * @rss_size: size of RSS table slice associated with this VI
3996 * @portfunc: which Port Application Function MAC Address is desired
3997 * @idstype: Intrusion Detection Type
3999 * Allocates a virtual interface for the given physical port. If @mac is
4000 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
4001 * @mac should be large enough to hold @nmac Ethernet addresses, they are
4002 * stored consecutively so the space needed is @nmac * 6 bytes.
4003 * Returns a negative error number or the non-negative VI id.
4005 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
4006 unsigned int port, unsigned int pf, unsigned int vf,
4007 unsigned int nmac, u8 *mac, unsigned int *rss_size,
4008 unsigned int portfunc, unsigned int idstype)
4013 memset(&c, 0, sizeof(c));
4014 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
4015 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
4016 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
4017 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
4018 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
4019 V_FW_VI_CMD_FUNC(portfunc));
4020 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
4023 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/* Copy out the firmware-assigned MAC addresses. The nmacN fields are
 * checked from highest index down so each 6-byte address lands at its
 * slot (mac + 6*N); fall-through between the cases is intentional.
 */
4028 memcpy(mac, c.mac, sizeof(c.mac));
4031 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
4034 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
4037 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
4040 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
4045 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
/* NOTE(review): cpu_to_be16 on a __be16 field is semantically
 * backwards (be16_to_cpu is meant); behavior is identical on both
 * little- and big-endian hosts since both are the same byte swap /
 * identity, but this deserves cleanup -- confirm against upstream.
 */
4046 return G_FW_VI_CMD_VIID(cpu_to_be16(c.type_to_viid));
4050 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
4051 * @adap: the adapter
4052 * @mbox: mailbox to use for the FW command
4053 * @port: physical port associated with the VI
4054 * @pf: the PF owning the VI
4055 * @vf: the VF owning the VI
4056 * @nmac: number of MAC addresses needed (1 to 5)
4057 * @mac: the MAC addresses of the VI
4058 * @rss_size: size of RSS table slice associated with this VI
4060 * Backwards compatible and convenience routine to allocate a Virtual
4061 * Interface with an Ethernet Port Application Function and Intrusion
4062 * Detection System disabled.
4064 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
4065 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
4066 unsigned int *rss_size)
/* Delegates to t4_alloc_vi_func(); the portfunc/idstype arguments are
 * on the elided continuation line -- presumably ETH function, IDS off.
 */
4068 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
4073 * t4_free_vi - free a virtual interface
4074 * @adap: the adapter
4075 * @mbox: mailbox to use for the FW command
4076 * @pf: the PF owning the VI
4077 * @vf: the VF owning the VI
4078 * @viid: virtual interface identifier
4080 * Free a previously allocated virtual interface.
4082 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
4083 unsigned int vf, unsigned int viid)
4087 memset(&c, 0, sizeof(c));
4088 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
/* PF/VF ownership fields only apply on the PF-driver path. */
4091 c.op_to_vfn |= cpu_to_be32(V_FW_VI_CMD_PFN(pf) |
4092 V_FW_VI_CMD_VFN(vf));
4093 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
4094 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
/* PF path issues via the PF mailbox; VF path via the VF mailbox. */
4097 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4099 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4103 * t4_set_rxmode - set Rx properties of a virtual interface
4104 * @adap: the adapter
4105 * @mbox: mailbox to use for the FW command
4107 * @mtu: the new MTU or -1
4108 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
4109 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
4110 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
4111 * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
4113 * @sleep_ok: if true we may sleep while awaiting command completion
4115 * Sets Rx properties of a virtual interface.
4117 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
4118 int mtu, int promisc, int all_multi, int bcast, int vlanex,
4121 struct fw_vi_rxmode_cmd c;
4123 /* convert to FW values */
/* -1 ("no change") maps to the field's all-ones mask value, which the
 * firmware interprets as "leave this setting untouched".
 */
4125 mtu = M_FW_VI_RXMODE_CMD_MTU;
4127 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
4129 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
4131 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
4133 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
4135 memset(&c, 0, sizeof(c));
4136 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
4137 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4138 V_FW_VI_RXMODE_CMD_VIID(viid));
4139 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
4140 c.mtu_to_vlanexen = cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
4141 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
4142 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
4143 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
4144 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
/* PF path may sleep per @sleep_ok; VF path uses the VF mailbox. */
4146 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL,
4149 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4153 * t4_change_mac - modifies the exact-match filter for a MAC address
4154 * @adap: the adapter
4155 * @mbox: mailbox to use for the FW command
4157 * @idx: index of existing filter for old value of MAC address, or -1
4158 * @addr: the new MAC address value
4159 * @persist: whether a new MAC allocation should be persistent
4160 * @add_smt: if true also add the address to the HW SMT
4162 * Modifies an exact-match filter and sets it to the new MAC address if
4163 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
4164 * latter case the address is added persistently if @persist is %true.
4166 * Note that in general it is not possible to modify the value of a given
4167 * filter so the generic way to modify an address filter is to free the one
4168 * being used by the old address value and allocate a new filter for the
4169 * new address value.
4171 * Returns a negative error number or the index of the filter with the new
4172 * MAC value. Note that this index may differ from @idx.
4174 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
4175 int idx, const u8 *addr, bool persist, bool add_smt)
4178 struct fw_vi_mac_cmd c;
4179 struct fw_vi_mac_exact *p = c.u.exact;
/* Upper bound on valid filter indices for this chip's MPS TCAM. */
4180 int max_mac_addr = adap->params.arch.mps_tcam_size;
4182 if (idx < 0) /* new allocation */
4183 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
4184 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
4186 memset(&c, 0, sizeof(c));
4187 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
4188 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
4189 V_FW_VI_MAC_CMD_VIID(viid));
4190 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
4191 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
4192 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
4193 V_FW_VI_MAC_CMD_IDX(idx));
4194 memcpy(p->macaddr, addr, sizeof(p->macaddr));
/* PF path uses the given mailbox; VF path uses the VF mailbox. */
4197 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
4199 ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
/* Extract the assigned filter index from the reply; an index at or
 * beyond the TCAM size indicates failure (handling elided here).
 */
4201 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
4202 if (ret >= max_mac_addr)
4209 * t4_enable_vi_params - enable/disable a virtual interface
4210 * @adap: the adapter
4211 * @mbox: mailbox to use for the FW command
4213 * @rx_en: 1=enable Rx, 0=disable Rx
4214 * @tx_en: 1=enable Tx, 0=disable Tx
4215 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
4217 * Enables/disables a virtual interface. Note that setting DCB Enable
4218 * only makes sense when enabling a Virtual Interface ...
4220 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
4221 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
4223 struct fw_vi_enable_cmd c;
4225 memset(&c, 0, sizeof(c));
4226 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
4227 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
4228 V_FW_VI_ENABLE_CMD_VIID(viid));
4229 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
4230 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
4231 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
/* Non-sleeping mailbox variants: PF path vs. VF path. */
4234 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
4236 return t4vf_wr_mbox_ns(adap, &c, sizeof(c), NULL);
4240 * t4_enable_vi - enable/disable a virtual interface
4241 * @adap: the adapter
4242 * @mbox: mailbox to use for the FW command
4244 * @rx_en: 1=enable Rx, 0=disable Rx
4245 * @tx_en: 1=enable Tx, 0=disable Tx
4247 * Enables/disables a virtual interface. Note that setting DCB Enable
4248 * only makes sense when enabling a Virtual Interface ...
4250 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
4251 bool rx_en, bool tx_en)
/* Convenience wrapper with DCB message delivery disabled. */
4253 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
4257 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
4258 * @adap: the adapter
4259 * @mbox: mailbox to use for the FW command
4260 * @start: %true to enable the queues, %false to disable them
4261 * @pf: the PF owning the queues
4262 * @vf: the VF owning the queues
4263 * @iqid: ingress queue id
4264 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4265 * @fl1id: FL1 queue id or 0xffff if no attached FL1
4267 * Starts or stops an ingress queue and its associated FLs, if any.
4269 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
4270 unsigned int pf, unsigned int vf, unsigned int iqid,
4271 unsigned int fl0id, unsigned int fl1id)
4275 memset(&c, 0, sizeof(c));
4276 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
/* Exactly one of IQSTART/IQSTOP is set, driven by @start. */
4278 c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) |
4279 V_FW_IQ_CMD_IQSTOP(!start) |
4281 c.iqid = cpu_to_be16(iqid);
4282 c.fl0id = cpu_to_be16(fl0id);
4283 c.fl1id = cpu_to_be16(fl1id);
/* PF path adds ownership fields and uses the PF mailbox; VF path
 * issues the same command through the VF mailbox.
 */
4285 c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
4286 V_FW_IQ_CMD_VFN(vf));
4287 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4289 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4294 * t4_iq_free - free an ingress queue and its FLs
4295 * @adap: the adapter
4296 * @mbox: mailbox to use for the FW command
4297 * @pf: the PF owning the queues
4298 * @vf: the VF owning the queues
4299 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
4300 * @iqid: ingress queue id
4301 * @fl0id: FL0 queue id or 0xffff if no attached FL0
4302 * @fl1id: FL1 queue id or 0xffff if no attached FL1
4304 * Frees an ingress queue and its associated FLs, if any.
4306 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4307 unsigned int vf, unsigned int iqtype, unsigned int iqid,
4308 unsigned int fl0id, unsigned int fl1id)
4312 memset(&c, 0, sizeof(c));
4313 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
/* PF/VF ownership fields only apply on the PF-driver path. */
4316 c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
4317 V_FW_IQ_CMD_VFN(vf));
4318 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
4319 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
4320 c.iqid = cpu_to_be16(iqid);
4321 c.fl0id = cpu_to_be16(fl0id);
4322 c.fl1id = cpu_to_be16(fl1id);
/* PF path vs. VF mailbox path. */
4324 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4326 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4330 * t4_eth_eq_free - free an Ethernet egress queue
4331 * @adap: the adapter
4332 * @mbox: mailbox to use for the FW command
4333 * @pf: the PF owning the queue
4334 * @vf: the VF owning the queue
4335 * @eqid: egress queue id
4337 * Frees an Ethernet egress queue.
4339 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4340 unsigned int vf, unsigned int eqid)
4342 struct fw_eq_eth_cmd c;
4344 memset(&c, 0, sizeof(c));
4345 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
4346 F_FW_CMD_REQUEST | F_FW_CMD_EXEC);
/* NOTE(review): FW_IQ_CMD PFN/VFN field macros are used here inside
 * an FW_EQ_ETH_CMD; this only works if the PFN/VFN bit positions are
 * shared between the two commands -- verify against t4fw_interface.h
 * (V_FW_EQ_ETH_CMD_PFN/VFN would be the self-documenting choice).
 */
4348 c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
4349 V_FW_IQ_CMD_VFN(vf));
4350 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
4351 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
/* PF path vs. VF mailbox path. */
4353 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
4355 return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
4359 * t4_link_down_rc_str - return a string for a Link Down Reason Code
4360 * @link_down_rc: Link Down Reason Code
4362 * Returns a string representation of the Link Down Reason Code.
4364 static const char *t4_link_down_rc_str(unsigned char link_down_rc)
/* Table indexed directly by the firmware's reason code value. */
4366 static const char * const reason[] = {
4369 "Auto-negotiation Failure",
4371 "Insufficient Airflow",
4372 "Unable To Determine Reason",
4373 "No RX Signal Detected",
/* Out-of-range codes get a generic fallback string. */
4377 if (link_down_rc >= ARRAY_SIZE(reason))
4378 return "Bad Reason Code";
4380 return reason[link_down_rc];
4383 /* Return the highest speed set in the port capabilities, in Mb/s. */
4384 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
/* Helper macro: return __speed Mb/s if the corresponding 32-bit
 * capability bit is set; tested from fastest to slowest so the first
 * hit is the highest supported speed.
 */
4386 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
4388 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
4392 TEST_SPEED_RETURN(100G, 100000);
4393 TEST_SPEED_RETURN(50G, 50000);
4394 TEST_SPEED_RETURN(40G, 40000);
4395 TEST_SPEED_RETURN(25G, 25000);
4396 TEST_SPEED_RETURN(10G, 10000);
4397 TEST_SPEED_RETURN(1G, 1000);
4398 TEST_SPEED_RETURN(100M, 100);
4400 #undef TEST_SPEED_RETURN
4406 * t4_handle_get_port_info - process a FW reply message
4407 * @pi: the port info
4408 * @rpl: start of the FW message
4410 * Processes a GET_PORT_INFO FW reply message.
4412 static void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
4414 const struct fw_port_cmd *cmd = (const void *)rpl;
4415 int action = G_FW_PORT_CMD_ACTION(be32_to_cpu(cmd->action_to_len16));
4416 fw_port_cap32_t pcaps, acaps, linkattr;
4417 struct link_config *lc = &pi->link_cfg;
4418 struct adapter *adapter = pi->adapter;
4419 enum fw_port_module_type mod_type;
4420 enum fw_port_type port_type;
4421 unsigned int speed, fc, fec;
4422 int link_ok, linkdnrc;
4424 /* Extract the various fields from the Port Information message.
/* Old-style 16-bit Port Information reply. */
4427 case FW_PORT_ACTION_GET_PORT_INFO: {
4428 u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
4430 link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS) != 0;
4431 linkdnrc = G_FW_PORT_CMD_LINKDNRC(lstatus);
4432 port_type = G_FW_PORT_CMD_PTYPE(lstatus);
4433 mod_type = G_FW_PORT_CMD_MODTYPE(lstatus);
/* Widen the 16-bit capability fields to the 32-bit format. */
4434 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
4435 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
4437 /* Unfortunately the format of the Link Status in the old
4438 * 16-bit Port Information message isn't the same as the
4439 * 16-bit Port Capabilities bitfield used everywhere else ...
/* Translate pause and link-speed status bits into the 32-bit
 * capability representation used by the rest of the driver.
 */
4442 if (lstatus & F_FW_PORT_CMD_RXPAUSE)
4443 linkattr |= FW_PORT_CAP32_FC_RX;
4444 if (lstatus & F_FW_PORT_CMD_TXPAUSE)
4445 linkattr |= FW_PORT_CAP32_FC_TX;
4446 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
4447 linkattr |= FW_PORT_CAP32_SPEED_100M;
4448 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
4449 linkattr |= FW_PORT_CAP32_SPEED_1G;
4450 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
4451 linkattr |= FW_PORT_CAP32_SPEED_10G;
4452 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
4453 linkattr |= FW_PORT_CAP32_SPEED_25G;
4454 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
4455 linkattr |= FW_PORT_CAP32_SPEED_40G;
4456 if (lstatus & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
4457 linkattr |= FW_PORT_CAP32_SPEED_100G;
/* New-style 32-bit Port Information reply: fields are native. */
4462 case FW_PORT_ACTION_GET_PORT_INFO32: {
4464 be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
4466 link_ok = (lstatus32 & F_FW_PORT_CMD_LSTATUS32) != 0;
4467 linkdnrc = G_FW_PORT_CMD_LINKDNRC32(lstatus32);
4468 port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
4469 mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus32);
4470 pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
4471 acaps = be32_to_cpu(cmd->u.info32.acaps32);
4472 linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
4477 dev_warn(adapter, "Handle Port Information: Bad Command/Action %#x\n",
4478 be32_to_cpu(cmd->action_to_len16));
/* Derive FEC/pause/speed settings from the decoded capabilities. */
4482 fec = fwcap_to_cc_fec(acaps);
4484 fc = fwcap_to_cc_pause(linkattr);
4485 speed = fwcap_to_speed(linkattr);
/* Notify the OS layer if the transceiver module changed. */
4487 if (mod_type != pi->mod_type) {
4489 pi->port_type = port_type;
4490 pi->mod_type = mod_type;
4491 t4_os_portmod_changed(adapter, pi->port_id);
4493 if (link_ok != lc->link_ok || speed != lc->speed ||
4494 fc != lc->fc || fec != lc->fec) { /* something changed */
4495 if (!link_ok && lc->link_ok) {
4496 lc->link_down_rc = linkdnrc;
/* NOTE(review): `adap` is not declared in this function (the local is
 * `adapter`); this likely only compiles because dev_warn() discards
 * its first argument in this driver -- confirm and clean up.
 */
4497 dev_warn(adap, "Port %d link down, reason: %s\n",
4498 pi->tx_chan, t4_link_down_rc_str(linkdnrc));
4500 lc->link_ok = link_ok;
4505 lc->acaps = acaps & ADVERT_MASK;
4507 if (lc->acaps & FW_PORT_CAP32_ANEG) {
4508 lc->autoneg = AUTONEG_ENABLE;
4510 /* When Autoneg is disabled, user needs to set
4512 * Similar to cxgb4_ethtool.c: set_link_ksettings
4515 lc->requested_speed = fwcap_to_speed(acaps);
4516 lc->autoneg = AUTONEG_DISABLE;
4522 * t4_handle_fw_rpl - process a FW reply message
4523 * @adap: the adapter
4524 * @rpl: start of the FW message
4526 * Processes a FW message, such as link state change messages.
4528 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
/* The opcode is always the first byte of any FW message. */
4530 u8 opcode = *(const u8 *)rpl;
4533 * This might be a port command ... this simplifies the following
4534 * conditionals ... We can get away with pre-dereferencing
4535 * action_to_len16 because it's in the first 16 bytes and all messages
4536 * will be at least that long.
4538 const struct fw_port_cmd *p = (const void *)rpl;
4539 unsigned int action =
4540 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
4542 if (opcode == FW_PORT_CMD &&
4543 (action == FW_PORT_ACTION_GET_PORT_INFO ||
4544 action == FW_PORT_ACTION_GET_PORT_INFO32)) {
4545 /* link/module state change message */
4546 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
4547 struct port_info *pi = NULL;
/* Map the firmware channel back to the owning port_info by
 * scanning all ports for a matching tx_chan.
 */
4550 for_each_port(adap, i) {
4551 pi = adap2pinfo(adap, i);
4552 if (pi->tx_chan == chan)
4556 t4_handle_get_port_info(pi, rpl);
4558 dev_warn(adap, "Unknown firmware reply %d\n", opcode);
/* Reset the SW link configuration of port @idx back to defaults
 * (remainder of the reset sequence is elided in this extraction).
 */
4564 void t4_reset_link_config(struct adapter *adap, int idx)
4566 struct port_info *pi = adap2pinfo(adap, idx);
4567 struct link_config *lc = &pi->link_cfg;
4570 lc->requested_speed = 0;
4571 lc->requested_fc = 0;
4577 * init_link_config - initialize a link's SW state
4578 * @lc: structure holding the link state
4579 * @pcaps: link Port Capabilities
4580 * @acaps: link current Advertised Port Capabilities
4582 * Initializes the SW state maintained for each link, including the link's
4583 * capabilities and default speed/flow-control/autonegotiation settings.
4585 void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
4586 fw_port_cap32_t acaps)
4589 lc->requested_speed = 0;
4591 lc->requested_fc = 0;
4595 * For Forward Error Control, we default to whatever the Firmware
4596 * tells us the Link is currently advertising.
4598 lc->auto_fec = fwcap_to_cc_fec(acaps);
4599 lc->requested_fec = FEC_AUTO;
4600 lc->fec = lc->auto_fec;
/* If the hardware supports autonegotiation, advertise and enable it
 * (with autonegotiated pause); otherwise force autoneg off.
 */
4602 if (lc->pcaps & FW_PORT_CAP32_ANEG) {
4603 lc->acaps = lc->pcaps & ADVERT_MASK;
4604 lc->autoneg = AUTONEG_ENABLE;
4605 lc->requested_fc |= PAUSE_AUTONEG;
4608 lc->autoneg = AUTONEG_DISABLE;
4613 * t4_wait_dev_ready - wait till reads of registers work
4615 * Right after the device is RESET it can take a small amount of time
4616 * for it to respond to register reads. Until then, all reads will
4617 * return either 0xff...ff or 0xee...ee. Return an error if reads
4618 * don't work within a reasonable time frame.
4620 static int t4_wait_dev_ready(struct adapter *adapter)
/* First probe: PL_WHOAMI returns a valid value once the device is up. */
4624 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4626 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
/* Retry after a delay (delay statement elided in this extraction). */
4630 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4631 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
4634 dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
4640 u32 vendor_and_model_id;
4644 int t4_get_flash_params(struct adapter *adapter)
4647 * Table for non-Numonix supported flash parts. Numonix parts are left
4648 * to the preexisting well-tested code. All flash parts have 64KB
4651 static struct flash_desc supported_flash[] = {
4652 { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
4657 unsigned int part, manufacturer;
4658 unsigned int density, size;
4661 * Issue a Read ID Command to the Flash part. We decode supported
4662 * Flash parts and their sizes from this. There's a newer Query
4663 * Command which can retrieve detailed geometry information but
4664 * many Flash parts don't support it.
4666 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
4668 ret = sf1_read(adapter, 3, 0, 1, &flashid);
4669 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
/* Fast path: exact match in the explicit supported-parts table. */
4673 for (part = 0; part < ARRAY_SIZE(supported_flash); part++) {
4674 if (supported_flash[part].vendor_and_model_id == flashid) {
4675 adapter->params.sf_size =
4676 supported_flash[part].size_mb;
4677 adapter->params.sf_nsec =
4678 adapter->params.sf_size / SF_SEC_SIZE;
/* Otherwise decode by JEDEC manufacturer ID (low byte of Read ID). */
4683 manufacturer = flashid & 0xff;
4684 switch (manufacturer) {
4685 case 0x20: { /* Micron/Numonix */
4687 * This Density -> Size decoding table is taken from Micron
/* Density code lives in bits [23:16] of the Read ID result. */
4690 density = (flashid >> 16) & 0xff;
4693 size = 1 << 20; /* 1MB */
4696 size = 1 << 21; /* 2MB */
4699 size = 1 << 22; /* 4MB */
4702 size = 1 << 23; /* 8MB */
4705 size = 1 << 24; /* 16MB */
4708 size = 1 << 25; /* 32MB */
4711 size = 1 << 26; /* 64MB */
4714 size = 1 << 27; /* 128MB */
4717 size = 1 << 28; /* 256MB */
4720 dev_err(adapter, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
4725 adapter->params.sf_size = size;
4726 adapter->params.sf_nsec = size / SF_SEC_SIZE;
4730 dev_err(adapter, "Unsupported Flash Part, ID = %#x\n", flashid);
4736 * We should reject adapters with FLASHes which are too small. So, emit
/* Undersized flash is warned about, not rejected (see comment above). */
4739 if (adapter->params.sf_size < FLASH_MIN_SIZE)
4740 dev_warn(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
4741 flashid, adapter->params.sf_size, FLASH_MIN_SIZE);
/* set_pcie_completion_timeout - program the PCIe Completion Timeout
 * field in Device Control 2 via a read-modify-write of config space.
 * NOTE(review): the mask/merge of the timeout range value into 'val'
 * is in lines dropped from this listing -- confirm in the full source.
 */
4746 static void set_pcie_completion_timeout(struct adapter *adapter,
4752 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4754 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
4757 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
4762 * t4_get_chip_type - Determine chip type from device ID
4763 * @adap: the adapter
4764 * @ver: adapter version
4766 int t4_get_chip_type(struct adapter *adap, int ver)
4768 enum chip_type chip = 0;
/* Hardware revision comes from PL_REV; combined with the version byte
 * of the PCI device ID it forms the chip code.
 */
4769 u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV));
4771 /* Retrieve adapter's device ID */
/* The switch/case labels on 'ver' were dropped from this listing:
 * one arm selects T5, the other T6, default rejects the device.
 */
4774 chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
4777 chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
4780 dev_err(adap, "Device %d is not supported\n",
4781 adap->params.pci.device_id);
4789 * t4_prep_adapter - prepare SW and HW for operation
4790 * @adapter: the adapter
4792 * Initialize adapter SW state for the various HW modules, set initial
4793 * values for some adapter tunables, take PHYs out of reset, and
4794 * initialize the MDIO interface.
4796 int t4_prep_adapter(struct adapter *adapter)
/* Device must answer register reads before anything else is attempted. */
4801 ret = t4_wait_dev_ready(adapter);
4805 pl_rev = G_REV(t4_read_reg(adapter, A_PL_REV));
4806 adapter->params.pci.device_id = adapter->pdev->id.device_id;
4807 adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;
4810 * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
4811 * ADAPTER (VERSION << 4 | REVISION)
4813 ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
4814 adapter->params.chip = 0;
/* Per-generation architecture parameters (switch arms on 'ver'; the
 * case labels were dropped from this listing). T5 arm:
 */
4817 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
4818 adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
4819 adapter->params.arch.mps_tcam_size =
4820 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
4821 adapter->params.arch.mps_rplc_size = 128;
4822 adapter->params.arch.nchan = NCHAN;
4823 adapter->params.arch.vfcount = 128;
/* T6 arm: no free-list doorbell flags, 2 channels, larger replication. */
4826 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
4827 adapter->params.arch.sge_fl_db = 0;
4828 adapter->params.arch.mps_tcam_size =
4829 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
4830 adapter->params.arch.mps_rplc_size = 256;
4831 adapter->params.arch.nchan = 2;
4832 adapter->params.arch.vfcount = 256;
/* Unknown version byte: unsupported device. */
4835 dev_err(adapter, "%s: Device %d is not supported\n",
4836 __func__, adapter->params.pci.device_id);
4840 adapter->params.pci.vpd_cap_addr =
4841 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
4843 ret = t4_get_flash_params(adapter);
4845 dev_err(adapter, "Unable to retrieve Flash Parameters, ret = %d\n",
4850 adapter->params.cim_la_size = CIMLA_SIZE;
4852 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
4855 * Default port and clock for debugging in case we can't reach FW.
4857 adapter->params.nports = 1;
4858 adapter->params.portvec = 1;
4859 adapter->params.vpd.cclk = 50000;
4861 /* Set pci completion timeout value to 4 seconds. */
4862 set_pcie_completion_timeout(adapter, 0xd);
4867 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
4868 * @adapter: the adapter
4869 * @qid: the Queue ID
4870 * @qtype: the Ingress or Egress type for @qid
4871 * @pbar2_qoffset: BAR2 Queue Offset
4872 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
4874 * Returns the BAR2 SGE Queue Registers information associated with the
4875 * indicated Absolute Queue ID. These are passed back in return value
4876 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
4877 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
4879 * This may return an error which indicates that BAR2 SGE Queue
4880 * registers aren't available. If an error is not returned, then the
4881 * following values are returned:
4883 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
4884 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
4886 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
4887 * require the "Inferred Queue ID" ability may be used. E.g. the
4888 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
4889 * then these "Inferred Queue ID" register may not be used.
4891 int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
4892 enum t4_bar2_qtype qtype, u64 *pbar2_qoffset,
4893 unsigned int *pbar2_qid)
4895 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
4896 u64 bar2_page_offset, bar2_qoffset;
4897 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
4900 * T4 doesn't support BAR2 SGE Queue registers.
4902 if (is_t4(adapter->params.chip))
4906 * Get our SGE Page Size parameters.
/* hps encodes page size as a power of two starting at 1KB (hence +10). */
4908 page_shift = adapter->params.sge.hps + 10;
4909 page_size = 1 << page_shift;
4912 * Get the right Queues per Page parameters for our Queue.
4914 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS ?
4915 adapter->params.sge.eq_qpp :
4916 adapter->params.sge.iq_qpp);
4917 qpp_mask = (1 << qpp_shift) - 1;
4920 * Calculate the basics of the BAR2 SGE Queue register area:
4921 * o The BAR2 page the Queue registers will be in.
4922 * o The BAR2 Queue ID.
4923 * o The BAR2 Queue ID Offset into the BAR2 page.
4925 bar2_page_offset = ((qid >> qpp_shift) << page_shift);
4926 bar2_qid = qid & qpp_mask;
4927 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
4930 * If the BAR2 Queue ID Offset is less than the Page Size, then the
4931 * hardware will infer the Absolute Queue ID simply from the writes to
4932 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
4933 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
4934 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
4935 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
4936 * from the BAR2 Page and BAR2 Queue ID.
4938 * One important consequence of this is that some BAR2 SGE registers
4939 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
4940 * there. But other registers synthesize the SGE Queue ID purely
4941 * from the writes to the registers -- the Write Combined Doorbell
4942 * Buffer is a good example. These BAR2 SGE Registers are only
4943 * available for those BAR2 SGE Register areas where the SGE Absolute
4944 * Queue ID can be inferred from simple writes.
4946 bar2_qoffset = bar2_page_offset;
4947 bar2_qinferred = (bar2_qid_offset < page_size);
4948 if (bar2_qinferred) {
4949 bar2_qoffset += bar2_qid_offset;
/* Inferred case also zeroes bar2_qid (line dropped from this listing). */
4953 *pbar2_qoffset = bar2_qoffset;
4954 *pbar2_qid = bar2_qid;
4959 * t4_init_sge_params - initialize adap->params.sge
4960 * @adapter: the adapter
4962 * Initialize various fields of the adapter's SGE Parameters structure.
4964 int t4_init_sge_params(struct adapter *adapter)
4966 struct sge_params *sge_params = &adapter->params.sge;
4968 unsigned int s_hps, s_qpp;
4971 * Extract the SGE Page Size for our PF.
4973 hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
/* Each PF has its own field in HOST_PAGE_SIZE; compute our PF's shift.
 * NOTE(review): the '* adapter->pf' factor is on a line dropped from
 * this listing -- confirm against the full source.
 */
4974 s_hps = (S_HOSTPAGESIZEPF0 + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) *
4976 sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
4979 * Extract the SGE Egress and Ingress Queues Per Page for our PF.
4981 s_qpp = (S_QUEUESPERPAGEPF0 +
4982 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
4983 qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
4984 sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
4985 qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
4986 sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
4992 * t4_init_tp_params - initialize adap->params.tp
4993 * @adap: the adapter
4995 * Initialize various fields of the adapter's TP Parameters structure.
4997 int t4_init_tp_params(struct adapter *adap)
/* Timer resolutions are packed into TP_TIMER_RESOLUTION. */
5002 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
5003 adap->params.tp.tre = G_TIMERRESOLUTION(v);
5004 adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
5006 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5007 for (chan = 0; chan < NCHAN; chan++)
5008 adap->params.tp.tx_modq[chan] = chan;
5011 * Cache the adapter's Compressed Filter Mode and global Ingress
5014 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5015 &adap->params.tp.vlan_pri_map, 1, A_TP_VLAN_PRI_MAP);
5016 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5017 &adap->params.tp.ingress_config, 1,
5018 A_TP_INGRESS_CONFIG);
5020 /* For T6, cache the adapter's compressed error vector
5021 * and passing outer header info for encapsulated packets.
5023 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
5024 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
5025 adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
5029 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
5030 * shift positions of several elements of the Compressed Filter Tuple
5031 * for this adapter which we need frequently ...
5033 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
5034 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
5035 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
5036 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
5040 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
5041 * represents the presence of an Outer VLAN instead of a VNIC ID.
/* -1 marks the VNIC shift as unusable in that case. */
5043 if ((adap->params.tp.ingress_config & F_VNIC) == 0)
5044 adap->params.tp.vnic_shift = -1;
5050 * t4_filter_field_shift - calculate filter field shift
5051 * @adap: the adapter
5052 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
5054 * Return the shift position of a filter field within the Compressed
5055 * Filter Tuple. The filter field is specified via its selection bit
5056 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
5058 int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel)
5060 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
/* A field not enabled in the filter mode has no tuple position. */
5064 if ((filter_mode & filter_sel) == 0)
/* Walk all selection bits below ours, accumulating the width of each
 * enabled field; the sum is our field's shift. (Most 'case F_*:' labels
 * and the final return were dropped from this listing.)
 */
5067 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
5068 switch (filter_mode & sel) {
5070 field_shift += W_FT_FCOE;
5073 field_shift += W_FT_PORT;
5076 field_shift += W_FT_VNIC_ID;
5079 field_shift += W_FT_VLAN;
5082 field_shift += W_FT_TOS;
5085 field_shift += W_FT_PROTOCOL;
5088 field_shift += W_FT_ETHERTYPE;
5091 field_shift += W_FT_MACMATCH;
5094 field_shift += W_FT_MPSHITTYPE;
5096 case F_FRAGMENTATION:
5097 field_shift += W_FT_FRAGMENTATION;
/* t4_init_rss_mode - query firmware for each port's RSS mode and cache
 * it in the port_info. Uses mailbox @mbox for the FW command exchange.
 */
5104 int t4_init_rss_mode(struct adapter *adap, int mbox)
5107 struct fw_rss_vi_config_cmd rvc;
5109 memset(&rvc, 0, sizeof(rvc));
5111 for_each_port(adap, i) {
5112 struct port_info *p = adap2pinfo(adap, i);
/* Issue a READ of the RSS VI config for this port's virtual interface. */
5114 rvc.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
5115 F_FW_CMD_REQUEST | F_FW_CMD_READ |
5116 V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
5117 rvc.retval_len16 = htonl(FW_LEN16(rvc));
5118 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
/* Error check on ret is on a line dropped from this listing. */
5121 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
5126 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
5128 unsigned int fw_caps = adap->params.fw_caps_support;
5129 fw_port_cap32_t pcaps, acaps;
5130 enum fw_port_type port_type;
5131 struct fw_port_cmd cmd;
5137 memset(&cmd, 0, sizeof(cmd));
5139 for_each_port(adap, i) {
5140 struct port_info *pi = adap2pinfo(adap, i);
5141 unsigned int rss_size = 0;
5143 while ((adap->params.portvec & (1 << j)) == 0)
5146 /* If we haven't yet determined whether we're talking to
5147 * Firmware which knows the new 32-bit Port Capabilities, it's
5148 * time to find out now. This will also tell new Firmware to
5149 * send us Port Status Updates using the new 32-bit Port
5150 * Capabilities version of the Port Information message.
5152 if (fw_caps == FW_CAPS_UNKNOWN) {
5153 u32 param, val, caps;
5155 caps = FW_PARAMS_PARAM_PFVF_PORT_CAPS32;
5156 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
5157 V_FW_PARAMS_PARAM_X(caps));
5159 ret = t4_set_params(adap, mbox, pf, vf, 1, ¶m,
5161 fw_caps = ret == 0 ? FW_CAPS32 : FW_CAPS16;
5162 adap->params.fw_caps_support = fw_caps;
5165 memset(&cmd, 0, sizeof(cmd));
5166 cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
5169 V_FW_PORT_CMD_PORTID(j));
5170 action = fw_caps == FW_CAPS16 ? FW_PORT_ACTION_GET_PORT_INFO :
5171 FW_PORT_ACTION_GET_PORT_INFO32;
5172 cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
5174 ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
5178 /* Extract the various fields from the Port Information message.
5180 if (fw_caps == FW_CAPS16) {
5182 be32_to_cpu(cmd.u.info.lstatus_to_modtype);
5184 port_type = G_FW_PORT_CMD_PTYPE(lstatus);
5185 mdio_addr = (lstatus & F_FW_PORT_CMD_MDIOCAP) ?
5186 (int)G_FW_PORT_CMD_MDIOADDR(lstatus) : -1;
5187 pcaps = be16_to_cpu(cmd.u.info.pcap);
5188 acaps = be16_to_cpu(cmd.u.info.acap);
5189 pcaps = fwcaps16_to_caps32(pcaps);
5190 acaps = fwcaps16_to_caps32(acaps);
5193 be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
5195 port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
5196 mdio_addr = (lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ?
5197 (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) :
5199 pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
5200 acaps = be32_to_cpu(cmd.u.info32.acaps32);
5203 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
5209 pi->rss_size = rss_size;
5210 t4_os_set_hw_addr(adap, i, addr);
5212 pi->port_type = port_type;
5213 pi->mdio_addr = mdio_addr;
5214 pi->mod_type = FW_PORT_MOD_TYPE_NA;
5216 init_link_config(&pi->link_cfg, pcaps, acaps);