1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Chelsio Communications.
6 #include <netinet/in.h>
8 #include <rte_interrupts.h>
10 #include <rte_debug.h>
12 #include <rte_branch_prediction.h>
13 #include <rte_memory.h>
14 #include <rte_tailq.h>
16 #include <rte_alarm.h>
17 #include <rte_ether.h>
18 #include <ethdev_driver.h>
19 #include <rte_malloc.h>
20 #include <rte_random.h>
22 #include <rte_byteorder.h>
26 #include "t4_regs_values.h"
27 #include "t4fw_interface.h"
30 * t4_read_mtu_tbl - returns the values in the HW path MTU table
32 * @mtus: where to store the MTU values
33 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
35 * Reads the HW path MTU table.
37 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
/* NOTE(review): view is truncated — braces and local declarations (v, i) not visible. */
42 for (i = 0; i < NMTUS; ++i) {
/* Writing MTUINDEX=0xff with the entry index in MTUVALUE presumably selects a
 * read-back of table entry i — TODO confirm against the TP register spec.
 */
43 t4_write_reg(adap, A_TP_MTU_TABLE,
44 V_MTUINDEX(0xff) | V_MTUVALUE(i));
45 v = t4_read_reg(adap, A_TP_MTU_TABLE);
46 mtus[i] = G_MTUVALUE(v);
/* mtu_log may be %NULL per the header comment; the guarding if (line 47) is not visible here. */
48 mtu_log[i] = G_MTUWIDTH(v);
53 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
55 * @addr: the indirect TP register address
56 * @mask: specifies the field within the register to modify
57 * @val: new value for the field
59 * Sets a field of an indirect TP register to the given value.
61 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
62 unsigned int mask, unsigned int val)
/* Read-modify-write of an indirect TP register via the PIO address/data pair:
 * select the register, merge the unmasked bits of the current value into val,
 * then write the result back.
 */
64 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
65 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
66 t4_write_reg(adap, A_TP_PIO_DATA, val);
69 /* The minimum additive increment value for the congestion control table */
/* Presumably used as the floor for the per-window increment in t4_load_mtus()
 * (the clamping line is not visible in this view) — TODO confirm.
 */
70 #define CC_MIN_INCR 2U
73 * t4_load_mtus - write the MTU and congestion control HW tables
75 * @mtus: the values for the MTU table
76 * @alpha: the values for the congestion control alpha parameter
77 * @beta: the values for the congestion control beta parameter
79 * Write the HW MTU table with the supplied MTUs and the high-speed
80 * congestion control table with the supplied alpha, beta, and MTUs.
81 * We write the two tables together because the additive increments
84 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
85 const unsigned short *alpha, const unsigned short *beta)
/* Per-window average packet counts used as the divisor for the additive increment. */
87 static const unsigned int avg_pkts[NCCTRL_WIN] = {
88 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
89 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
90 28672, 40960, 57344, 81920, 114688, 163840, 229376
/* NOTE(review): truncated view — local declarations and closing braces not visible. */
95 for (i = 0; i < NMTUS; ++i) {
96 unsigned int mtu = mtus[i];
97 unsigned int log2 = cxgbe_fls(mtu);
/* Round the base-2 log down if the bit two below the MSB is clear
 * (the adjustment statement on line 100 is not visible here).
 */
99 if (!(mtu & ((1 << log2) >> 2))) /* round */
101 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
102 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
/* For every congestion-control window, derive the additive increment from
 * the MTU payload (mtu - 40) scaled by alpha and the per-window packet
 * average; the clamp (presumably to CC_MIN_INCR) is not visible here.
 */
104 for (w = 0; w < NCCTRL_WIN; ++w) {
107 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
110 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
111 (w << 16) | (beta[w] << 13) | inc);
117 * t4_wait_op_done_val - wait until an operation is completed
118 * @adapter: the adapter performing the operation
119 * @reg: the register to check for completion
120 * @mask: a single-bit field within @reg that indicates completion
121 * @polarity: the value of the field when the operation is completed
122 * @attempts: number of check iterations
123 * @delay: delay in usecs between iterations
124 * @valp: where to store the value of the register at completion time
126 * Wait until an operation is completed by checking a bit in a register
127 * up to @attempts times. If @valp is not NULL the value of the register
128 * at the time it indicated completion is stored there. Returns 0 if the
129 * operation completes and -EAGAIN otherwise.
131 int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
132 int polarity, int attempts, int delay, u32 *valp)
/* NOTE(review): body heavily truncated in this view — only the first poll
 * iteration is visible; the retry/delay loop and -EAGAIN return are not shown.
 */
135 u32 val = t4_read_reg(adapter, reg);
/* Double negation normalizes the masked field to 0/1 before comparing with @polarity. */
137 if (!!(val & mask) == polarity) {
150 * t4_set_reg_field - set a register field to a value
151 * @adapter: the adapter to program
152 * @addr: the register address
153 * @mask: specifies the portion of the register to modify
154 * @val: the new value for the register field
156 * Sets a register field specified by the supplied mask to the
159 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
/* Classic read-modify-write: clear the masked field, OR in the new value,
 * then read back to flush the posted write.
 */
162 u32 v = t4_read_reg(adapter, addr) & ~mask;
164 t4_write_reg(adapter, addr, v | val);
165 (void)t4_read_reg(adapter, addr); /* flush */
169 * t4_read_indirect - read indirectly addressed registers
171 * @addr_reg: register holding the indirect address
172 * @data_reg: register holding the value of the indirect register
173 * @vals: where the read register values are stored
174 * @nregs: how many indirect registers to read
175 * @start_idx: index of first indirect register to read
177 * Reads registers that are accessed indirectly through an address/data
180 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
181 unsigned int data_reg, u32 *vals, unsigned int nregs,
182 unsigned int start_idx)
/* NOTE(review): the enclosing loop over nregs/start_idx is not visible in this
 * view; only the per-register address write and data read are shown.
 */
185 t4_write_reg(adap, addr_reg, start_idx);
186 *vals++ = t4_read_reg(adap, data_reg);
192 * t4_write_indirect - write indirectly addressed registers
194 * @addr_reg: register holding the indirect addresses
195 * @data_reg: register holding the value for the indirect registers
196 * @vals: values to write
197 * @nregs: how many indirect registers to write
198 * @start_idx: address of first indirect register to write
200 * Writes a sequential block of registers that are accessed indirectly
201 * through an address/data register pair.
203 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
204 unsigned int data_reg, const u32 *vals,
205 unsigned int nregs, unsigned int start_idx)
/* NOTE(review): the enclosing while(nregs--) loop is not visible in this view. */
208 t4_write_reg(adap, addr_reg, start_idx++);
209 t4_write_reg(adap, data_reg, *vals++);
214 * t4_report_fw_error - report firmware error
217 * The adapter firmware can indicate error conditions to the host.
218 * If the firmware has indicated an error, print out the reason for
219 * the firmware error.
221 static void t4_report_fw_error(struct adapter *adap)
/* Human-readable strings indexed by G_PCIE_FW_EVAL(); order must match the
 * PCIE_FW_EVAL_* encoding noted per entry.
 */
223 static const char * const reason[] = {
224 "Crash", /* PCIE_FW_EVAL_CRASH */
225 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
226 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
227 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
228 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
229 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
230 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
231 "Reserved", /* reserved */
/* Only report when the firmware has latched an error into A_PCIE_FW. */
235 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
236 if (pcie_fw & F_PCIE_FW_ERR)
237 pr_err("%s: Firmware reports adapter error: %s\n",
238 __func__, reason[G_PCIE_FW_EVAL(pcie_fw)]);
242 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
/* Copy @nflit 64-bit flits of a mailbox reply from adapter registers into
 * @rpl, converting each to big-endian.
 */
244 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
247 for ( ; nflit; nflit--, mbox_addr += 8)
248 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
252 * Handle a FW assertion reported in a mailbox.
254 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
256 struct fw_debug_cmd asrt;
/* Pull the FW_DEBUG_CMD payload out of the mailbox and log the assertion's
 * file, line and the two diagnostic values it carries.
 */
258 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
259 pr_warn("FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
260 asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
261 be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
/* Sentinel value read back from the mailbox control register when this PF
 * has no access (compared against in the mailbox reply-wait loop below).
 */
264 #define X_CIM_PF_NOACCESS 0xeeeeeeee
267 * If the Host OS Driver needs locking around accesses to the mailbox, this
268 * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
270 /* makes single-statement usage a bit cleaner ... */
271 #ifdef T4_OS_NEEDS_MBOX_LOCKING
/* Locking build: expand the statement as-is. */
272 #define T4_OS_MBOX_LOCKING(x) x
/* NOTE(review): the #else separating the two definitions (original line 273)
 * is not visible in this truncated view. Non-locking build: compile to nothing.
 */
274 #define T4_OS_MBOX_LOCKING(x) do {} while (0)
278 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
280 * @mbox: index of the mailbox to use
281 * @cmd: the command to write
282 * @size: command length in bytes
283 * @rpl: where to optionally store the reply
284 * @sleep_ok: if true we may sleep while awaiting command completion
285 * @timeout: time to wait for command to finish before timing out
286 * (negative implies @sleep_ok=false)
288 * Sends the given command to FW through the selected mailbox and waits
289 * for the FW to execute the command. If @rpl is not %NULL it is used to
290 * store the FW's reply to the command. The command and its optional
291 * reply are of the same length. Some FW commands like RESET and
292 * INITIALIZE can take a considerable amount of time to execute.
293 * @sleep_ok determines whether we may sleep while awaiting the response.
294 * If sleeping is allowed we use progressive backoff otherwise we spin.
295 * Note that passing in a negative @timeout is an alternate mechanism
296 * for specifying @sleep_ok=false. This is useful when a higher level
297 * interface allows for specification of @timeout but not @sleep_ok ...
299 * Returns 0 on success or a negative errno on failure. A
300 * failure can happen either because we are not able to execute the
301 * command or FW executes it but signals an error. In the latter case
302 * the return value is the error code indicated by FW (negated).
304 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
305 const void __attribute__((__may_alias__)) *cmd,
306 int size, void *rpl, bool sleep_ok, int timeout)
/* NOTE(review): this function is heavily truncated in the current view —
 * many declarations, braces and statements are missing between the lines
 * shown. Comments below are limited to what the visible lines establish.
 */
309 * We delay in small increments at first in an effort to maintain
310 * responsiveness for simple, fast executing commands but then back
311 * off to larger delays to a maximum retry delay.
313 static const int delay[] = {
314 1, 1, 3, 5, 10, 10, 20, 50, 100
320 unsigned int delay_idx;
/* NOTE(review): malloc() result is used as the command staging buffer; the
 * null-check and the free() on the various return paths are not visible in
 * this view — verify that all exits release 'temp' and that OOM is handled.
 */
321 __be64 *temp = (__be64 *)malloc(size * sizeof(char));
323 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
324 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
326 struct mbox_entry entry;
/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
332 if ((size & 15) || size > MBOX_LEN) {
338 memcpy(p, (const __be64 *)cmd, size);
341 * If we have a negative timeout, that implies that we can't sleep.
348 #ifdef T4_OS_NEEDS_MBOX_LOCKING
350 * Queue ourselves onto the mailbox access list. When our entry is at
351 * the front of the list, we have rights to access the mailbox. So we
352 * wait [for a while] till we're at the front [or bail out with an
355 t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);
360 for (i = 0; ; i += ms) {
362 * If we've waited too long, return a busy indication. This
363 * really ought to be based on our initial position in the
364 * mailbox access list but this is a start. We very rarely
365 * contend on access to the mailbox ... Also check for a
366 * firmware error which we'll report as a device error.
368 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
369 if (i > 4 * timeout || (pcie_fw & F_PCIE_FW_ERR)) {
370 t4_os_atomic_list_del(&entry, &adap->mbox_list,
372 t4_report_fw_error(adap);
374 return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
378 * If we're at the head, break out and start the mailbox
381 if (t4_os_list_first_entry(&adap->mbox_list) == &entry)
385 * Delay for a bit before checking again ...
388 ms = delay[delay_idx]; /* last element may repeat */
389 if (delay_idx < ARRAY_SIZE(delay) - 1)
396 #endif /* T4_OS_NEEDS_MBOX_LOCKING */
399 * Attempt to gain access to the mailbox.
/* Up to 4 polls of the control register for ownership. */
401 for (i = 0; i < 4; i++) {
402 ctl = t4_read_reg(adap, ctl_reg);
404 if (v != X_MBOWNER_NONE)
409 * If we were unable to gain access, dequeue ourselves from the
410 * mailbox atomic access list and report the error to our caller.
412 if (v != X_MBOWNER_PL) {
413 T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
416 t4_report_fw_error(adap);
418 return (v == X_MBOWNER_FW ? -EBUSY : -ETIMEDOUT);
422 * If we gain ownership of the mailbox and there's a "valid" message
423 * in it, this is likely an asynchronous error message from the
424 * firmware. So we'll report that and then proceed on with attempting
425 * to issue our own command ... which may well fail if the error
426 * presaged the firmware crashing ...
428 if (ctl & F_MBMSGVALID) {
429 dev_err(adap, "found VALID command in mbox %u: "
430 "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
431 (unsigned long long)t4_read_reg64(adap, data_reg),
432 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
433 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
434 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
435 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
436 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
437 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
438 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
442 * Copy in the new mailbox command and send it on its way ...
444 for (i = 0; i < size; i += 8, p++)
445 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
447 CXGBE_DEBUG_MBOX(adap, "%s: mbox %u: %016llx %016llx %016llx %016llx "
448 "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
449 (unsigned long long)t4_read_reg64(adap, data_reg),
450 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
451 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
452 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
453 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
454 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
455 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
456 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
/* Hand the mailbox to firmware and flush the posted write. */
458 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
459 t4_read_reg(adap, ctl_reg); /* flush write */
465 * Loop waiting for the reply; bail out if we time out or the firmware
468 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
469 for (i = 0; i < timeout && !(pcie_fw & F_PCIE_FW_ERR); i += ms) {
471 ms = delay[delay_idx]; /* last element may repeat */
472 if (delay_idx < ARRAY_SIZE(delay) - 1)
479 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
480 v = t4_read_reg(adap, ctl_reg);
/* No access means another agent took the mailbox; keep polling. */
481 if (v == X_CIM_PF_NOACCESS)
483 if (G_MBOWNER(v) == X_MBOWNER_PL) {
/* Ownership returned without a valid message: release and keep waiting. */
484 if (!(v & F_MBMSGVALID)) {
485 t4_write_reg(adap, ctl_reg,
486 V_MBOWNER(X_MBOWNER_NONE));
490 CXGBE_DEBUG_MBOX(adap,
491 "%s: mbox %u: %016llx %016llx %016llx %016llx "
492 "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
493 (unsigned long long)t4_read_reg64(adap, data_reg),
494 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
495 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
496 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
497 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
498 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
499 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
500 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
502 CXGBE_DEBUG_MBOX(adap,
503 "command %#x completed in %d ms (%ssleeping)\n",
505 i + ms, sleep_ok ? "" : "non-");
/* A FW_DEBUG_CMD reply signals a firmware assertion rather than our reply. */
507 res = t4_read_reg64(adap, data_reg);
508 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
509 fw_asrt(adap, data_reg);
510 res = V_FW_CMD_RETVAL(EIO);
512 get_mbox_rpl(adap, rpl, size / 8, data_reg);
514 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
516 t4_os_atomic_list_del(&entry, &adap->mbox_list,
/* Return the firmware's retval negated, per the function contract. */
519 return -G_FW_CMD_RETVAL((int)res);
524 * We timed out waiting for a reply to our mailbox command. Report
525 * the error and also check to see if the firmware reported any
528 dev_err(adap, "command %#x in mailbox %d timed out\n",
529 *(const u8 *)cmd, mbox);
530 T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
533 t4_report_fw_error(adap);
535 return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
/* Convenience wrapper: issue a mailbox command with the default timeout
 * (the timeout constant passed on the continuation line is not visible here).
 */
538 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
539 void *rpl, bool sleep_ok)
541 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
546 * t4_get_regs_len - return the size of the chips register set
547 * @adapter: the adapter
549 * Returns the size of the chip's BAR0 register space.
551 unsigned int t4_get_regs_len(struct adapter *adapter)
553 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
/* NOTE(review): truncated view — the case labels (and any T6 case) between
 * the switch and the return are not visible.
 */
555 switch (chip_version) {
558 return T5_REGMAP_SIZE;
562 "Unsupported chip version %d\n", chip_version);
567 * t4_get_regs - read chip registers into provided buffer
569 * @buf: register buffer
570 * @buf_size: size (in bytes) of register buffer
572 * If the provided register buffer isn't large enough for the chip's
573 * full register range, the register dump will be truncated to the
574 * register buffer's size.
576 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
/* NOTE(review): the contents of both register-range tables (hundreds of
 * start/end pairs) are elided from this view.
 */
578 static const unsigned int t5_reg_ranges[] = {
1353 static const unsigned int t6_reg_ranges[] = {
1914 u32 *buf_end = (u32 *)((char *)buf + buf_size);
1915 const unsigned int *reg_ranges;
1916 int reg_ranges_size, range;
1917 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
1919 /* Select the right set of register ranges to dump depending on the
1920 * adapter chip type.
1922 switch (chip_version) {
1924 reg_ranges = t5_reg_ranges;
1925 reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
1929 reg_ranges = t6_reg_ranges;
1930 reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
1935 "Unsupported chip version %d\n", chip_version);
1939 /* Clear the register buffer and insert the appropriate register
1940 * values selected by the above register ranges.
1942 memset(buf, 0, buf_size);
/* Ranges are stored as (start, end) pairs, hence the stride of 2. */
1943 for (range = 0; range < reg_ranges_size; range += 2) {
1944 unsigned int reg = reg_ranges[range];
1945 unsigned int last_reg = reg_ranges[range + 1];
/* The buffer is laid out by register offset, so index it with 'reg'. */
1946 u32 *bufp = (u32 *)((char *)buf + reg);
1948 /* Iterate across the register range filling in the register
1949 * buffer but don't write past the end of the register buffer.
1951 while (reg <= last_reg && bufp < buf_end) {
1952 *bufp++ = t4_read_reg(adap, reg);
1958 /* EEPROM reads take a few tens of us while writes can take a bit over 5 ms. */
1959 #define EEPROM_DELAY 10 /* 10us per poll spin */
1960 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
/* EEPROM virtual address of the write-status/protection word used by
 * t4_seeprom_write()/t4_seeprom_wp() below.
 */
1962 #define EEPROM_STAT_ADDR 0x7bfc
1965 * Small utility function to wait till any outstanding VPD Access is complete.
1966 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
1967 * VPD Access in flight. This allows us to handle the problem of having a
1968 * previous VPD Access time out and prevent an attempt to inject a new VPD
1969 * Request before any in-flight VPD request has completed.
1971 static int t4_seeprom_wait(struct adapter *adapter)
1973 unsigned int base = adapter->params.pci.vpd_cap_addr;
1976 /* If no VPD Access is in flight, we can just return success right
1979 if (!adapter->vpd_busy)
1982 /* Poll the VPD Capability Address/Flag register waiting for it
1983 * to indicate that the operation is complete.
1985 max_poll = EEPROM_MAX_POLL
/* NOTE(review): truncated view — the do { opener and several returns are not visible. */
1989 udelay(EEPROM_DELAY);
1990 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
1992 /* If the operation is complete, mark the VPD as no longer
1993 * busy and return success.
1995 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
1996 adapter->vpd_busy = 0;
1999 } while (--max_poll);
2001 /* Failure! Note that we leave the VPD Busy status set in order to
2002 * avoid pushing a new VPD Access request into the VPD Capability till
2003 * the current operation eventually succeeds. It's a bug to issue a
2004 * new request when an existing request is in flight and will result
2005 * in corrupt hardware state.
2011 * t4_seeprom_read - read a serial EEPROM location
2012 * @adapter: adapter to read
2013 * @addr: EEPROM virtual address
2014 * @data: where to store the read data
2016 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
2017 * VPD capability. Note that this function must be called with a virtual
2020 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
2022 unsigned int base = adapter->params.pci.vpd_cap_addr;
2025 /* VPD Accesses must always be 4-byte aligned!
2027 if (addr >= EEPROMVSIZE || (addr & 3))
2030 /* Wait for any previous operation which may still be in flight to
2033 ret = t4_seeprom_wait(adapter);
2035 dev_err(adapter, "VPD still busy from previous operation\n");
2039 /* Issue our new VPD Read request, mark the VPD as being busy and wait
2040 * for our request to complete. If it doesn't complete, note the
2041 * error and return it to our caller. Note that we do not reset the
2044 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
/* Read completion is signalled by the F flag being SET, hence vpd_flag = PCI_VPD_ADDR_F. */
2045 adapter->vpd_busy = 1;
2046 adapter->vpd_flag = PCI_VPD_ADDR_F;
2047 ret = t4_seeprom_wait(adapter);
2049 dev_err(adapter, "VPD read of address %#x failed\n", addr);
2053 /* Grab the returned data, swizzle it into our endianness and
2056 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
2057 *data = le32_to_cpu(*data);
2062 * t4_seeprom_write - write a serial EEPROM location
2063 * @adapter: adapter to write
2064 * @addr: virtual EEPROM address
2065 * @data: value to write
2067 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
2068 * VPD capability. Note that this function must be called with a virtual
2071 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
2073 unsigned int base = adapter->params.pci.vpd_cap_addr;
2078 /* VPD Accesses must always be 4-byte aligned!
2080 if (addr >= EEPROMVSIZE || (addr & 3))
2083 /* Wait for any previous operation which may still be in flight to
2086 ret = t4_seeprom_wait(adapter);
2088 dev_err(adapter, "VPD still busy from previous operation\n");
2092 /* Issue our new VPD Write request, mark the VPD as being busy and wait
2093 * for our request to complete. If it doesn't complete, note the
2094 * error and return it to our caller. Note that we do not reset the
2097 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
/* Setting PCI_VPD_ADDR_F in the address write starts the write cycle; write
 * completion is signalled by the flag being CLEARED, hence vpd_flag = 0.
 */
2099 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
2100 (u16)addr | PCI_VPD_ADDR_F);
2101 adapter->vpd_busy = 1;
2102 adapter->vpd_flag = 0;
2103 ret = t4_seeprom_wait(adapter);
2105 dev_err(adapter, "VPD write of address %#x failed\n", addr);
2109 /* Reset PCI_VPD_DATA register after a transaction and wait for our
2110 * request to complete. If it doesn't complete, return error.
2112 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
2113 max_poll = EEPROM_MAX_POLL;
/* Poll the EEPROM status word's write-in-progress bit (bit 0) until clear. */
2115 udelay(EEPROM_DELAY);
2116 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
2117 } while ((stats_reg & 0x1) && --max_poll);
2121 /* Return success! */
2126 * t4_seeprom_wp - enable/disable EEPROM write protection
2127 * @adapter: the adapter
2128 * @enable: whether to enable or disable write protection
2130 * Enables or disables write protection on the serial EEPROM.
/* 0xc sets the EEPROM's block-protect bits in the status word; 0 clears them
 * — presumably per the serial EEPROM datasheet, TODO confirm the 0xc encoding.
 */
2132 int t4_seeprom_wp(struct adapter *adapter, int enable)
2134 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
2138 * t4_fw_tp_pio_rw - Access TP PIO through LDST
2139 * @adap: the adapter
2140 * @vals: where the indirect register values are stored/written
2141 * @nregs: how many indirect registers to read/write
2142 * @start_idx: index of first indirect register to read/write
2143 * @rw: Read (1) or Write (0)
2145 * Access TP PIO registers through LDST
2147 void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
2148 unsigned int start_index, unsigned int rw)
2150 int cmd = FW_LDST_ADDRSPC_TP_PIO;
2151 struct fw_ldst_cmd c;
/* One FW_LDST mailbox command per register; @rw selects READ (1) vs WRITE (0). */
2155 for (i = 0 ; i < nregs; i++) {
2156 memset(&c, 0, sizeof(c));
2157 c.op_to_addrspace = cpu_to_be32(V_FW_CMD_OP(FW_LDST_CMD) |
2159 (rw ? F_FW_CMD_READ :
2161 V_FW_LDST_CMD_ADDRSPACE(cmd));
2162 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
2164 c.u.addrval.addr = cpu_to_be32(start_index + i);
2165 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
/* The reply is written back into 'c'; on a read, extract the value
 * (the ret check guarding this is not visible in this view).
 */
2166 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
2169 vals[i] = be32_to_cpu(c.u.addrval.val);
2175 * t4_read_rss_key - read the global RSS key
2176 * @adap: the adapter
2177 * @key: 10-entry array holding the 320-bit RSS key
2179 * Reads the global 320-bit RSS key.
/* Read the 10 x 32-bit (320-bit) global RSS key via LDST TP_PIO (rw=1). */
2181 void t4_read_rss_key(struct adapter *adap, u32 *key)
2183 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 1);
2187 * t4_write_rss_key - program one of the RSS keys
2188 * @adap: the adapter
2189 * @key: 10-entry array holding the 320-bit RSS key
2190 * @idx: which RSS key to write
2192 * Writes one of the RSS keys with the given 320-bit value. If @idx is
2193 * 0..15 the corresponding entry in the RSS key table is written,
2194 * otherwise the global RSS key is written.
2196 void t4_write_rss_key(struct adapter *adap, u32 *key, int idx)
2198 u32 vrt = t4_read_reg(adap, A_TP_RSS_CONFIG_VRT);
2199 u8 rss_key_addr_cnt = 16;
2201 /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
2202 * allows access to key addresses 16-63 by using KeyWrAddrX
2203 * as index[5:4](upper 2) into key table
2205 if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
2206 (vrt & F_KEYEXTEND) && (G_KEYMODE(vrt) == 3))
2207 rss_key_addr_cnt = 32;
/* First push the 320-bit key through TP_PIO (rw=0 = write)... */
2209 t4_fw_tp_pio_rw(adap, key, 10, A_TP_RSS_SECRET_KEY0, 0);
/* ...then latch it into the requested key-table slot via CONFIG_VRT. */
2211 if (idx >= 0 && idx < rss_key_addr_cnt) {
2212 if (rss_key_addr_cnt > 16)
2213 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2214 V_KEYWRADDRX(idx >> 4) |
2215 V_T6_VFWRADDR(idx) | F_KEYWREN);
2217 t4_write_reg(adap, A_TP_RSS_CONFIG_VRT,
2218 V_KEYWRADDR(idx) | F_KEYWREN);
2223 * t4_config_rss_range - configure a portion of the RSS mapping table
2224 * @adapter: the adapter
2225 * @mbox: mbox to use for the FW command
2226 * @viid: virtual interface whose RSS subtable is to be written
2227 * @start: start entry in the table to write
2228 * @n: how many table entries to write
2229 * @rspq: values for the "response queue" (Ingress Queue) lookup table
2230 * @nrspq: number of values in @rspq
2232 * Programs the selected part of the VI's RSS mapping table with the
2233 * provided values. If @nrspq < @n the supplied values are used repeatedly
2234 * until the full table range is populated.
2236 * The caller must ensure the values in @rspq are in the range allowed for
2239 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
2240 int start, int n, const u16 *rspq, unsigned int nrspq)
/* rsp walks @rspq circularly (wrapping at rsp_end) so the supplied values
 * repeat when nrspq < n, per the header comment.
 */
2243 const u16 *rsp = rspq;
2244 const u16 *rsp_end = rspq + nrspq;
2245 struct fw_rss_ind_tbl_cmd cmd;
2247 memset(&cmd, 0, sizeof(cmd));
2248 cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
2249 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2250 V_FW_RSS_IND_TBL_CMD_VIID(viid));
2251 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
2254 * Each firmware RSS command can accommodate up to 32 RSS Ingress
2255 * Queue Identifiers. These Ingress Queue IDs are packed three to
2256 * a 32-bit word as 10-bit values with the upper remaining 2 bits
/* Chunk the table update into mailbox-sized batches of at most 32 IDs. */
2260 int nq = min(n, 32);
2262 __be32 *qp = &cmd.iq0_to_iq2;
2265 * Set up the firmware RSS command header to send the next
2266 * "nq" Ingress Queue IDs to the firmware.
2268 cmd.niqid = cpu_to_be16(nq);
2269 cmd.startidx = cpu_to_be16(start);
2272 * "nq" more done for the start of the next loop.
2278 * While there are still Ingress Queue IDs to stuff into the
2279 * current firmware RSS command, retrieve them from the
2280 * Ingress Queue ID array and insert them into the command.
2284 * Grab up to the next 3 Ingress Queue IDs (wrapping
2285 * around the Ingress Queue ID array if necessary) and
2286 * insert them into the firmware RSS command at the
2287 * current 3-tuple position within the command.
2291 int nqbuf = min(3, nq);
2297 while (nqbuf && nq_packed < 32) {
2304 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
2305 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
2306 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
2310 * Send this portion of the RSS table update to the firmware;
2311 * bail out on any errors.
/* PF uses the caller's mailbox; VF goes through the VF mailbox path. */
2313 if (is_pf4(adapter))
2314 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd),
2317 ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
2326 * t4_config_vi_rss - configure per VI RSS settings
2327 * @adapter: the adapter
2328 * @mbox: mbox to use for the FW command
2331 * @defq: id of the default RSS queue for the VI.
2333 * Configures VI-specific RSS properties.
2335 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2336 unsigned int flags, unsigned int defq)
2338 struct fw_rss_vi_config_cmd c;
/* Build a write-direction FW_RSS_VI_CONFIG command carrying the flag bits
 * plus the default-queue field, then issue it via the PF or VF mailbox.
 */
2340 memset(&c, 0, sizeof(c));
2341 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2342 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2343 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2344 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
2345 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
2346 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
2347 if (is_pf4(adapter))
2348 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
2350 return t4vf_wr_mbox(adapter, &c, sizeof(c), NULL);
2354 * t4_read_config_vi_rss - read the configured per VI RSS settings
2355 * @adapter: the adapter
2356 * @mbox: mbox to use for the FW command
2358 * @flags: where to place the configured flags
2359 * @defq: where to place the id of the default RSS queue for the VI.
2361 * Read configured VI-specific RSS properties.
2363 int t4_read_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
2364 u64 *flags, unsigned int *defq)
2366 struct fw_rss_vi_config_cmd c;
2367 unsigned int result;
2370 memset(&c, 0, sizeof(c));
2371 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2372 F_FW_CMD_REQUEST | F_FW_CMD_READ |
2373 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
2374 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
2375 ret = t4_wr_mbox(adapter, mbox, &c, sizeof(c), &c);
2377 result = be32_to_cpu(c.u.basicvirtual.defaultq_to_udpen);
2379 *defq = G_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(result);
/* NOTE(review): masking *flags with the DEFAULTQ field mask looks suspicious
 * — one would expect the flags to be everything *except* the DEFAULTQ field.
 * Verify the intended mask against the fw_rss_vi_config_cmd definition.
 */
2381 *flags = result & M_FW_RSS_VI_CONFIG_CMD_DEFAULTQ;
2388 * init_cong_ctrl - initialize congestion control parameters
2389 * @a: the alpha values for congestion control
2390 * @b: the beta values for congestion control
2392 * Initialize the congestion control parameters.
2394 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
/* NOTE(review): the loop bodies filling the alpha (@a) and beta (@b) tables
 * (original lines 2395-2451) are not visible in this truncated view.
 */
2398 for (i = 0; i < 9; i++) {
/* Initialize a simple firmware command struct: set op/request/direction in
 * op_to_write and the length-16 encoding in retval_len16. rd_wr is the
 * F_FW_CMD_READ / F_FW_CMD_WRITE suffix.
 */
2452 #define INIT_CMD(var, cmd, rd_wr) do { \
2453 (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
2454 F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
2455 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
2458 int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p)
2460 u32 cclk_param, cclk_val;
2464 * Ask firmware for the Core Clock since it knows how to translate the
2465 * Reference Clock ('V2') VPD field into a Core Clock value ...
2467 cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
2468 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
2469 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
2470 1, &cclk_param, &cclk_val);
2472 dev_err(adapter, "%s: error in fetching from coreclock - %d\n",
/* NOTE(review): the assignment p->cclk = cclk_val (original ~line 2477) is
 * not visible in this truncated view; the debug line below implies it.
 */
2478 dev_debug(adapter, "%s: p->cclk = %u\n", __func__, p->cclk);
2483 * t4_get_pfres - retrieve VF resource limits
2484 * @adapter: the adapter
2486 * Retrieves configured resource limits and capabilities for a physical
2487 * function. The results are stored in @adapter->pfres.
2489 int t4_get_pfres(struct adapter *adapter)
2491 struct pf_resources *pfres = &adapter->params.pfres;
2492 struct fw_pfvf_cmd cmd, rpl;
2497 * Execute PFVF Read command to get VF resource limits; bail out early
2498 * with error on command failure.
2500 memset(&cmd, 0, sizeof(cmd));
2501 cmd.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PFVF_CMD) |
2504 V_FW_PFVF_CMD_PFN(adapter->pf) |
2505 V_FW_PFVF_CMD_VFN(0))
2506 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
2507 v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
2508 if (v != FW_SUCCESS)
2512 * Extract PF resource limits and return success.
/* Each limit lives in a packed 32-bit word of the reply; decode with the
 * corresponding G_FW_PFVF_CMD_* field extractor.
 */
2514 word = be32_to_cpu(rpl.niqflint_niq);
2515 pfres->niqflint = G_FW_PFVF_CMD_NIQFLINT(word);
2517 word = be32_to_cpu(rpl.type_to_neq);
2518 pfres->neq = G_FW_PFVF_CMD_NEQ(word);
2520 word = be32_to_cpu(rpl.r_caps_to_nethctrl);
2521 pfres->nethctrl = G_FW_PFVF_CMD_NETHCTRL(word);
2526 /* serial flash and firmware constants and flash config file constants */
/* NOTE(review): the enum { opener (original line 2527) is not visible in this view. */
2528 SF_ATTEMPTS = 10, /* max retries for SF operations */
2530 /* flash command opcodes */
2531 SF_PROG_PAGE = 2, /* program page */
2532 SF_WR_DISABLE = 4, /* disable writes */
2533 SF_RD_STATUS = 5, /* read status register */
2534 SF_WR_ENABLE = 6, /* enable writes */
2535 SF_RD_DATA_FAST = 0xb, /* read flash */
2536 SF_RD_ID = 0x9f, /* read ID */
2537 SF_ERASE_SECTOR = 0xd8, /* erase sector */
2541 * sf1_read - read data from the serial flash
2542 * @adapter: the adapter
2543 * @byte_cnt: number of bytes to read
2544 * @cont: whether another operation will be chained
2545 * @lock: whether to lock SF for PL access only
2546 * @valp: where to store the read data
2548 * Reads up to 4 bytes of data from the serial flash. The location of
2549 * the read needs to be specified prior to calling this by issuing the
2550 * appropriate commands to the serial flash.
2552 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2553 int lock, u32 *valp)
2557 if (!byte_cnt || byte_cnt > 4)
2559 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
2561 t4_write_reg(adapter, A_SF_OP,
2562 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
2563 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
2565 *valp = t4_read_reg(adapter, A_SF_DATA);
2570 * sf1_write - write data to the serial flash
2571 * @adapter: the adapter
2572 * @byte_cnt: number of bytes to write
2573 * @cont: whether another operation will be chained
2574 * @lock: whether to lock SF for PL access only
2575 * @val: value to write
2577 * Writes up to 4 bytes of data to the serial flash. The location of
2578 * the write needs to be specified prior to calling this by issuing the
2579 * appropriate commands to the serial flash.
2581 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
2584 if (!byte_cnt || byte_cnt > 4)
2586 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
2588 t4_write_reg(adapter, A_SF_DATA, val);
2589 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
2590 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
2591 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
2595 * t4_read_flash - read words from serial flash
2596 * @adapter: the adapter
2597 * @addr: the start address for the read
2598 * @nwords: how many 32-bit words to read
2599 * @data: where to store the read data
2600 * @byte_oriented: whether to store data as bytes or as words
2602 * Read the specified number of 32-bit words from the serial flash.
2603 * If @byte_oriented is set the read data is stored as a byte array
2604 * (i.e., big-endian), otherwise as 32-bit words in the platform's
2605 * natural endianness.
2607 int t4_read_flash(struct adapter *adapter, unsigned int addr,
2608 unsigned int nwords, u32 *data, int byte_oriented)
2612 if (((addr + nwords * sizeof(u32)) > adapter->params.sf_size) ||
2616 addr = rte_constant_bswap32(addr) | SF_RD_DATA_FAST;
2618 ret = sf1_write(adapter, 4, 1, 0, addr);
2622 ret = sf1_read(adapter, 1, 1, 0, data);
2626 for ( ; nwords; nwords--, data++) {
2627 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
2629 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
2633 *data = cpu_to_be32(*data);
2639 * t4_get_exprom_version - return the Expansion ROM version (if any)
2640 * @adapter: the adapter
2641 * @vers: where to place the version
2643 * Reads the Expansion ROM header from FLASH and returns the version
2644 * number (if present) through the @vers return value pointer. We return
2645 * this in the Firmware Version Format since it's convenient. Return
2646 * 0 on success, -ENOENT if no Expansion ROM is present.
2648 static int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
2650 struct exprom_header {
2651 unsigned char hdr_arr[16]; /* must start with 0x55aa */
2652 unsigned char hdr_ver[4]; /* Expansion ROM version */
2654 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
2658 ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
2659 ARRAY_SIZE(exprom_header_buf),
2660 exprom_header_buf, 0);
2664 hdr = (struct exprom_header *)exprom_header_buf;
2665 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
2668 *vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
2669 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
2670 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
2671 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
2676 * t4_get_fw_version - read the firmware version
2677 * @adapter: the adapter
2678 * @vers: where to place the version
2680 * Reads the FW version from flash.
2682 static int t4_get_fw_version(struct adapter *adapter, u32 *vers)
2684 return t4_read_flash(adapter, FLASH_FW_START +
2685 offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
2689 * t4_get_bs_version - read the firmware bootstrap version
2690 * @adapter: the adapter
2691 * @vers: where to place the version
2693 * Reads the FW Bootstrap version from flash.
2695 static int t4_get_bs_version(struct adapter *adapter, u32 *vers)
2697 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
2698 offsetof(struct fw_hdr, fw_ver), 1,
2703 * t4_get_tp_version - read the TP microcode version
2704 * @adapter: the adapter
2705 * @vers: where to place the version
2707 * Reads the TP microcode version from flash.
2709 static int t4_get_tp_version(struct adapter *adapter, u32 *vers)
2711 return t4_read_flash(adapter, FLASH_FW_START +
2712 offsetof(struct fw_hdr, tp_microcode_ver),
2717 * t4_get_version_info - extract various chip/firmware version information
2718 * @adapter: the adapter
2720 * Reads various chip/firmware version numbers and stores them into the
2721 * adapter Adapter Parameters structure. If any of the efforts fails
2722 * the first failure will be returned, but all of the version numbers
2725 int t4_get_version_info(struct adapter *adapter)
2729 #define FIRST_RET(__getvinfo) \
2731 int __ret = __getvinfo; \
2732 if (__ret && !ret) \
2736 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
2737 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
2738 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
2739 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
2747 * t4_dump_version_info - dump all of the adapter configuration IDs
2748 * @adapter: the adapter
2750 * Dumps all of the various bits of adapter configuration version/revision
2751 * IDs information. This is typically called at some point after
2752 * t4_get_version_info() has been called.
2754 void t4_dump_version_info(struct adapter *adapter)
2757 * Device information.
2759 dev_info(adapter, "Chelsio rev %d\n",
2760 CHELSIO_CHIP_RELEASE(adapter->params.chip));
2765 if (!adapter->params.fw_vers)
2766 dev_warn(adapter, "No firmware loaded\n");
2768 dev_info(adapter, "Firmware version: %u.%u.%u.%u\n",
2769 G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
2770 G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
2771 G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
2772 G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));
2775 * Bootstrap Firmware Version.
2777 if (!adapter->params.bs_vers)
2778 dev_warn(adapter, "No bootstrap loaded\n");
2780 dev_info(adapter, "Bootstrap version: %u.%u.%u.%u\n",
2781 G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers),
2782 G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers),
2783 G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers),
2784 G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers));
2787 * TP Microcode Version.
2789 if (!adapter->params.tp_vers)
2790 dev_warn(adapter, "No TP Microcode loaded\n");
2792 dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n",
2793 G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
2794 G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
2795 G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
2796 G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));
2799 * Expansion ROM version.
2801 if (!adapter->params.er_vers)
2802 dev_info(adapter, "No Expansion ROM loaded\n");
2804 dev_info(adapter, "Expansion ROM version: %u.%u.%u.%u\n",
2805 G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers),
2806 G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers),
2807 G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers),
2808 G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));
2812 * t4_link_l1cfg_core - apply link configuration to MAC/PHY
2813 * @pi: the port info
2814 * @caps: link capabilities to configure
2815 * @sleep_ok: if true we may sleep while awaiting command completion
2817 * Set up a port's MAC and PHY according to a desired link configuration.
2818 * - If the PHY can auto-negotiate first decide what to advertise, then
2819 * enable/disable auto-negotiation as desired, and reset.
2820 * - If the PHY does not auto-negotiate just reset it.
2821 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
2822 * otherwise do it later based on the outcome of auto-negotiation.
2824 int t4_link_l1cfg_core(struct port_info *pi, u32 caps, u8 sleep_ok)
2826 struct link_config *lc = &pi->link_cfg;
2827 struct adapter *adap = pi->adapter;
2828 struct fw_port_cmd cmd;
2831 memset(&cmd, 0, sizeof(cmd));
2832 cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
2833 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
2834 V_FW_PORT_CMD_PORTID(pi->port_id));
2835 cmd.action_to_len16 =
2836 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG32) |
2839 cmd.u.l1cfg32.rcap32 = cpu_to_be32(caps);
2842 ret = t4_wr_mbox(adap, adap->mbox, &cmd, sizeof(cmd), NULL);
2844 ret = t4_wr_mbox_ns(adap, adap->mbox, &cmd, sizeof(cmd), NULL);
2846 if (ret == FW_SUCCESS)
2847 lc->link_caps = caps;
2850 "Requested Port Capabilities %#x rejected, error %d\n",
2857 * t4_flash_cfg_addr - return the address of the flash configuration file
2858 * @adapter: the adapter
2860 * Return the address within the flash where the Firmware Configuration
2861 * File is stored, or an error if the device FLASH is too small to contain
2862 * a Firmware Configuration File.
2864 int t4_flash_cfg_addr(struct adapter *adapter)
2867 * If the device FLASH isn't large enough to hold a Firmware
2868 * Configuration File, return an error.
2870 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
2873 return FLASH_CFG_START;
2876 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
2879 * t4_intr_enable - enable interrupts
2880 * @adapter: the adapter whose interrupts should be enabled
2882 * Enable PF-specific interrupts for the calling function and the top-level
2883 * interrupt concentrator for global interrupts. Interrupts are already
2884 * enabled at each module, here we just enable the roots of the interrupt
2887 * Note: this function should be called only when the driver manages
2888 * non PF-specific interrupts from the various HW modules. Only one PCI
2889 * function at a time should be doing this.
2891 void t4_intr_enable(struct adapter *adapter)
2894 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
2895 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
2896 G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
2898 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
2899 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
2900 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
2901 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
2902 F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
2903 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
2904 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
2905 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
2906 F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
2907 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
2908 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
2912 * t4_intr_disable - disable interrupts
2913 * @adapter: the adapter whose interrupts should be disabled
2915 * Disable interrupts. We only disable the top-level interrupt
2916 * concentrators. The caller must be a PCI function managing global
2919 void t4_intr_disable(struct adapter *adapter)
2921 u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
2922 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
2923 G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);
2925 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
2926 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
2930 * t4_get_port_type_description - return Port Type string description
2931 * @port_type: firmware Port Type enumeration
2933 const char *t4_get_port_type_description(enum fw_port_type port_type)
2935 static const char * const port_type_description[] = {
/* NOTE(review): the table entries (one string per fw_port_type value) are
 * elided from this excerpt; confirm against the full file that the list
 * stays in sync with the fw_port_type enumeration in t4fw_interface.h.
 */
2960 if (port_type < ARRAY_SIZE(port_type_description))
2961 return port_type_description[port_type];
/* Out-of-range values presumably fall through to a default/unknown string;
 * the fallback return is elided from this excerpt -- verify in the full file.
 */
2966 * t4_get_mps_bg_map - return the buffer groups associated with a port
2967 * @adap: the adapter
2968 * @pidx: the port index
2970 * Returns a bitmap indicating which MPS buffer groups are associated
2971 * with the given port. Bit i is set if buffer group i is used by the
2974 unsigned int t4_get_mps_bg_map(struct adapter *adap, unsigned int pidx)
2976 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
2977 unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adap,
2980 if (pidx >= nports) {
2981 dev_warn(adap, "MPS Port Index %d >= Nports %d\n",
2986 switch (chip_version) {
2991 case 2: return 3 << (2 * pidx);
2992 case 4: return 1 << pidx;
2998 case 2: return 1 << (2 * pidx);
3003 dev_err(adap, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
3004 chip_version, nports);
3009 * t4_get_tp_ch_map - return TP ingress channels associated with a port
3010 * @adapter: the adapter
3011 * @pidx: the port index
3013 * Returns a bitmap indicating which TP Ingress Channels are associated with
3014 * a given Port. Bit i is set if TP Ingress Channel i is used by the Port.
3016 unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx)
3018 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
3019 unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter,
3022 if (pidx >= nports) {
3023 dev_warn(adap, "TP Port Index %d >= Nports %d\n",
3028 switch (chip_version) {
3031 /* Note that this happens to be the same values as the MPS
3032 * Buffer Group Map for these Chips. But we replicate the code
3033 * here because they're really separate concepts.
3037 case 2: return 3 << (2 * pidx);
3038 case 4: return 1 << pidx;
3044 case 2: return 1 << pidx;
3049 dev_err(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n",
3050 chip_version, nports);
3055 * t4_get_port_stats - collect port statistics
3056 * @adap: the adapter
3057 * @idx: the port index
3058 * @p: the stats structure to fill
3060 * Collect statistics related to the given port from HW.
3062 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
3064 u32 bgmap = t4_get_mps_bg_map(adap, idx);
3065 u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
3067 #define GET_STAT(name) \
3068 t4_read_reg64(adap, \
3069 (is_t4(adap->params.chip) ? \
3070 PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) :\
3071 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
3072 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
3074 p->tx_octets = GET_STAT(TX_PORT_BYTES);
3075 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
3076 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
3077 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
3078 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
3079 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
3080 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
3081 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
3082 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
3083 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
3084 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
3085 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
3086 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
3087 p->tx_drop = GET_STAT(TX_PORT_DROP);
3088 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
3089 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
3090 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
3091 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
3092 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
3093 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
3094 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
3095 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
3096 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
3098 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
3099 if (stat_ctl & F_COUNTPAUSESTATTX) {
3100 p->tx_frames -= p->tx_pause;
3101 p->tx_octets -= p->tx_pause * 64;
3103 if (stat_ctl & F_COUNTPAUSEMCTX)
3104 p->tx_mcast_frames -= p->tx_pause;
3107 p->rx_octets = GET_STAT(RX_PORT_BYTES);
3108 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
3109 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
3110 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
3111 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
3112 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
3113 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
3114 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
3115 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
3116 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
3117 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
3118 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
3119 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
3120 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
3121 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
3122 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
3123 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
3124 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
3125 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
3126 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
3127 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
3128 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
3129 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
3130 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
3131 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
3132 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
3133 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
3135 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
3136 if (stat_ctl & F_COUNTPAUSESTATRX) {
3137 p->rx_frames -= p->rx_pause;
3138 p->rx_octets -= p->rx_pause * 64;
3140 if (stat_ctl & F_COUNTPAUSEMCRX)
3141 p->rx_mcast_frames -= p->rx_pause;
3144 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
3145 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
3146 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
3147 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
3148 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
3149 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
3150 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
3151 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
3158 * t4_get_port_stats_offset - collect port stats relative to a previous snapshot
3159 * @adap: The adapter
3161 * @stats: Current stats to fill
3162 * @offset: Previous stats snapshot
3164 void t4_get_port_stats_offset(struct adapter *adap, int idx,
3165 struct port_stats *stats,
3166 struct port_stats *offset)
3171 t4_get_port_stats(adap, idx, stats);
3172 for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
3173 i < (sizeof(struct port_stats) / sizeof(u64));
3179 * t4_clr_port_stats - clear port statistics
3180 * @adap: the adapter
3181 * @idx: the port index
3183 * Clear HW statistics for the given port.
3185 void t4_clr_port_stats(struct adapter *adap, int idx)
3188 u32 bgmap = t4_get_mps_bg_map(adap, idx);
3191 if (is_t4(adap->params.chip))
3192 port_base_addr = PORT_BASE(idx);
3194 port_base_addr = T5_PORT_BASE(idx);
3196 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
3197 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
3198 t4_write_reg(adap, port_base_addr + i, 0);
3199 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
3200 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
3201 t4_write_reg(adap, port_base_addr + i, 0);
3202 for (i = 0; i < 4; i++)
3203 if (bgmap & (1 << i)) {
3205 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
3208 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
3214 * t4_fw_hello - establish communication with FW
3215 * @adap: the adapter
3216 * @mbox: mailbox to use for the FW command
3217 * @evt_mbox: mailbox to receive async FW events
3218 * @master: specifies the caller's willingness to be the device master
3219 * @state: returns the current device state (if non-NULL)
3221 * Issues a command to establish communication with FW. Returns either
3222 * an error (negative integer) or the mailbox of the Master PF.
3224 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
3225 enum dev_master master, enum dev_state *state)
3228 struct fw_hello_cmd c;
3230 unsigned int master_mbox;
3231 int retries = FW_CMD_HELLO_RETRIES;
3234 memset(&c, 0, sizeof(c));
3235 INIT_CMD(c, HELLO, WRITE);
3236 c.err_to_clearinit = cpu_to_be32(
3237 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
3238 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
3239 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
3240 M_FW_HELLO_CMD_MBMASTER) |
3241 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
3242 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
3243 F_FW_HELLO_CMD_CLEARINIT);
3246 * Issue the HELLO command to the firmware. If it's not successful
3247 * but indicates that we got a "busy" or "timeout" condition, retry
3248 * the HELLO until we exhaust our retry limit. If we do exceed our
3249 * retry limit, check to see if the firmware left us any error
3250 * information and report that if so ...
3252 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3253 if (ret != FW_SUCCESS) {
3254 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
3256 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
3257 t4_report_fw_error(adap);
3261 v = be32_to_cpu(c.err_to_clearinit);
3262 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
3264 if (v & F_FW_HELLO_CMD_ERR)
3265 *state = DEV_STATE_ERR;
3266 else if (v & F_FW_HELLO_CMD_INIT)
3267 *state = DEV_STATE_INIT;
3269 *state = DEV_STATE_UNINIT;
3273 * If we're not the Master PF then we need to wait around for the
3274 * Master PF Driver to finish setting up the adapter.
3276 * Note that we also do this wait if we're a non-Master-capable PF and
3277 * there is no current Master PF; a Master PF may show up momentarily
3278 * and we wouldn't want to fail pointlessly. (This can happen when an
3279 * OS loads lots of different drivers rapidly at the same time). In
3280 * this case, the Master PF returned by the firmware will be
3281 * M_PCIE_FW_MASTER so the test below will work ...
3283 if ((v & (F_FW_HELLO_CMD_ERR | F_FW_HELLO_CMD_INIT)) == 0 &&
3284 master_mbox != mbox) {
3285 int waiting = FW_CMD_HELLO_TIMEOUT;
3288 * Wait for the firmware to either indicate an error or
3289 * initialized state. If we see either of these we bail out
3290 * and report the issue to the caller. If we exhaust the
3291 * "hello timeout" and we haven't exhausted our retries, try
3292 * again. Otherwise bail with a timeout error.
3301 * If neither Error nor Initialialized are indicated
3302 * by the firmware keep waiting till we exaust our
3303 * timeout ... and then retry if we haven't exhausted
3306 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
3307 if (!(pcie_fw & (F_PCIE_FW_ERR | F_PCIE_FW_INIT))) {
3318 * We either have an Error or Initialized condition
3319 * report errors preferentially.
3322 if (pcie_fw & F_PCIE_FW_ERR)
3323 *state = DEV_STATE_ERR;
3324 else if (pcie_fw & F_PCIE_FW_INIT)
3325 *state = DEV_STATE_INIT;
3329 * If we arrived before a Master PF was selected and
3330 * there's not a valid Master PF, grab its identity
3333 if (master_mbox == M_PCIE_FW_MASTER &&
3334 (pcie_fw & F_PCIE_FW_MASTER_VLD))
3335 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
3344 * t4_fw_bye - end communication with FW
3345 * @adap: the adapter
3346 * @mbox: mailbox to use for the FW command
3348 * Issues a command to terminate communication with FW.
3350 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
3352 struct fw_bye_cmd c;
3354 memset(&c, 0, sizeof(c));
3355 INIT_CMD(c, BYE, WRITE);
3356 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3360 * t4_fw_reset - issue a reset to FW
3361 * @adap: the adapter
3362 * @mbox: mailbox to use for the FW command
3363 * @reset: specifies the type of reset to perform
3365 * Issues a reset command of the specified type to FW.
3367 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
3369 struct fw_reset_cmd c;
3371 memset(&c, 0, sizeof(c));
3372 INIT_CMD(c, RESET, WRITE);
3373 c.val = cpu_to_be32(reset);
3374 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3378 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
3379 * @adap: the adapter
3380 * @mbox: mailbox to use for the FW RESET command (if desired)
3381 * @force: force uP into RESET even if FW RESET command fails
3383 * Issues a RESET command to firmware (if desired) with a HALT indication
3384 * and then puts the microprocessor into RESET state. The RESET command
3385 * will only be issued if a legitimate mailbox is provided (mbox <=
3386 * M_PCIE_FW_MASTER).
3388 * This is generally used in order for the host to safely manipulate the
3389 * adapter without fear of conflicting with whatever the firmware might
3390 * be doing. The only way out of this state is to RESTART the firmware
3393 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
3398 * If a legitimate mailbox is provided, issue a RESET command
3399 * with a HALT indication.
3401 if (mbox <= M_PCIE_FW_MASTER) {
3402 struct fw_reset_cmd c;
3404 memset(&c, 0, sizeof(c));
3405 INIT_CMD(c, RESET, WRITE);
3406 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
3407 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
3408 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3412 * Normally we won't complete the operation if the firmware RESET
3413 * command fails but if our caller insists we'll go ahead and put the
3414 * uP into RESET. This can be useful if the firmware is hung or even
3415 * missing ... We'll have to take the risk of putting the uP into
3416 * RESET without the cooperation of firmware in that case.
3418 * We also force the firmware's HALT flag to be on in case we bypassed
3419 * the firmware RESET command above or we're dealing with old firmware
3420 * which doesn't have the HALT capability. This will serve as a flag
3421 * for the incoming firmware to know that it's coming out of a HALT
3422 * rather than a RESET ... if it's new enough to understand that ...
3424 if (ret == 0 || force) {
3425 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
3426 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
3431 * And we always return the result of the firmware RESET command
3432 * even when we force the uP into RESET ...
3438 * t4_fw_restart - restart the firmware by taking the uP out of RESET
3439 * @adap: the adapter
3440 * @mbox: mailbox to use for the FW RESET command (if desired)
3441 * @reset: if we want to do a RESET to restart things
3443 * Restart firmware previously halted by t4_fw_halt(). On successful
3444 * return the previous PF Master remains as the new PF Master and there
3445 * is no need to issue a new HELLO command, etc.
3447 * We do this in two ways:
3449 * 1. If we're dealing with newer firmware we'll simply want to take
3450 * the chip's microprocessor out of RESET. This will cause the
3451 * firmware to start up from its start vector. And then we'll loop
3452 * until the firmware indicates it's started again (PCIE_FW.HALT
3453 * reset to 0) or we timeout.
3455 * 2. If we're dealing with older firmware then we'll need to RESET
3456 * the chip since older firmware won't recognize the PCIE_FW.HALT
3457 * flag and automatically RESET itself on startup.
3459 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
3463 * Since we're directing the RESET instead of the firmware
3464 * doing it automatically, we need to clear the PCIE_FW.HALT
3467 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
3470 * If we've been given a valid mailbox, first try to get the
3471 * firmware to do the RESET. If that works, great and we can
3472 * return success. Otherwise, if we haven't been given a
3473 * valid mailbox or the RESET command failed, fall back to
3474 * hitting the chip with a hammer.
3476 if (mbox <= M_PCIE_FW_MASTER) {
3477 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
3479 if (t4_fw_reset(adap, mbox,
3480 F_PIORST | F_PIORSTMODE) == 0)
3484 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
3489 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
3490 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
3491 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
3502 * t4_fl_pkt_align - return the fl packet alignment
3503 * @adap: the adapter
3505 * T4 has a single field to specify the packing and padding boundary.
3506 * T5 onwards has separate fields for this and hence the alignment for
3507 * next packet offset is maximum of these two.
3509 int t4_fl_pkt_align(struct adapter *adap)
3511 u32 sge_control, sge_control2;
3512 unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
3514 sge_control = t4_read_reg(adap, A_SGE_CONTROL);
3516 /* T4 uses a single control field to specify both the PCIe Padding and
3517 * Packing Boundary. T5 introduced the ability to specify these
3518 * separately. The actual Ingress Packet Data alignment boundary
3519 * within Packed Buffer Mode is the maximum of these two
3522 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
3523 ingpad_shift = X_INGPADBOUNDARY_SHIFT;
3525 ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
3527 ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
3529 fl_align = ingpadboundary;
3530 if (!is_t4(adap->params.chip)) {
3531 sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
3532 ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
3533 if (ingpackboundary == X_INGPACKBOUNDARY_16B)
3534 ingpackboundary = 16;
3536 ingpackboundary = 1 << (ingpackboundary +
3537 X_INGPACKBOUNDARY_SHIFT);
3539 fl_align = max(ingpadboundary, ingpackboundary);
3545 * t4_fixup_host_params_compat - fix up host-dependent parameters
3546 * @adap: the adapter
3547 * @page_size: the host's Base Page Size
3548 * @cache_line_size: the host's Cache Line Size
3549 * @chip_compat: maintain compatibility with designated chip
3551 * Various registers in the chip contain values which are dependent on the
3552 * host's Base Page and Cache Line Sizes. This function will fix all of
3553 * those registers with the appropriate values as passed in ...
3555 * @chip_compat is used to limit the set of changes that are made
/**
 * t4_fixup_host_params_compat - fix up host-dependent parameters
 * @adap: the adapter
 * @page_size: the host's Base Page Size
 * @cache_line_size: the host's Cache Line Size
 * @chip_compat: chip release to be compatible with
 *
 * Programs SGE/ULP registers whose correct values depend on the host's
 * Base Page and Cache Line Sizes, in a way that is compatible with the
 * indicated chip release.  This is used by drivers to maintain
 * compatibility with chip register settings when the drivers haven't
 * [yet] been updated with new chip support.
 *
 * NOTE(review): this listing is elided — several lines (braces, else
 * branches, the final return) are missing from the excerpt; the code
 * below mirrors only the visible text.
 */
int t4_fixup_host_params_compat(struct adapter *adap,
				unsigned int page_size,
				unsigned int cache_line_size,
				enum chip_type chip_compat)
	/* SGE Host Page Size field encodes log2(page size) - 10 */
	unsigned int page_shift = cxgbe_fls(page_size) - 1;
	unsigned int sge_hps = page_shift - 10;
	/* Egress status page is either 64 or 128 bytes */
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	/* Free List buffers are aligned to at least 32 bytes */
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = cxgbe_fls(fl_align) - 1;

	/* Program the same host page size for every PF */
	t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
		     V_HOSTPAGESIZEPF0(sge_hps) |
		     V_HOSTPAGESIZEPF1(sge_hps) |
		     V_HOSTPAGESIZEPF2(sge_hps) |
		     V_HOSTPAGESIZEPF3(sge_hps) |
		     V_HOSTPAGESIZEPF4(sge_hps) |
		     V_HOSTPAGESIZEPF5(sge_hps) |
		     V_HOSTPAGESIZEPF6(sge_hps) |
		     V_HOSTPAGESIZEPF7(sge_hps));

	/* T4 (or T4-compat mode): a single Ingress Padding Boundary
	 * controls both padding and packing.
	 */
	if (is_t4(adap->params.chip) || is_t4(chip_compat))
		t4_set_reg_field(adap, A_SGE_CONTROL,
				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
				 F_EGRSTATUSPAGESIZE,
				 V_INGPADBOUNDARY(fl_align_log -
						  X_INGPADBOUNDARY_SHIFT) |
				 V_EGRSTATUSPAGESIZE(stat_len != 64));

	/* T5+ path below (the opening of the else branch is elided) */
	unsigned int pack_align;
	unsigned int ingpad, ingpack;
	unsigned int pcie_cap;

	/* T5 introduced the separation of the Free List Padding and
	 * Packing Boundaries.  Thus, we can select a smaller Padding
	 * Boundary to avoid uselessly chewing up PCIe Link and Memory
	 * Bandwidth, and use a Packing Boundary which is large enough
	 * to avoid false sharing between CPUs, etc.
	 *
	 * For the PCI Link, the smaller the Padding Boundary the
	 * better.  For the Memory Controller, a smaller Padding
	 * Boundary is better until we cross under the Memory Line
	 * Size (the minimum unit of transfer to/from Memory).  If we
	 * have a Padding Boundary which is smaller than the Memory
	 * Line Size, that'll involve a Read-Modify-Write cycle on the
	 * Memory Controller which is never good.
	 */

	/* We want the Packing Boundary to be based on the Cache Line
	 * Size in order to help avoid False Sharing performance
	 * issues between CPUs, etc.  We also want the Packing
	 * Boundary to incorporate the PCI-E Maximum Payload Size.  We
	 * get best performance when the Packing Boundary is a
	 * multiple of the Maximum Payload Size.
	 */
	pack_align = fl_align;
	pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
	unsigned int mps, mps_log;

	/* The PCIe Device Control Maximum Payload Size field
	 * [bits 7:5] encodes sizes as powers of 2 starting at
	 * 128 bytes (so mps_log = field + 7).
	 */
	t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
	mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
	/* Grow the packing boundary to the payload size if needed */
	if (mps > pack_align)

	/* N.B. T5 has a different interpretation of the "0" value for
	 * the Packing Boundary.  This corresponds to 16 bytes instead
	 * of the expected 32 bytes.  We never have a Packing Boundary
	 * less than 32 bytes so we can't use that special value but
	 * on the other hand, if we wanted 32 bytes, the best we can
	 * really do is 64 bytes ...
	 */
	if (pack_align <= 16) {
		ingpack = X_INGPACKBOUNDARY_16B;
	} else if (pack_align == 32) {
		ingpack = X_INGPACKBOUNDARY_64B;
		/* general case (else branch opening elided) */
		unsigned int pack_align_log = cxgbe_fls(pack_align) - 1;

		ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
		fl_align = pack_align;

	/* Use the smallest Ingress Padding which isn't smaller than
	 * the Memory Controller Read/Write Size.  We'll take that as
	 * being 8 bytes since we don't know of any system with a
	 * wider Memory Controller Bus Width.
	 */
	if (is_t5(adap->params.chip))
		ingpad = X_INGPADBOUNDARY_32B;
		ingpad = X_T6_INGPADBOUNDARY_8B;

	t4_set_reg_field(adap, A_SGE_CONTROL,
			 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
			 F_EGRSTATUSPAGESIZE,
			 V_INGPADBOUNDARY(ingpad) |
			 V_EGRSTATUSPAGESIZE(stat_len != 64));
	t4_set_reg_field(adap, A_SGE_CONTROL2,
			 V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
			 V_INGPACKBOUNDARY(ingpack));

	/* Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * The first four entries are:
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
	/* Round existing 1500/9000-MTU buffer sizes up to fl_align */
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align - 1)
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align - 1)

	/* ULP RX TDDP page size: encoded as log2(page size) - 12 */
	t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));
/**
 * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
 * @adap: the adapter
 * @page_size: the host's Base Page Size
 * @cache_line_size: the host's Cache Line Size
 *
 * Various registers in T4 contain values which are dependent on the
 * host's Base Page and Cache Line Sizes.  This function will fix all of
 * those registers with the appropriate values as passed in ...
 *
 * This routine makes changes which are compatible with T4 chips.
 */
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size)
	/* Delegate to the _compat variant.  The trailing chip-compat
	 * argument is elided in this excerpt (presumably the T4
	 * chip/revision constant — confirm against the full source).
	 */
	return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
3724 * t4_fw_initialize - ask FW to initialize the device
3725 * @adap: the adapter
3726 * @mbox: mailbox to use for the FW command
3728 * Issues a command to FW to partially initialize the device. This
3729 * performs initialization that generally doesn't depend on user input.
3731 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3733 struct fw_initialize_cmd c;
3735 memset(&c, 0, sizeof(c));
3736 INIT_CMD(c, INITIALIZE, WRITE);
3737 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/**
 * t4_query_params_rw - query FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 * @rw: Write and read flag
 *
 * Reads the value of FW or device parameters.  Up to 7 parameters can be
 * queried at once.  (The signature tail and some guards are elided in
 * this excerpt.)
 */
static int t4_query_params_rw(struct adapter *adap, unsigned int mbox,
			      unsigned int pf, unsigned int vf,
			      unsigned int nparams, const u32 *params,
	struct fw_params_cmd c;
	/* Walk the mnemonic/value pairs inside the command payload */
	__be32 *p = &c.param[0].mnem;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	/* Load the parameter mnemonics (and, for read-modify-write via
	 * @rw, the values) into the command.
	 */
	for (i = 0; i < nparams; i++) {
		*p++ = cpu_to_be32(*params++);
		*p = cpu_to_be32(*(val + i));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	/* On success, copy the returned values back to the caller;
	 * results are interleaved with the mnemonics, hence p += 2.
	 */
	for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
		*val++ = be32_to_cpu(*p);
/* Read-only convenience wrapper around t4_query_params_rw() (rw == 0).
 * (The signature tail is elided in this excerpt.)
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
/**
 * t4_set_params_timeout - sets FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 * @timeout: the timeout time
 *
 * Sets the value of FW or device parameters.  Up to 7 parameters can be
 * specified at once.
 */
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
			  unsigned int pf, unsigned int vf,
			  unsigned int nparams, const u32 *params,
			  const u32 *val, int timeout)
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	/* Interleave each mnemonic with the value to write (the loop
	 * header over @nparams is elided in this excerpt).
	 */
	*p++ = cpu_to_be32(*params++);
	*p++ = cpu_to_be32(*val++);

	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
/* Convenience wrapper: set parameters using the default FW command
 * timeout.  (The signature tail is elided in this excerpt.)
 */
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
				     FW_CMD_MAX_TIMEOUT);
/**
 * t4_alloc_vi_func - allocate a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @nmac: number of MAC addresses needed (1 to 5)
 * @mac: the MAC addresses of the VI
 * @rss_size: size of RSS table slice associated with this VI
 * @portfunc: which Port Application Function MAC Address is desired
 * @idstype: Intrusion Detection Type
 *
 * Allocates a virtual interface for the given physical port.  If @mac is
 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
 * @mac should be large enough to hold @nmac Ethernet addresses, they are
 * stored consecutively so the space needed is @nmac * 6 bytes.
 * Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
		     unsigned int port, unsigned int pf, unsigned int vf,
		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
		     unsigned int portfunc, unsigned int idstype,
	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
				     V_FW_VI_CMD_FUNC(portfunc));
	/* Port ID is a single-byte field; no byte-swap applied here */
	c.portid_pkd = V_FW_VI_CMD_PORTID(port);

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);

	/* Copy back up to @nmac FW-assigned MAC addresses.  The nmac3..
	 * nmac0 copies below fall through from the highest requested
	 * count (the surrounding switch labels are elided in this
	 * excerpt).
	 */
	memcpy(mac, c.mac, sizeof(c.mac));
	memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
	memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
	memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
	memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));

	*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
	*vivld = G_FW_VI_CMD_VFVLD(be32_to_cpu(c.alloc_to_len16));
	*vin = G_FW_VI_CMD_VIN(be32_to_cpu(c.alloc_to_len16));
	/* NOTE(review): cpu_to_be16 is used where be16_to_cpu is
	 * conventionally meant (converting a FW big-endian field to host
	 * order).  The two are the same byte-swap, so behavior is
	 * unchanged — but confirm intent against the reference driver.
	 */
	return G_FW_VI_CMD_VIID(cpu_to_be16(c.type_to_viid));
3912 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
3913 * @adap: the adapter
3914 * @mbox: mailbox to use for the FW command
3915 * @port: physical port associated with the VI
3916 * @pf: the PF owning the VI
3917 * @vf: the VF owning the VI
3918 * @nmac: number of MAC addresses needed (1 to 5)
3919 * @mac: the MAC addresses of the VI
3920 * @rss_size: size of RSS table slice associated with this VI
3922 * Backwards compatible and convieniance routine to allocate a Virtual
3923 * Interface with a Ethernet Port Application Function and Intrustion
3924 * Detection System disabled.
3926 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
3927 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
3928 unsigned int *rss_size, u8 *vivld, u8 *vin)
3930 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
3931 FW_VI_FUNC_ETH, 0, vivld, vin);
/**
 * t4_free_vi - free a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @viid: virtual interface identifier
 *
 * Free a previously allocated virtual interface.
 */
int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int viid)
	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
	/* PF/VF ownership is stamped only on the PF mailbox path (the
	 * guarding conditional is elided in this excerpt).
	 */
	c.op_to_vfn |= cpu_to_be32(V_FW_VI_CMD_PFN(pf) |
				   V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));

	/* PF mailbox vs VF mailbox dispatch (conditional elided) */
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
/**
 * t4_set_rxmode - set Rx properties of a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @mtu: the new MTU or -1
 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
 *          -1 no change
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sets Rx properties of a virtual interface.
 */
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
	struct fw_vi_rxmode_cmd c;

	/* convert to FW values: each -1 ("no change") argument maps to
	 * the field's all-ones mask (the guarding "< 0" tests are
	 * elided in this excerpt).
	 */
	mtu = M_FW_VI_RXMODE_CMD_MTU;
	promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
	all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
	bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
	vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_RXMODE_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.mtu_to_vlanexen = cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
					V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
					V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
					V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
					V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));

	/* PF mailbox vs VF mailbox dispatch (conditional elided) */
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL,
	return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
/**
 * t4_alloc_raw_mac_filt - Adds a raw MAC entry in the MPS TCAM
 * @adap: the adapter
 * @viid: the VI id
 * @addr: the MAC address
 * @mask: the MAC address mask
 * @idx: index at which to add this entry
 * @lookup_type: MAC address for inner (1) or outer (0) header
 * @port_id: the port index
 * @sleep_ok: call is allowed to sleep
 *
 * Adds the MAC entry at the specified index using the raw MAC interface.
 *
 * Returns a negative error number or the allocated index for this MAC.
 */
int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
			  const u8 *addr, const u8 *mask, unsigned int idx,
			  u8 lookup_type, u8 port_id, bool sleep_ok)
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_raw *p = &c.u.raw;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	val = V_FW_CMD_LEN16(1) |
	      V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
	c.freemacs_to_len16 = cpu_to_be32(val);

	/* Specify that this is an inner mac address */
	p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx));

	/* Lookup Type. Outer header: 0, Inner header: 1 */
	p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
				   V_DATAPORTNUM(port_id));
	/* Lookup mask and port mask */
	p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
				    V_DATAPORTNUM(M_DATAPORTNUM));

	/* Copy the address and the mask (at byte offset 2 of data1/data1m) */
	memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
	memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	/* FW echoes the index it actually used; treat a mismatch with
	 * the requested @idx as failure.
	 */
	ret = G_FW_VI_MAC_CMD_RAW_IDX(be32_to_cpu(p->raw_idx_pkd));
	if (ret != (int)idx)
/**
 * t4_free_raw_mac_filt - Frees a raw MAC entry in the MPS TCAM
 * @adap: the adapter
 * @viid: the VI id
 * @addr: the MAC address
 * @mask: the MAC address mask
 * @idx: index of the entry in the MPS TCAM
 * @lookup_type: MAC address for inner (1) or outer (0) header
 * @port_id: the port index
 * @sleep_ok: call is allowed to sleep
 *
 * Removes the MAC entry at the specified index using the raw MAC
 * interface.
 *
 * Returns a negative error number on failure.
 */
int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
			 const u8 *addr, const u8 *mask, unsigned int idx,
			 u8 lookup_type, u8 port_id, bool sleep_ok)
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_raw *p = &c.u.raw;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	raw = V_FW_VI_MAC_CMD_ENTRY_TYPE(FW_VI_MAC_TYPE_RAW);
	c.freemacs_to_len16 = cpu_to_be32(V_FW_VI_MAC_CMD_FREEMACS(0U) |

	/* Request an ID-based free of the entry at @idx */
	p->raw_idx_pkd = cpu_to_be32(V_FW_VI_MAC_CMD_RAW_IDX(idx) |
				     FW_VI_MAC_ID_BASED_FREE);

	/* Lookup Type. Outer header: 0, Inner header: 1 */
	p->data0_pkd = cpu_to_be32(V_DATALKPTYPE(lookup_type) |
				   V_DATAPORTNUM(port_id));
	/* Lookup mask and port mask */
	p->data0m_pkd = cpu_to_be64(V_DATALKPTYPE(M_DATALKPTYPE) |
				    V_DATAPORTNUM(M_DATAPORTNUM));

	/* Copy the address and the mask */
	memcpy((u8 *)&p->data1[0] + 2, addr, ETHER_ADDR_LEN);
	memcpy((u8 *)&p->data1m[0] + 2, mask, ETHER_ADDR_LEN);

	return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
/**
 * t4_change_mac - modifies the exact-match filter for a MAC address
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: whether a new MAC allocation should be persistent
 * @add_smt: if true also add the address to the HW SMT
 *
 * Modifies an exact-match filter and sets it to the new MAC address if
 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
 * latter case the address is added persistently if @persist is %true.
 *
 * Note that in general it is not possible to modify the value of a given
 * filter so the generic way to modify an address filter is to free the one
 * being used by the old address value and allocate a new filter for the
 * new address value.
 *
 * Returns a negative error number or the index of the filter with the new
 * MAC value.  Note that this index may differ from @idx.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt)
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	int max_mac_addr = adap->params.arch.mps_tcam_size;

	if (idx < 0)	/* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				      V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	/* PF vs VF mailbox dispatch (conditional elided in this excerpt) */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);

	/* FW returns the index the address was stored at; an index at or
	 * beyond the TCAM size indicates failure.
	 */
	ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
	if (ret >= max_mac_addr)
/**
 * t4_enable_vi_params - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
 *
 * Enables/disables a virtual interface.  Note that setting DCB Enable
 * only makes sense when enabling a Virtual Interface ...
 */
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
				     V_FW_VI_ENABLE_CMD_EEN(tx_en) |
				     V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
	/* PF vs VF mailbox dispatch; the _ns variants do not sleep
	 * (guarding conditional elided in this excerpt).
	 */
	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
	return t4vf_wr_mbox_ns(adap, &c, sizeof(c), NULL);
4208 * t4_enable_vi - enable/disable a virtual interface
4209 * @adap: the adapter
4210 * @mbox: mailbox to use for the FW command
4212 * @rx_en: 1=enable Rx, 0=disable Rx
4213 * @tx_en: 1=enable Tx, 0=disable Tx
4215 * Enables/disables a virtual interface. Note that setting DCB Enable
4216 * only makes sense when enabling a Virtual Interface ...
4218 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
4219 bool rx_en, bool tx_en)
4221 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
/**
 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @start: %true to enable the queues, %false to disable them
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Starts or stops an ingress queue and its associated FLs, if any.
 */
int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
		     unsigned int pf, unsigned int vf, unsigned int iqid,
		     unsigned int fl0id, unsigned int fl1id)
	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	/* Exactly one of IQSTART/IQSTOP is set, per @start */
	c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) |
				       V_FW_IQ_CMD_IQSTOP(!start) |
	c.iqid = cpu_to_be16(iqid);
	c.fl0id = cpu_to_be16(fl0id);
	c.fl1id = cpu_to_be16(fl1id);

	/* PF path: stamp PF/VF ownership and use the PF mailbox
	 * (guarding conditional elided in this excerpt).
	 */
	c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
				   V_FW_IQ_CMD_VFN(vf));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
/**
 * t4_iq_free - free an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
	/* PF path: stamp PF/VF ownership (conditional elided in excerpt) */
	c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
				   V_FW_IQ_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
	c.iqid = cpu_to_be16(iqid);
	c.fl0id = cpu_to_be16(fl0id);
	c.fl1id = cpu_to_be16(fl1id);

	/* PF mailbox vs VF mailbox dispatch (conditional elided) */
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
/**
 * t4_eth_eq_free - free an Ethernet egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid)
	struct fw_eq_eth_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC);
	/* NOTE(review): FW_IQ_CMD PFN/VFN field macros are used on an
	 * FW_EQ_ETH_CMD here; this is only correct if the field layout
	 * is identical across the two commands — confirm against
	 * t4fw_interface.h.
	 */
	c.op_to_vfn |= cpu_to_be32(V_FW_IQ_CMD_PFN(pf) |
				   V_FW_IQ_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));

	/* PF mailbox vs VF mailbox dispatch (conditional elided) */
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	return t4vf_wr_mbox(adap, &c, sizeof(c), NULL);
/**
 * t4_link_down_rc_str - return a string for a Link Down Reason Code
 * @link_down_rc: Link Down Reason Code
 *
 * Returns a string representation of the Link Down Reason Code.
 */
static const char *t4_link_down_rc_str(unsigned char link_down_rc)
	/* Indexed directly by the FW reason code; entries for some
	 * codes are elided in this excerpt.
	 */
	static const char * const reason[] = {
		"Auto-negotiation Failure",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",

	/* Out-of-range codes get a generic fallback string */
	if (link_down_rc >= ARRAY_SIZE(reason))
		return "Bad Reason Code";

	return reason[link_down_rc];
/* Map a link speed in Mb/s to the corresponding 32-bit Port Capability
 * bit (the switch statement and its case labels are elided in this
 * excerpt; highest speed first).
 */
static u32 t4_speed_to_fwcap(u32 speed)
		return FW_PORT_CAP32_SPEED_100G;
		return FW_PORT_CAP32_SPEED_50G;
		return FW_PORT_CAP32_SPEED_40G;
		return FW_PORT_CAP32_SPEED_25G;
		return FW_PORT_CAP32_SPEED_10G;
		return FW_PORT_CAP32_SPEED_1G;
		return FW_PORT_CAP32_SPEED_100M;
/* Return the highest speed set in the port capabilities, in Mb/s. */
unsigned int t4_fwcap_to_speed(u32 caps)
/* Test one capability bit and return its speed when set (the macro's
 * return statement is elided in this excerpt).
 */
#define TEST_SPEED_RETURN(__caps_speed, __speed) \
	if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \

	/* Probe highest speed first so the fastest set bit wins */
	TEST_SPEED_RETURN(100G, 100000);
	TEST_SPEED_RETURN(50G, 50000);
	TEST_SPEED_RETURN(40G, 40000);
	TEST_SPEED_RETURN(25G, 25000);
	TEST_SPEED_RETURN(10G, 10000);
	TEST_SPEED_RETURN(1G, 1000);
	TEST_SPEED_RETURN(100M, 100);

#undef TEST_SPEED_RETURN
/* Replace the speed bits of @*new_caps with the speeds from the link's
 * current Advertised Capabilities (the function tail, including the
 * write-back of caps, is elided in this excerpt).
 */
static void t4_set_link_autoneg_speed(struct port_info *pi, u32 *new_caps)
	struct link_config *lc = &pi->link_cfg;
	u32 caps = *new_caps;

	/* Clear the speed field, then fill it from the advertised caps */
	caps &= ~V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED);
	caps |= G_FW_PORT_CAP32_SPEED(lc->acaps);
/* Set the requested link @speed in @*new_caps; fails when the port's
 * capabilities do not include that speed (the error return and the
 * function tail are elided in this excerpt).
 */
int t4_set_link_speed(struct port_info *pi, u32 speed, u32 *new_caps)
	u32 fw_speed_cap = t4_speed_to_fwcap(speed);
	struct link_config *lc = &pi->link_cfg;
	u32 caps = *new_caps;

	/* Reject speeds the port cannot do */
	if (!(lc->pcaps & fw_speed_cap))

	caps &= ~V_FW_PORT_CAP32_SPEED(M_FW_PORT_CAP32_SPEED);
	caps |= fw_speed_cap;
/* Program pause-frame (flow control) settings into @*new_caps.
 * @autoneg selects autonegotiated vs forced-speed handling; @pause_tx
 * and @pause_rx request the two flow-control directions.  (Several
 * guarding conditionals and the function tail are elided in this
 * excerpt.)
 */
int t4_set_link_pause(struct port_info *pi, u8 autoneg, u8 pause_tx,
		      u8 pause_rx, u32 *new_caps)
	struct link_config *lc = &pi->link_cfg;
	u32 caps = *new_caps;

	max_speed = t4_fwcap_to_speed(lc->link_caps);

	/* Autoneg path: only valid if the port supports autoneg */
	if (!(lc->pcaps & FW_PORT_CAP32_ANEG))

	caps |= FW_PORT_CAP32_ANEG;
	t4_set_link_autoneg_speed(pi, &caps);

	/* Forced-speed path: use the best currently advertised speed */
	max_speed = t4_fwcap_to_speed(lc->acaps);
	caps &= ~FW_PORT_CAP32_ANEG;
	t4_set_link_speed(pi, max_speed, &caps);

	if (lc->pcaps & FW_PORT_CAP32_MDIAUTO)
		caps |= V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);

	/* Clear then rebuild the FC and 802.3 pause capability fields */
	caps &= ~V_FW_PORT_CAP32_FC(M_FW_PORT_CAP32_FC);
	caps &= ~V_FW_PORT_CAP32_802_3(M_FW_PORT_CAP32_802_3);
	if (pause_tx && pause_rx) {
		caps |= FW_PORT_CAP32_FC_TX | FW_PORT_CAP32_FC_RX;
		if (lc->pcaps & FW_PORT_CAP32_802_3_PAUSE)
			caps |= FW_PORT_CAP32_802_3_PAUSE;
	} else if (pause_tx) {
		caps |= FW_PORT_CAP32_FC_TX;
		if (lc->pcaps & FW_PORT_CAP32_802_3_ASM_DIR)
			caps |= FW_PORT_CAP32_802_3_ASM_DIR;
	} else if (pause_rx) {
		caps |= FW_PORT_CAP32_FC_RX;
		if (lc->pcaps & FW_PORT_CAP32_802_3_PAUSE)
			caps |= FW_PORT_CAP32_802_3_PAUSE;

		if (lc->pcaps & FW_PORT_CAP32_802_3_ASM_DIR)
			caps |= FW_PORT_CAP32_802_3_ASM_DIR;
/* Program Forward Error Correction settings into @*new_caps.
 * @fec_rs / @fec_baser / @fec_none request Reed-Solomon, BASE-R, or no
 * FEC; which encodings are legal depends on the link speed.  (Case
 * labels, several guards, and the function tail are elided in this
 * excerpt.)
 */
int t4_set_link_fec(struct port_info *pi, u8 fec_rs, u8 fec_baser,
		    u8 fec_none, u32 *new_caps)
	struct link_config *lc = &pi->link_cfg;
	u32 max_speed, caps = *new_caps;

	/* Nothing to do on ports with no FEC capability at all */
	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))

	/* Link might be down. In that case consider the max
	 * advertised speed instead of the current link speed.
	 */
	max_speed = t4_fwcap_to_speed(lc->link_caps);
	max_speed = t4_fwcap_to_speed(lc->acaps);

	caps &= ~V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC);

	/* RS-FEC is only valid at certain speeds */
	switch (max_speed) {
		caps |= FW_PORT_CAP32_FEC_RS;

	/* BASE-R FEC likewise has a speed-dependent validity set */
	switch (max_speed) {
		caps |= FW_PORT_CAP32_FEC_BASER_RS;

	caps |= FW_PORT_CAP32_FEC_NO_FEC;

	if (!(caps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC))) {
		/* No explicit encoding is requested.
		 * So, default back to AUTO.
		 */
		switch (max_speed) {
			caps |= FW_PORT_CAP32_FEC_RS |
				FW_PORT_CAP32_FEC_NO_FEC;

			caps |= FW_PORT_CAP32_FEC_BASER_RS |
				FW_PORT_CAP32_FEC_NO_FEC;

			caps |= FW_PORT_CAP32_FEC_RS |
				FW_PORT_CAP32_FEC_BASER_RS |
				FW_PORT_CAP32_FEC_NO_FEC;
/**
 * t4_handle_get_port_info - process a FW reply message
 * @pi: the port info
 * @rpl: start of the FW message
 *
 * Processes a GET_PORT_INFO FW reply message.
 */
static void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
	const struct fw_port_cmd *cmd = (const void *)rpl;
	u8 link_ok, link_down_rc, mod_type, port_type;
	u32 action, pcaps, acaps, link_caps, lstatus;
	struct link_config *lc = &pi->link_cfg;
	struct adapter *adapter = pi->adapter;

	/* Extract the various fields from the Port Information message.
	 */
	action = be32_to_cpu(cmd->action_to_len16);
	if (G_FW_PORT_CMD_ACTION(action) != FW_PORT_ACTION_GET_PORT_INFO32) {
		dev_warn(adapter, "Handle Port Information: Bad Command/Action %#x\n",

	lstatus = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
	link_ok = (lstatus & F_FW_PORT_CMD_LSTATUS32) ? 1 : 0;
	link_down_rc = G_FW_PORT_CMD_LINKDNRC32(lstatus);
	port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus);
	mod_type = G_FW_PORT_CMD_MODTYPE32(lstatus);

	pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
	acaps = be32_to_cpu(cmd->u.info32.acaps32);
	link_caps = be32_to_cpu(cmd->u.info32.linkattr32);

	/* A transceiver module change re-initializes the SW link config
	 * and notifies the OS layer.
	 */
	if (mod_type != lc->mod_type) {
		t4_init_link_config(pi, pcaps, acaps, lc->mdio_addr,
				    port_type, mod_type);
		t4_os_portmod_changed(adapter, pi->pidx);

	if (link_ok != lc->link_ok || acaps != lc->acaps ||
	    link_caps != lc->link_caps) { /* something changed */
		if (!link_ok && lc->link_ok) {
			lc->link_down_rc = link_down_rc;
			/* NOTE(review): "adap" is not a local here
			 * (local is "adapter"); this relies on dev_warn()
			 * discarding its first argument — confirm.
			 */
			dev_warn(adap, "Port %d link down, reason: %s\n",
				 t4_link_down_rc_str(link_down_rc));

		lc->link_ok = link_ok;
		lc->link_caps = link_caps;
		t4_os_link_changed(adapter, pi->pidx);

	/* Re-apply admin link settings after a module change (PF only) */
	if (mod_changed != 0 && is_pf4(adapter) != 0) {
		u32 mod_caps = lc->admin_caps;

		ret = t4_link_l1cfg_ns(pi, mod_caps);
		if (ret != FW_SUCCESS)
			 "Attempt to update new Transceiver Module settings %#x failed with error: %d\n",
4613 * t4_ctrl_eq_free - free a control egress queue
4614 * @adap: the adapter
4615 * @mbox: mailbox to use for the FW command
4616 * @pf: the PF owning the queue
4617 * @vf: the VF owning the queue
4618 * @eqid: egress queue id
4620 * Frees a control egress queue.
4622 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
4623 unsigned int vf, unsigned int eqid)
4625 struct fw_eq_ctrl_cmd c;
4627 memset(&c, 0, sizeof(c));
4628 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) |
4629 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
4630 V_FW_EQ_CTRL_CMD_PFN(pf) |
4631 V_FW_EQ_CTRL_CMD_VFN(vf));
4632 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
4633 c.cmpliqid_eqid = cpu_to_be32(V_FW_EQ_CTRL_CMD_EQID(eqid));
4634 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
	u8 opcode = *(const u8 *)rpl;

	/* This might be a port command ... this simplifies the following
	 * conditionals ... We can get away with pre-dereferencing
	 * action_to_len16 because it's in the first 16 bytes and all messages
	 * will be at least that long.
	 */
	const struct fw_port_cmd *p = (const void *)rpl;
	unsigned int action =
		G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));

	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO32) {
		/* link/module state change message */
		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
		struct port_info *pi = NULL;

		/* Find the port_info that owns this TX channel */
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->tx_chan == chan)

		t4_handle_get_port_info(pi, rpl);
		/* Unrecognized opcode (else branch elided in excerpt) */
		dev_warn(adap, "Unknown firmware reply %d\n", opcode);
/* Reset the SW link state for port @idx (most of the body is elided in
 * this excerpt; only the reason-code clear is visible).
 */
void t4_reset_link_config(struct adapter *adap, int idx)
	struct port_info *pi = adap2pinfo(adap, idx);
	struct link_config *lc = &pi->link_cfg;

	lc->link_down_rc = 0;
/**
 * t4_init_link_config - initialize a link's SW state
 * @pi: the port info
 * @pcaps: link Port Capabilities
 * @acaps: link current Advertised Port Capabilities
 * @mdio_addr: address of the PHY
 * @port_type: firmware port type
 * @mod_type: firmware module type
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
void t4_init_link_config(struct port_info *pi, u32 pcaps, u32 acaps,
			 u8 mdio_addr, u8 port_type, u8 mod_type)
	u8 fec_rs = 0, fec_baser = 0, fec_none = 0;
	struct link_config *lc = &pi->link_cfg;

	/* Seed the administrative (requested) caps from the advertised set */
	lc->admin_caps = acaps;
	lc->mdio_addr = mdio_addr;
	lc->port_type = port_type;
	lc->mod_type = mod_type;
	lc->link_down_rc = 0;

	/* Turn Tx and Rx pause off by default */
	lc->admin_caps &= ~V_FW_PORT_CAP32_FC(M_FW_PORT_CAP32_FC);
	lc->admin_caps &= ~V_FW_PORT_CAP32_802_3(M_FW_PORT_CAP32_802_3);
	if (lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)
		lc->admin_caps &= ~FW_PORT_CAP32_FORCE_PAUSE;

	/* Reset FEC caps to default values (the fec_* flag assignments
	 * are elided in this excerpt).
	 */
	if (lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)) {
		if (lc->acaps & FW_PORT_CAP32_FEC_RS)
		else if (lc->acaps & FW_PORT_CAP32_FEC_BASER_RS)

		lc->admin_caps &= ~V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC);
		t4_set_link_fec(pi, fec_rs, fec_baser, fec_none,

	if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
		lc->admin_caps &= ~FW_PORT_CAP32_FORCE_FEC;

	/* Reset MDI to AUTO */
	if (lc->pcaps & FW_PORT_CAP32_MDIAUTO) {
		lc->admin_caps &= ~V_FW_PORT_CAP32_MDI(M_FW_PORT_CAP32_MDI);
		lc->admin_caps |= V_FW_PORT_CAP32_MDI(FW_PORT_CAP32_MDI_AUTO);
4749 * t4_wait_dev_ready - wait until reads of registers work
4751 * Right after the device is RESET it can take a small amount of time
4752 * for it to respond to register reads. Until then, all reads will
4753 * return either 0xff...ff or 0xee...ee. Return an error if reads
4754 * don't work within a reasonable time frame.
4756 static int t4_wait_dev_ready(struct adapter *adapter)
/* NOTE(review): the delay/retry statements between the two WHOAMI reads
 * are missing from this view — confirm against the full source.
 */
4760 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
/* A value other than all-ones / CIM_PF_NOACCESS means the device is
 * already responding to register reads.
 */
4762 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
/* Re-read WHOAMI after the (not visible here) wait and retry check. */
4766 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
4767 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
4770 dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
4776 u32 vendor_and_model_id;
4780 int t4_get_flash_params(struct adapter *adapter)
/* Determine the serial-flash part behind the adapter's SF interface and
 * fill in adapter->params.sf_size / sf_nsec from its JEDEC Read ID reply.
 * NOTE(review): interior lines (case labels, closing braces, some
 * declarations) are missing from this view — original line numbers jump.
 */
4783 * Table for non-standard supported Flash parts. Note, all Flash
4784 * parts must have 64KB sectors.
4786 static struct flash_desc supported_flash[] = {
4787 { 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
4792 unsigned int part, manufacturer;
4793 unsigned int density, size = 0;
4796 * Issue a Read ID Command to the Flash part. We decode supported
4797 * Flash parts and their sizes from this. There's a newer Query
4798 * Command which can retrieve detailed geometry information but
4799 * many Flash parts don't support it.
4801 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
4803 ret = sf1_read(adapter, 3, 0, 1, &flashid);
4804 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
4809 * Check to see if it's one of our non-standard supported Flash parts.
4811 for (part = 0; part < ARRAY_SIZE(supported_flash); part++) {
4812 if (supported_flash[part].vendor_and_model_id == flashid) {
4813 adapter->params.sf_size =
4814 supported_flash[part].size_mb;
4815 adapter->params.sf_nsec =
4816 adapter->params.sf_size / SF_SEC_SIZE;
4822 * Decode Flash part size. The code below looks repetitive with
4823 * common encodings, but that's not guaranteed in the JEDEC
4824 * specification for the Read JEDEC ID command. The only thing that
4825 * we're guaranteed by the JEDEC specification is where the
4826 * Manufacturer ID is in the returned result. After that each
4827 * Manufacturer ~could~ encode things completely differently.
4828 * Note, all Flash parts must have 64KB sectors.
4830 manufacturer = flashid & 0xff;
4831 switch (manufacturer) {
4832 case 0x20: { /* Micron/Numonix */
4834 * This Density -> Size decoding table is taken from Micron
4837 density = (flashid >> 16) & 0xff;
4840 size = 1 << 20; /* 1MB */
4843 size = 1 << 21; /* 2MB */
4846 size = 1 << 22; /* 4MB */
4849 size = 1 << 23; /* 8MB */
4852 size = 1 << 24; /* 16MB */
4855 size = 1 << 25; /* 32MB */
4858 size = 1 << 26; /* 64MB */
4861 size = 1 << 27; /* 128MB */
4864 size = 1 << 28; /* 256MB */
4870 case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
4872 * This Density -> Size decoding table is taken from ISSI
4875 density = (flashid >> 16) & 0xff;
4878 size = 1 << 25; /* 32MB */
4881 size = 1 << 26; /* 64MB */
4887 case 0xc2: { /* Macronix */
4889 * This Density -> Size decoding table is taken from Macronix
4892 density = (flashid >> 16) & 0xff;
4895 size = 1 << 23; /* 8MB */
4898 size = 1 << 24; /* 16MB */
4904 case 0xef: { /* Winbond */
4906 * This Density -> Size decoding table is taken from Winbond
4909 density = (flashid >> 16) & 0xff;
4912 size = 1 << 23; /* 8MB */
4915 size = 1 << 24; /* 16MB */
4922 /* If we didn't recognize the FLASH part, that's no real issue: the
4923 * Hardware/Software contract says that Hardware will _*ALWAYS*_
4924 * use a FLASH part which is at least 4MB in size and has 64KB
4925 * sectors. The unrecognized FLASH part is likely to be much larger
4926 * than 4MB, but that's all we really need.
4930 "Unknown Flash Part, ID = %#x, assuming 4MB\n",
4936 * Store decoded Flash size and fall through into vetting code.
4938 adapter->params.sf_size = size;
4939 adapter->params.sf_nsec = size / SF_SEC_SIZE;
4943 * We should reject adapters with FLASHes which are too small. So, emit
4946 if (adapter->params.sf_size < FLASH_MIN_SIZE)
4947 dev_warn(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
4948 flashid, adapter->params.sf_size, FLASH_MIN_SIZE);
4953 static void set_pcie_completion_timeout(struct adapter *adapter,
/* Program the PCIe Completion Timeout field of Device Control 2 in the
 * PCI Express capability via a read-modify-write of config space.
 * NOTE(review): the masking/merge of the new range value between the
 * read and write is not visible in this view — verify in full source.
 */
4959 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
4961 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
4964 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
4969 * t4_get_chip_type - Determine chip type from device ID
4970 * @adap: the adapter
4971 * @ver: adapter version
4973 int t4_get_chip_type(struct adapter *adap, int ver)
/* Combine the PCI-ID-derived version with the PL_REV revision into a
 * CHELSIO_CHIP_CODE value; unsupported versions produce an error.
 * NOTE(review): the switch/case labels selecting T5 vs T6 are missing
 * from this view — only the per-case bodies are visible.
 */
4975 enum chip_type chip = 0;
4976 u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV));
4978 /* Retrieve adapter's device ID */
4981 chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
4984 chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
4987 dev_err(adap, "Device %d is not supported\n",
4988 adap->params.pci.device_id);
4996 * t4_prep_adapter - prepare SW and HW for operation
4997 * @adapter: the adapter
4999 * Initialize adapter SW state for the various HW modules, set initial
5000 * values for some adapter tunables, take PHYs out of reset, and
5001 * initialize the MDIO interface.
5003 int t4_prep_adapter(struct adapter *adapter)
/* NOTE(review): switch/case labels and early-return error paths are
 * missing from this view (original line-number gaps).
 */
5008 ret = t4_wait_dev_ready(adapter);
/* Cache PCI IDs and the PL revision used to derive the chip code. */
5012 pl_rev = G_REV(t4_read_reg(adapter, A_PL_REV));
5013 adapter->params.pci.device_id = adapter->pdev->id.device_id;
5014 adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;
5017 * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
5018 * ADAPTER (VERSION << 4 | REVISION)
5020 ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
5021 adapter->params.chip = 0;
/* T5 architecture parameters. */
5024 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
5025 adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
5026 adapter->params.arch.mps_tcam_size =
5027 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5028 adapter->params.arch.mps_rplc_size = 128;
5029 adapter->params.arch.nchan = NCHAN;
5030 adapter->params.arch.vfcount = 128;
5031 /* Congestion map is for 4 channels so that
5032 * MPS can have 4 priority per port.
5034 adapter->params.arch.cng_ch_bits_log = 2;
/* T6 architecture parameters. */
5037 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
5038 adapter->params.arch.sge_fl_db = 0;
5039 adapter->params.arch.mps_tcam_size =
5040 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
5041 adapter->params.arch.mps_rplc_size = 256;
5042 adapter->params.arch.nchan = 2;
5043 adapter->params.arch.vfcount = 256;
5044 /* Congestion map is for 2 channels so that
5045 * MPS can have 8 priority per port.
5047 adapter->params.arch.cng_ch_bits_log = 3;
5050 dev_err(adapter, "%s: Device %d is not supported\n",
5051 __func__, adapter->params.pci.device_id);
5055 adapter->params.pci.vpd_cap_addr =
5056 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
5058 ret = t4_get_flash_params(adapter);
5060 dev_err(adapter, "Unable to retrieve Flash Parameters, ret = %d\n",
5065 adapter->params.cim_la_size = CIMLA_SIZE;
5067 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
5070 * Default port and clock for debugging in case we can't reach FW.
5072 adapter->params.nports = 1;
5073 adapter->params.portvec = 1;
5074 adapter->params.vpd.cclk = 50000;
5076 /* Set pci completion timeout value to 4 seconds. */
5077 set_pcie_completion_timeout(adapter, 0xd);
5082 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
5083 * @adapter: the adapter
5084 * @qid: the Queue ID
5085 * @qtype: the Ingress or Egress type for @qid
5086 * @pbar2_qoffset: BAR2 Queue Offset
5087 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
5089 * Returns the BAR2 SGE Queue Registers information associated with the
5090 * indicated Absolute Queue ID. These are passed back in return value
5091 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
5092 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
5094 * This may return an error which indicates that BAR2 SGE Queue
5095 * registers aren't available. If an error is not returned, then the
5096 * following values are returned:
5098 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
5099 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
5101 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
5102 * require the "Inferred Queue ID" ability may be used. E.g. the
5103 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
5104 * then these "Inferred Queue ID" register may not be used.
5106 int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
5107 enum t4_bar2_qtype qtype, u64 *pbar2_qoffset,
5108 unsigned int *pbar2_qid)
5110 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
5111 u64 bar2_page_offset, bar2_qoffset;
5112 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
5115 * T4 doesn't support BAR2 SGE Queue registers.
5117 if (is_t4(adapter->params.chip))
5121 * Get our SGE Page Size parameters.
/* hps is the encoded host page size; +10 converts it to a shift
 * relative to 1KB (see t4_init_sge_params()).
 */
5123 page_shift = adapter->params.sge.hps + 10;
5124 page_size = 1 << page_shift;
5127 * Get the right Queues per Page parameters for our Queue.
5129 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS ?
5130 adapter->params.sge.eq_qpp :
5131 adapter->params.sge.iq_qpp);
5132 qpp_mask = (1 << qpp_shift) - 1;
5135 * Calculate the basics of the BAR2 SGE Queue register area:
5136 * o The BAR2 page the Queue registers will be in.
5137 * o The BAR2 Queue ID.
5138 * o The BAR2 Queue ID Offset into the BAR2 page.
5140 bar2_page_offset = ((qid >> qpp_shift) << page_shift);
5141 bar2_qid = qid & qpp_mask;
5142 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
5145 * If the BAR2 Queue ID Offset is less than the Page Size, then the
5146 * hardware will infer the Absolute Queue ID simply from the writes to
5147 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
5148 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
5149 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
5150 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
5151 * from the BAR2 Page and BAR2 Queue ID.
5153 * One important consequence of this is that some BAR2 SGE registers
5154 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
5155 * there. But other registers synthesize the SGE Queue ID purely
5156 * from the writes to the registers -- the Write Combined Doorbell
5157 * Buffer is a good example. These BAR2 SGE Registers are only
5158 * available for those BAR2 SGE Register areas where the SGE Absolute
5159 * Queue ID can be inferred from simple writes.
5161 bar2_qoffset = bar2_page_offset;
5162 bar2_qinferred = (bar2_qid_offset < page_size);
5163 if (bar2_qinferred) {
5164 bar2_qoffset += bar2_qid_offset;
/* Return results through the output pointers. */
5168 *pbar2_qoffset = bar2_qoffset;
5169 *pbar2_qid = bar2_qid;
5174 * t4_init_sge_params - initialize adap->params.sge
5175 * @adapter: the adapter
5177 * Initialize various fields of the adapter's SGE Parameters structure.
5179 int t4_init_sge_params(struct adapter *adapter)
5181 struct sge_params *sge_params = &adapter->params.sge;
5183 unsigned int s_hps, s_qpp;
5186 * Extract the SGE Page Size for our PF.
/* The per-PF fields are evenly spaced in the register, so the shift is
 * PF0's position plus PF-index times the field stride.
 */
5188 hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
5189 s_hps = (S_HOSTPAGESIZEPF0 + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) *
5191 sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
5194 * Extract the SGE Egress and Ingress Queues Per Page for our PF.
5196 s_qpp = (S_QUEUESPERPAGEPF0 +
5197 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
5198 qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
5199 sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
5200 qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
5201 sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
5207 * t4_init_tp_params - initialize adap->params.tp
5208 * @adap: the adapter
5210 * Initialize various fields of the adapter's TP Parameters structure.
5212 int t4_init_tp_params(struct adapter *adap)
/* NOTE(review): some declarations and branch/brace lines are missing
 * from this view (original line-number gaps).
 */
5217 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
5218 adap->params.tp.tre = G_TIMERRESOLUTION(v);
5219 adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
5221 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5222 for (chan = 0; chan < NCHAN; chan++)
5223 adap->params.tp.tx_modq[chan] = chan;
5226 * Cache the adapter's Compressed Filter Mode/Mask and global Ingress
5229 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5230 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FILTER) |
5231 V_FW_PARAMS_PARAM_Y(FW_PARAM_DEV_FILTER_MODE_MASK));
5233 /* Read current value */
5234 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5237 dev_info(adap, "Current filter mode/mask 0x%x:0x%x\n",
5238 G_FW_PARAMS_PARAM_FILTER_MODE(v),
5239 G_FW_PARAMS_PARAM_FILTER_MASK(v));
5240 adap->params.tp.vlan_pri_map =
5241 G_FW_PARAMS_PARAM_FILTER_MODE(v);
5242 adap->params.tp.filter_mask =
5243 G_FW_PARAMS_PARAM_FILTER_MASK(v);
5246 "Failed to read filter mode/mask via fw api, using indirect-reg-read\n");
5248 /* In case of older-fw (which doesn't expose the api
5249 * FW_PARAM_DEV_FILTER_MODE_MASK) and newer-driver (which uses
5250 * the fw api) combination, fall-back to older method of reading
5251 * the filter mode from indirect-register
5253 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5254 &adap->params.tp.vlan_pri_map, 1,
5257 /* With the older-fw and newer-driver combination we might run
5258 * into an issue when user wants to use hash filter region but
5259 * the filter_mask is zero, in this case filter_mask validation
5260 * is tough. To avoid that we set the filter_mask same as filter
5261 * mode, which will behave exactly as the older way of ignoring
5262 * the filter mask validation.
5264 adap->params.tp.filter_mask = adap->params.tp.vlan_pri_map;
5267 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
5268 &adap->params.tp.ingress_config, 1,
5269 A_TP_INGRESS_CONFIG);
5271 /* For T6, cache the adapter's compressed error vector
5272 * and passing outer header info for encapsulated packets.
5274 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
5275 v = t4_read_reg(adap, A_TP_OUT_CONFIG);
5276 adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
5280 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
5281 * shift positions of several elements of the Compressed Filter Tuple
5282 * for this adapter which we need frequently ...
5284 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
5285 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
5286 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
5287 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
5289 adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
5291 adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
5293 adap->params.tp.tos_shift = t4_filter_field_shift(adap, F_TOS);
/* Cache the 64-bit LE hash-filter mask from its two 32-bit halves. */
5295 v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
5296 adap->params.tp.hash_filter_mask = v;
5297 v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
5298 adap->params.tp.hash_filter_mask |= ((u64)v << 32);
5304 * t4_filter_field_shift - calculate filter field shift
5305 * @adap: the adapter
5306 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
5308 * Return the shift position of a filter field within the Compressed
5309 * Filter Tuple. The filter field is specified via its selection bit
5310 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
5312 int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel)
5314 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
/* A field not enabled in the filter mode has no tuple position. */
5318 if ((filter_mode & filter_sel) == 0)
/* Accumulate the widths of all enabled fields below @filter_sel.
 * NOTE(review): most case labels of this switch are missing from this
 * view; only the width accumulations are visible.
 */
5321 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
5322 switch (filter_mode & sel) {
5324 field_shift += W_FT_FCOE;
5327 field_shift += W_FT_PORT;
5330 field_shift += W_FT_VNIC_ID;
5333 field_shift += W_FT_VLAN;
5336 field_shift += W_FT_TOS;
5339 field_shift += W_FT_PROTOCOL;
5342 field_shift += W_FT_ETHERTYPE;
5345 field_shift += W_FT_MACMATCH;
5348 field_shift += W_FT_MPSHITTYPE;
5350 case F_FRAGMENTATION:
5351 field_shift += W_FT_FRAGMENTATION;
5358 int t4_init_rss_mode(struct adapter *adap, int mbox)
/* Query the firmware RSS VI configuration for every port and cache the
 * returned RSS mode in each port_info.
 */
5361 struct fw_rss_vi_config_cmd rvc;
5363 memset(&rvc, 0, sizeof(rvc));
5365 for_each_port(adap, i) {
5366 struct port_info *p = adap2pinfo(adap, i);
/* Build a READ request for this VI's RSS config. */
5368 rvc.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
5369 F_FW_CMD_REQUEST | F_FW_CMD_READ |
5370 V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
5371 rvc.retval_len16 = htonl(FW_LEN16(rvc));
5372 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
5375 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
5380 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
/* Initialize each port's SW state: query port info from firmware,
 * allocate a Virtual Interface, set the MAC address, and seed the
 * link configuration. NOTE(review): loop/brace and error-check lines
 * are missing from this view (original line-number gaps).
 */
5382 u32 param, val, pcaps, acaps;
5383 enum fw_port_type port_type;
5384 struct fw_port_cmd cmd;
5385 u8 vivld = 0, vin = 0;
/* Request 32-bit Port Capabilities reporting from the firmware. */
5390 param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) |
5391 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
5393 ret = t4_set_params(adap, mbox, pf, vf, 1, &param, &val);
5397 memset(&cmd, 0, sizeof(cmd));
5399 for_each_port(adap, i) {
5400 struct port_info *pi = adap2pinfo(adap, i);
5401 unsigned int rss_size = 0;
/* Advance j to the next module slot present in the port vector. */
5404 while ((adap->params.portvec & (1 << j)) == 0)
5407 memset(&cmd, 0, sizeof(cmd));
5408 cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
5411 V_FW_PORT_CMD_PORTID(j));
5412 val = FW_PORT_ACTION_GET_PORT_INFO32;
5413 cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(val) |
5415 ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
5419 /* Extract the various fields from the Port Information
5422 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
5424 port_type = G_FW_PORT_CMD_PORTTYPE32(lstatus32);
5425 mdio_addr = (lstatus32 & F_FW_PORT_CMD_MDIOCAP32) ?
5426 (int)G_FW_PORT_CMD_MDIOADDR32(lstatus32) : -1;
5427 pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
5428 acaps = be32_to_cpu(cmd.u.info32.acaps32);
5430 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size,
5437 pi->rss_size = rss_size;
5438 t4_os_set_hw_addr(adap, i, addr);
5440 /* If fw supports returning the VIN as part of FW_VI_CMD,
5441 * save the returned values.
5443 if (adap->params.viid_smt_extn_support) {
5447 /* Retrieve the values from VIID */
5448 pi->vivld = G_FW_VIID_VIVLD(pi->viid);
5449 pi->vin = G_FW_VIID_VIN(pi->viid);
5452 t4_init_link_config(pi, pcaps, acaps, mdio_addr, port_type,
5453 FW_PORT_MOD_TYPE_NA);
5460 * t4_memory_rw_addr - read/write adapter memory via PCIE memory window
5461 * @adap: the adapter
5462 * @win: PCI-E Memory Window to use
5463 * @addr: address within adapter memory
5464 * @len: amount of memory to transfer
5465 * @hbuf: host memory buffer
5466 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
5468 * Reads/writes an [almost] arbitrary memory region in the firmware: the
5469 * firmware memory address and host buffer must be aligned on 32-bit
5470 * boundaries; the length may be arbitrary.
5473 * 1. The memory is transferred as a raw byte sequence from/to the
5474 * firmware's memory. If this memory contains data structures which
5475 * contain multi-byte integers, it's the caller's responsibility to
5476 * perform appropriate byte order conversions.
5478 * 2. It is the Caller's responsibility to ensure that no other code
5479 * uses the specified PCI-E Memory Window while this routine is
5480 * using it. This is typically done via the use of OS-specific
5483 int t4_memory_rw_addr(struct adapter *adap, int win, u32 addr,
5484 u32 len, void *hbuf, int dir)
/* NOTE(review): loop headers, the residual-length computation and some
 * brace lines are missing from this view (original line-number gaps).
 */
5486 u32 pos, offset, resid;
5487 u32 win_pf, mem_reg, mem_aperture, mem_base;
5490 /* Argument sanity checks ...*/
5491 if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
5495 /* It's convenient to be able to handle lengths which aren't a
5496 * multiple of 32-bits because we often end up transferring files to
5497 * the firmware. So we'll handle that by normalizing the length here
5498 * and then handling any residual transfer at the end.
5503 /* Each PCI-E Memory Window is programmed with a window size -- or
5504 * "aperture" -- which controls the granularity of its mapping onto
5505 * adapter memory. We need to grab that aperture in order to know
5506 * how to use the specified window. The window is also programmed
5507 * with the base address of the Memory Window in BAR0's address
5508 * space. For T4 this is an absolute PCI-E Bus Address. For T5
5509 * the address is relative to BAR0.
5511 mem_reg = t4_read_reg(adap,
5512 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN,
5514 mem_aperture = 1 << (G_WINDOW(mem_reg) + X_WINDOW_SHIFT);
5515 mem_base = G_PCIEOFST(mem_reg) << X_PCIEOFST_SHIFT;
5517 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->pf);
5519 /* Calculate our initial PCI-E Memory Window Position and Offset into
/* pos is the aperture-aligned base; offset is the remainder within it. */
5522 pos = addr & ~(mem_aperture - 1);
5523 offset = addr - pos;
5525 /* Set up initial PCI-E Memory Window to cover the start of our
5526 * transfer. (Read it back to ensure that changes propagate before we
5527 * attempt to use the new value.)
5530 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win),
5533 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, win));
5535 /* Transfer data to/from the adapter as long as there's an integral
5536 * number of 32-bit transfers to complete.
5538 * A note on Endianness issues:
5540 * The "register" reads and writes below from/to the PCI-E Memory
5541 * Window invoke the standard adapter Big-Endian to PCI-E Link
5542 * Little-Endian "swizzle." As a result, if we have the following
5543 * data in adapter memory:
5545 * Memory: ... | b0 | b1 | b2 | b3 | ...
5546 * Address: i+0 i+1 i+2 i+3
5548 * Then a read of the adapter memory via the PCI-E Memory Window
5553 * [ b3 | b2 | b1 | b0 ]
5555 * If this value is stored into local memory on a Little-Endian system
5556 * it will show up correctly in local memory as:
5558 * ( ..., b0, b1, b2, b3, ... )
5560 * But on a Big-Endian system, the store will show up in memory
5561 * incorrectly swizzled as:
5563 * ( ..., b3, b2, b1, b0, ... )
5565 * So we need to account for this in the reads and writes to the
5566 * PCI-E Memory Window below by undoing the register read/write
5570 if (dir == T4_MEMORY_READ)
5571 *buf++ = le32_to_cpu((__le32)t4_read_reg(adap,
5575 t4_write_reg(adap, mem_base + offset,
5576 (u32)cpu_to_le32(*buf++));
5577 offset += sizeof(__be32);
5578 len -= sizeof(__be32);
5580 /* If we've reached the end of our current window aperture,
5581 * move the PCI-E Memory Window on to the next. Note that
5582 * doing this here after "len" may be 0 allows us to set up
5583 * the PCI-E Memory Window for a possible final residual
5584 * transfer below ...
5586 if (offset == mem_aperture) {
5587 pos += mem_aperture;
5590 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
5591 win), pos | win_pf);
5593 PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET,
5598 /* If the original transfer had a length which wasn't a multiple of
5599 * 32-bits, now's where we need to finish off the transfer of the
5600 * residual amount. The PCI-E Memory Window has already been moved
5601 * above (if necessary) to cover this final transfer.
5611 if (dir == T4_MEMORY_READ) {
5612 last.word = le32_to_cpu((__le32)t4_read_reg(adap,
/* Zero out the tail bytes of the partial word read back to the host. */
5615 for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
5616 bp[i] = last.byte[i];
5619 for (i = resid; i < 4; i++)
5621 t4_write_reg(adap, mem_base + offset,
5622 (u32)cpu_to_le32(last.word));
5630 * t4_memory_rw_mtype - read/write EDC 0, EDC 1 or MC via PCIE memory window
5631 * @adap: the adapter
5632 * @win: PCI-E Memory Window to use
5633 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
5634 * @maddr: address within indicated memory type
5635 * @len: amount of memory to transfer
5636 * @hbuf: host memory buffer
5637 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
5639 * Reads/writes adapter memory using t4_memory_rw_addr(). This routine
5640 * provides an (memory type, address within memory type) interface.
5642 int t4_memory_rw_mtype(struct adapter *adap, int win, int mtype, u32 maddr,
5643 u32 len, void *hbuf, int dir)
5646 u32 edc_size, mc_size;
5648 /* Offset into the region of memory which is being accessed
5651 * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
5652 * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
5654 edc_size = G_EDRAM0_SIZE(t4_read_reg(adap, A_MA_EDRAM0_BAR));
5655 if (mtype != MEM_MC1) {
5656 mtype_offset = (mtype * (edc_size * 1024 * 1024));
5658 mc_size = G_EXT_MEM0_SIZE(t4_read_reg(adap,
5659 A_MA_EXT_MEMORY0_BAR));
5660 mtype_offset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
5663 return t4_memory_rw_addr(adap, win,
5664 mtype_offset + maddr, len,