/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2014-2017 Chelsio Communications.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Chelsio Communications nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <netinet/in.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_byteorder.h>

#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "t4fw_interface.h"

static void init_link_config(struct link_config *lc, unsigned int pcaps,
			     unsigned int acaps);

/**
 * t4_read_mtu_tbl - returns the values in the HW path MTU table
 * @adap: the adapter
 * @mtus: where to store the MTU values
 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 * Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		t4_write_reg(adap, A_TP_MTU_TABLE,
			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
		v = t4_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = G_MTUVALUE(v);
		if (mtu_log)
			mtu_log[i] = G_MTUWIDTH(v);
	}
}

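/*
 * Example (illustrative only, not part of the driver): read back and log
 * the HW path MTU table via t4_read_mtu_tbl().  The "adap" handle and the
 * dev_debug() logging macro follow the conventions used elsewhere in this
 * file.
 */
static inline void example_dump_mtu_tbl(struct adapter *adap)
{
	u16 mtus[NMTUS];
	u8 mtu_log[NMTUS];
	int i;

	t4_read_mtu_tbl(adap, mtus, mtu_log);
	for (i = 0; i < NMTUS; i++)
		dev_debug(adap, "MTU[%d] = %u (log2 width %u)\n",
			  i, mtus[i], mtu_log[i]);
}
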
/**
 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 * @adap: the adapter
 * @addr: the indirect TP register address
 * @mask: specifies the field within the register to modify
 * @val: new value for the field
 *
 * Sets a field of an indirect TP register to the given value.
 */
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
	t4_write_reg(adap, A_TP_PIO_DATA, val);
}

/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 * t4_load_mtus - write the MTU and congestion control HW tables
 * @adap: the adapter
 * @mtus: the values for the MTU table
 * @alpha: the values for the congestion control alpha parameter
 * @beta: the values for the congestion control beta parameter
 *
 * Write the HW MTU table with the supplied MTUs and the high-speed
 * congestion control table with the supplied alpha, beta, and MTUs.
 * We write the two tables together because the additive increments
 * depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = cxgbe_fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))	/* round */
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}

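/*
 * Worked example (illustrative): for mtu = 1500 with alpha[w] = 2 and
 * avg_pkts[w] = 20 (congestion window 4 above), the additive increment
 * written to A_TP_CCTRL_TABLE is
 * max(((1500 - 40) * 2) / 20, CC_MIN_INCR) = max(146, 2) = 146.
 */
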
/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

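/*
 * Convenience wrapper for callers (such as sf1_read()/sf1_write() below)
 * that don't need the completion-time register value.  In the driver
 * proper this helper lives in a header; it is reproduced here as a
 * minimal sketch.
 */
static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity,
				   attempts, delay, NULL);
}
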
/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void)t4_read_reg(adapter, addr);	/* flush */
}

/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: address of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}

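/*
 * Example (illustrative): read four consecutive indirect TP registers
 * through the A_TP_PIO_ADDR/A_TP_PIO_DATA pair used by
 * t4_tp_wr_bits_indirect() above.  "start" is a caller-chosen indirect
 * TP register address.
 */
static inline void example_read_tp_pio(struct adapter *adap, u32 *vals,
				       unsigned int start)
{
	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, vals, 4, start);
}
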
/**
 * t4_report_fw_error - report firmware error
 * @adap: the adapter
 *
 * The adapter firmware can indicate error conditions to the host.
 * If the firmware has indicated an error, print out the reason for
 * the firmware error.
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char * const reason[] = {
		"Crash",			/* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
		"Unexpected Event",		/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",		/* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",			/* reserved */
	};

	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR)
		pr_err("%s: Firmware reports adapter error: %s\n",
		       __func__, reason[G_PCIE_FW_EVAL(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = htobe64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	pr_warn("FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
		be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
}

#define X_CIM_PF_NOACCESS 0xeeeeeeee

/*
 * If the Host OS Driver needs locking around accesses to the mailbox, this
 * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
 */
/* makes single-statement usage a bit cleaner ... */
#ifdef T4_OS_NEEDS_MBOX_LOCKING
#define T4_OS_MBOX_LOCKING(x) x
#else
#define T4_OS_MBOX_LOCKING(x) do {} while (0)
#endif

/**
 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 * @timeout: time to wait for command to finish before timing out
 *	     (negative implies @sleep_ok=false)
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command.  If @rpl is not %NULL it is used to
 * store the FW's reply to the command.  The command and its optional
 * reply are of the same length.  Some FW commands like RESET and
 * INITIALIZE can take a considerable amount of time to execute.
 * @sleep_ok determines whether we may sleep while awaiting the response.
 * If sleeping is allowed we use progressive backoff otherwise we spin.
 * Note that passing in a negative @timeout is an alternate mechanism
 * for specifying @sleep_ok=false.  This is useful when a higher level
 * interface allows for specification of @timeout but not @sleep_ok ...
 *
 * Returns 0 on success or a negative errno on failure.  A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error.  In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
			    const void __attribute__((__may_alias__)) *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	u64 res;
	int i, ms;
	unsigned int delay_idx;
	__be64 *temp = (__be64 *)malloc(size * sizeof(char));
	__be64 *p = temp;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	struct mbox_entry entry;
	u32 pcie_fw = 0;

	if (!temp)
		return -ENOMEM;

	if ((size & 15) || size > MBOX_LEN) {
		free(temp);
		return -EINVAL;
	}

	memcpy(p, (const __be64 *)cmd, size);

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

#ifdef T4_OS_NEEDS_MBOX_LOCKING
	/*
	 * Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; ; i += ms) {
		/*
		 * If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rarely
		 * contend on access to the mailbox ...  Also check for a
		 * firmware error which we'll report as a device error.
		 */
		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		if (i > 4 * timeout || (pcie_fw & F_PCIE_FW_ERR)) {
			t4_os_atomic_list_del(&entry, &adap->mbox_list,
					      &adap->mbox_lock);
			t4_report_fw_error(adap);
			free(temp);
			return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
		}

		/*
		 * If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (t4_os_list_first_entry(&adap->mbox_list) == &entry)
			break;

		/*
		 * Delay for a bit before checking again ...
		 */
		ms = delay[delay_idx];	/* last element may repeat */
		if (delay_idx < ARRAY_SIZE(delay) - 1)
			delay_idx++;

		if (sleep_ok)
			msleep(ms);
		else
			rte_delay_ms(ms);
	}
#endif /* T4_OS_NEEDS_MBOX_LOCKING */

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, dequeue ourselves from the
	 * mailbox atomic access list and report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
							 &adap->mbox_list,
							 &adap->mbox_lock));
		t4_report_fw_error(adap);
		free(temp);
		return (v == X_MBOWNER_FW ? -EBUSY : -ETIMEDOUT);
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware.  So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		dev_err(adap, "found VALID command in mbox %u: "
			"%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
			(unsigned long long)t4_read_reg64(adap, data_reg),
			(unsigned long long)t4_read_reg64(adap, data_reg + 8),
			(unsigned long long)t4_read_reg64(adap, data_reg + 16),
			(unsigned long long)t4_read_reg64(adap, data_reg + 24),
			(unsigned long long)t4_read_reg64(adap, data_reg + 32),
			(unsigned long long)t4_read_reg64(adap, data_reg + 40),
			(unsigned long long)t4_read_reg64(adap, data_reg + 48),
			(unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	CXGBE_DEBUG_MBOX(adap, "%s: mbox %u: %016llx %016llx %016llx %016llx "
			 "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
			 (unsigned long long)t4_read_reg64(adap, data_reg),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 56));

	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);	/* flush write */

	delay_idx = 0;
	ms = delay[0];

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	for (i = 0; i < timeout && !(pcie_fw & F_PCIE_FW_ERR); i += ms) {
		ms = delay[delay_idx];	/* last element may repeat */
		if (delay_idx < ARRAY_SIZE(delay) - 1)
			delay_idx++;

		if (sleep_ok)
			msleep(ms);
		else
			rte_delay_ms(ms);

		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;

		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			CXGBE_DEBUG_MBOX(adap,
			"%s: mbox %u: %016llx %016llx %016llx %016llx "
			"%016llx %016llx %016llx %016llx\n", __func__, (mbox),
			(unsigned long long)t4_read_reg64(adap, data_reg),
			(unsigned long long)t4_read_reg64(adap, data_reg + 8),
			(unsigned long long)t4_read_reg64(adap, data_reg + 16),
			(unsigned long long)t4_read_reg64(adap, data_reg + 24),
			(unsigned long long)t4_read_reg64(adap, data_reg + 32),
			(unsigned long long)t4_read_reg64(adap, data_reg + 40),
			(unsigned long long)t4_read_reg64(adap, data_reg + 48),
			(unsigned long long)t4_read_reg64(adap, data_reg + 56));

			CXGBE_DEBUG_MBOX(adap,
				"command %#x completed in %d ms (%ssleeping)\n",
				*(const u8 *)cmd,
				i + ms, sleep_ok ? "" : "non-");

			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl) {
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			}
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			T4_OS_MBOX_LOCKING(
				t4_os_atomic_list_del(&entry, &adap->mbox_list,
						      &adap->mbox_lock));
			free(temp);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command.  Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	dev_err(adap, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
						 &adap->mbox_list,
						 &adap->mbox_lock));
	t4_report_fw_error(adap);
	free(temp);
	return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
}

int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
				       FW_CMD_MAX_TIMEOUT);
}

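/*
 * Most callers below issue commands through t4_wr_mbox(), which is simply
 * t4_wr_mbox_meat() with sleeping allowed.  In the driver proper this is
 * defined in a header; shown here as a minimal sketch.
 */
#ifndef t4_wr_mbox
#define t4_wr_mbox(adap, mbox, cmd, size, rpl) \
	t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true)
#endif
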
/**
 * t4_get_regs_len - return the size of the chip's register set
 * @adapter: the adapter
 *
 * Returns the size of the chip's BAR0 register space.
 */
unsigned int t4_get_regs_len(struct adapter *adapter)
{
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);

	switch (chip_version) {
	case CHELSIO_T5:
	case CHELSIO_T6:
		return T5_REGMAP_SIZE;
	}

	dev_err(adapter,
		"Unsupported chip version %d\n", chip_version);
	return 0;
}

/**
 * t4_get_regs - read chip registers into provided buffer
 * @adap: the adapter
 * @buf: register buffer
 * @buf_size: size (in bytes) of register buffer
 *
 * If the provided register buffer isn't large enough for the chip's
 * full register range, the register dump will be truncated to the
 * register buffer's size.
 */
void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
{
	static const unsigned int t5_reg_ranges[] = {
		/* (T5 register start/last address pairs elided) */
	};

	static const unsigned int t6_reg_ranges[] = {
		/* (T6 register start/last address pairs elided) */
	};

	u32 *buf_end = (u32 *)((char *)buf + buf_size);
	const unsigned int *reg_ranges;
	int reg_ranges_size, range;
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);

	/* Select the right set of register ranges to dump depending on the
	 * adapter chip type.
	 */
	switch (chip_version) {
	case CHELSIO_T5:
		reg_ranges = t5_reg_ranges;
		reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
		break;

	case CHELSIO_T6:
		reg_ranges = t6_reg_ranges;
		reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
		break;

	default:
		dev_err(adap,
			"Unsupported chip version %d\n", chip_version);
		return;
	}

	/* Clear the register buffer and insert the appropriate register
	 * values selected by the above register ranges.
	 */
	memset(buf, 0, buf_size);
	for (range = 0; range < reg_ranges_size; range += 2) {
		unsigned int reg = reg_ranges[range];
		unsigned int last_reg = reg_ranges[range + 1];
		u32 *bufp = (u32 *)((char *)buf + reg);

		/* Iterate across the register range filling in the register
		 * buffer but don't write past the end of the register buffer.
		 */
		while (reg <= last_reg && bufp < buf_end) {
			*bufp++ = t4_read_reg(adap, reg);
			reg += sizeof(u32);
		}
	}
}

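/*
 * Example (illustrative): dump the full register map into a DPDK-allocated
 * buffer sized via t4_get_regs_len().  The caller releases the buffer with
 * rte_free().
 */
static inline void *example_dump_regs(struct adapter *adap)
{
	size_t len = t4_get_regs_len(adap);
	void *buf = rte_zmalloc(NULL, len, 0);

	if (buf)
		t4_get_regs(adap, buf, len);
	return buf;
}
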
/* EEPROM reads take a few tens of us while writes can take a bit over 5 ms. */
#define EEPROM_DELAY		10	/* 10us per poll spin */
#define EEPROM_MAX_POLL		5000	/* x 5000 == 50ms */

#define EEPROM_STAT_ADDR	0x7bfc

/*
 * Small utility function to wait till any outstanding VPD Access is complete.
 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
 * VPD Access in flight.  This allows us to handle the problem of having a
 * previous VPD Access time out and prevent an attempt to inject a new VPD
 * Request before any in-flight VPD request has completed.
 */
static int t4_seeprom_wait(struct adapter *adapter)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int max_poll;

	/* If no VPD Access is in flight, we can just return success right
	 * away.
	 */
	if (!adapter->vpd_busy)
		return 0;

	/* Poll the VPD Capability Address/Flag register waiting for it
	 * to indicate that the operation is complete.
	 */
	max_poll = EEPROM_MAX_POLL;
	do {
		u16 val;

		udelay(EEPROM_DELAY);
		t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);

		/* If the operation is complete, mark the VPD as no longer
		 * busy and return success.
		 */
		if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
			adapter->vpd_busy = 0;
			return 0;
		}
	} while (--max_poll);

	/* Failure!  Note that we leave the VPD Busy status set in order to
	 * avoid pushing a new VPD Access request into the VPD Capability till
	 * the current operation eventually succeeds.  It's a bug to issue a
	 * new request when an existing request is in flight and will result
	 * in corrupt hardware state.
	 */
	return -ETIMEDOUT;
}

/**
 * t4_seeprom_read - read a serial EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM virtual address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
 * VPD capability.  Note that this function must be called with a virtual
 * address.
 */
int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;

	/* VPD Accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/* Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		dev_err(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/* Issue our new VPD Read request, mark the VPD as being busy and wait
	 * for our request to complete.  If it doesn't complete, note the
	 * error and return it to our caller.  Note that we do not reset the
	 * VPD Busy status!
	 */
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = PCI_VPD_ADDR_F;
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		dev_err(adapter, "VPD read of address %#x failed\n", addr);
		return ret;
	}

	/* Grab the returned data, swizzle it into our endianness and
	 * return success.
	 */
	t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
	*data = le32_to_cpu(*data);
	return 0;
}

/**
 * t4_seeprom_write - write a serial EEPROM location
 * @adapter: adapter to write
 * @addr: virtual EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
 * VPD capability.  Note that this function must be called with a virtual
 * address.
 */
int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
{
	unsigned int base = adapter->params.pci.vpd_cap_addr;
	int ret;
	u32 stats_reg = 0;
	int max_poll;

	/* VPD Accesses must always be 4-byte aligned!
	 */
	if (addr >= EEPROMVSIZE || (addr & 3))
		return -EINVAL;

	/* Wait for any previous operation which may still be in flight to
	 * complete.
	 */
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		dev_err(adapter, "VPD still busy from previous operation\n");
		return ret;
	}

	/* Issue our new VPD Write request, mark the VPD as being busy and
	 * wait for our request to complete.  If it doesn't complete, note the
	 * error and return it to our caller.  Note that we do not reset the
	 * VPD Busy status!
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
			     cpu_to_le32(data));
	t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
			     (u16)addr | PCI_VPD_ADDR_F);
	adapter->vpd_busy = 1;
	adapter->vpd_flag = 0;
	ret = t4_seeprom_wait(adapter);
	if (ret) {
		dev_err(adapter, "VPD write of address %#x failed\n", addr);
		return ret;
	}

	/* Reset PCI_VPD_DATA register after a transaction and wait for our
	 * request to complete.  If it doesn't complete, return error.
	 */
	t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
	max_poll = EEPROM_MAX_POLL;
	do {
		udelay(EEPROM_DELAY);
		t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
	} while ((stats_reg & 0x1) && --max_poll);
	if (!max_poll)
		return -ETIMEDOUT;

	/* Return success! */
	return 0;
}

/**
 * t4_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: whether to enable or disable write protection
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, int enable)
{
	return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

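/*
 * Example (illustrative): typical EEPROM update sequence — drop write
 * protection, write a word, then re-enable protection.
 */
static inline int example_seeprom_update(struct adapter *adapter,
					 u32 addr, u32 data)
{
	int ret = t4_seeprom_wp(adapter, 0);	/* disable write protection */

	if (!ret)
		ret = t4_seeprom_write(adapter, addr, data);
	if (!ret)
		ret = t4_seeprom_wp(adapter, 1);	/* re-enable */
	return ret;
}
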
/**
 * t4_config_rss_range - configure a portion of the RSS mapping table
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: virtual interface whose RSS subtable is to be written
 * @start: start entry in the table to write
 * @n: how many table entries to write
 * @rspq: values for the "response queue" (Ingress Queue) lookup table
 * @nrspq: number of values in @rspq
 *
 * Programs the selected part of the VI's RSS mapping table with the
 * provided values.  If @nrspq < @n the supplied values are used repeatedly
 * until the full table range is populated.
 *
 * The caller must ensure the values in @rspq are in the range allowed for
 * @viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers.  These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = 0;
			qbuf[1] = 0;
			qbuf[2] = 0;
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * t4_config_vi_rss - configure per VI RSS settings
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: the VI id
 * @flags: RSS flags
 * @defq: id of the default RSS queue for the VI.
 *
 * Configures VI-specific RSS properties.
 */
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
		     unsigned int flags, unsigned int defq)
{
	struct fw_rss_vi_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
					V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

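/*
 * Example (illustrative): spread a VI's RSS table of tbl_size entries
 * evenly across nq Rx queues whose ingress-queue IDs start at iq0 (the
 * entries repeat automatically since nq < tbl_size), then make queue 0
 * the VI's default queue.  Assumes nq <= 64; flag values are driver
 * policy and omitted here.
 */
static inline int example_setup_rss(struct adapter *adap, int mbox,
				    unsigned int viid, u16 iq0, int nq,
				    int tbl_size)
{
	u16 rspq[64];
	int i, ret;

	for (i = 0; i < nq; i++)
		rspq[i] = iq0 + i;
	ret = t4_config_rss_range(adap, mbox, viid, 0, tbl_size, rspq, nq);
	if (!ret)
		ret = t4_config_vi_rss(adap, mbox, viid, 0, rspq[0]);
	return ret;
}
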
/**
 * init_cong_ctrl - initialize congestion control parameters
 * @a: the alpha values for congestion control
 * @b: the beta values for congestion control
 *
 * Initialize the congestion control parameters.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	int i;

	for (i = 0; i < 9; i++) {
		a[i] = 1;
		b[i] = 0;
	}
	/* (remaining a[]/b[] table entries elided) */
}

#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)

int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p)
{
	u32 cclk_param, cclk_val;
	int ret;

	/*
	 * Ask firmware for the Core Clock since it knows how to translate the
	 * Reference Clock ('V2') VPD field into a Core Clock value ...
	 */
	cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		      V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      1, &cclk_param, &cclk_val);
	if (ret) {
		dev_err(adapter, "%s: error in fetching from coreclock - %d\n",
			__func__, ret);
		return ret;
	}

	p->cclk = cclk_val;
	dev_debug(adapter, "%s: p->cclk = %u\n", __func__, p->cclk);
	return 0;
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,		/* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,		/* program page */
	SF_WR_DISABLE = 4,		/* disable writes */
	SF_RD_STATUS = 5,		/* read status register */
	SF_WR_ENABLE = 6,		/* enable writes */
	SF_RD_DATA_FAST = 0xb,		/* read flash */
	SF_RD_ID = 0x9f,		/* read ID */
	SF_ERASE_SECTOR = 0xd8,		/* erase sector */
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}

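/*
 * Example (illustrative): chain sf1_write()/sf1_read() to issue the
 * SF_RD_ID opcode and pull back the 3-byte JEDEC manufacturer/device ID,
 * mirroring the access pattern used when probing the flash part.
 */
static inline int example_read_flash_id(struct adapter *adapter, u32 *id)
{
	int ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);

	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, id);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	return ret;
}
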
/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (((addr + nwords * sizeof(u32)) > adapter->params.sf_size) ||
	    (addr & 3))
		return -EINVAL;

	addr = rte_constant_bswap32(addr) | SF_RD_DATA_FAST;

	ret = sf1_write(adapter, 4, 1, 0, addr);
	if (ret != 0)
		return ret;

	ret = sf1_read(adapter, 1, 1, 0, data);
	if (ret != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = cpu_to_be32(*data);
	}
	return 0;
}

/**
 * t4_get_exprom_version - return the Expansion ROM version (if any)
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the Expansion ROM header from FLASH and returns the version
 * number (if present) through the @vers return value pointer.  We return
 * this in the Firmware Version Format since it's convenient.  Return
 * 0 on success, -ENOENT if no Expansion ROM is present.
 */
static int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
{
	struct exprom_header {
		unsigned char hdr_arr[16];	/* must start with 0x55aa */
		unsigned char hdr_ver[4];	/* Expansion ROM version */
	} *hdr;
	u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
					   sizeof(u32))];
	int ret;

	ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
			    ARRAY_SIZE(exprom_header_buf),
			    exprom_header_buf, 0);
	if (ret)
		return ret;

	hdr = (struct exprom_header *)exprom_header_buf;
	if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
		return -ENOENT;

	*vers = (V_FW_HDR_FW_VER_MAJOR(hdr->hdr_ver[0]) |
		 V_FW_HDR_FW_VER_MINOR(hdr->hdr_ver[1]) |
		 V_FW_HDR_FW_VER_MICRO(hdr->hdr_ver[2]) |
		 V_FW_HDR_FW_VER_BUILD(hdr->hdr_ver[3]));
	return 0;
}

/**
 * t4_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
static int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
}

/**
 * t4_get_bs_version - read the firmware bootstrap version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW Bootstrap version from flash.
 */
static int t4_get_bs_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
			     offsetof(struct fw_hdr, fw_ver), 1,
			     vers, 0);
}

/**
 * t4_get_tp_version - read the TP microcode version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
static int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}

/**
 * t4_get_version_info - extract various chip/firmware version information
 * @adapter: the adapter
 *
 * Reads various chip/firmware version numbers and stores them into the
 * adapter's Adapter Parameters structure.  If any of the efforts fails
 * the first failure will be returned, but all of the version numbers
 * will be read.
 */
int t4_get_version_info(struct adapter *adapter)
{
	int ret = 0;

#define FIRST_RET(__getvinfo) \
	do { \
		int __ret = __getvinfo; \
		if (__ret && !ret) \
			ret = __ret; \
	} while (0)

	FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
	FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
	FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
	FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));

#undef FIRST_RET

	return ret;
}

/**
 * t4_dump_version_info - dump all of the adapter configuration IDs
 * @adapter: the adapter
 *
 * Dumps all of the various bits of adapter configuration version/revision
 * IDs information.  This is typically called at some point after
 * t4_get_version_info() has been called.
 */
void t4_dump_version_info(struct adapter *adapter)
{
	/*
	 * Device information.
	 */
	dev_info(adapter, "Chelsio rev %d\n",
		 CHELSIO_CHIP_RELEASE(adapter->params.chip));

	/*
	 * Firmware Version.
	 */
	if (!adapter->params.fw_vers)
		dev_warn(adapter, "No firmware loaded\n");
	else
		dev_info(adapter, "Firmware version: %u.%u.%u.%u\n",
			 G_FW_HDR_FW_VER_MAJOR(adapter->params.fw_vers),
			 G_FW_HDR_FW_VER_MINOR(adapter->params.fw_vers),
			 G_FW_HDR_FW_VER_MICRO(adapter->params.fw_vers),
			 G_FW_HDR_FW_VER_BUILD(adapter->params.fw_vers));

	/*
	 * Bootstrap Firmware Version.
	 */
	if (!adapter->params.bs_vers)
		dev_warn(adapter, "No bootstrap loaded\n");
	else
		dev_info(adapter, "Bootstrap version: %u.%u.%u.%u\n",
			 G_FW_HDR_FW_VER_MAJOR(adapter->params.bs_vers),
			 G_FW_HDR_FW_VER_MINOR(adapter->params.bs_vers),
			 G_FW_HDR_FW_VER_MICRO(adapter->params.bs_vers),
			 G_FW_HDR_FW_VER_BUILD(adapter->params.bs_vers));

	/*
	 * TP Microcode Version.
	 */
	if (!adapter->params.tp_vers)
		dev_warn(adapter, "No TP Microcode loaded\n");
	else
		dev_info(adapter, "TP Microcode version: %u.%u.%u.%u\n",
			 G_FW_HDR_FW_VER_MAJOR(adapter->params.tp_vers),
			 G_FW_HDR_FW_VER_MINOR(adapter->params.tp_vers),
			 G_FW_HDR_FW_VER_MICRO(adapter->params.tp_vers),
			 G_FW_HDR_FW_VER_BUILD(adapter->params.tp_vers));

	/*
	 * Expansion ROM version.
	 */
	if (!adapter->params.er_vers)
		dev_info(adapter, "No Expansion ROM loaded\n");
	else
		dev_info(adapter, "Expansion ROM version: %u.%u.%u.%u\n",
			 G_FW_HDR_FW_VER_MAJOR(adapter->params.er_vers),
			 G_FW_HDR_FW_VER_MINOR(adapter->params.er_vers),
			 G_FW_HDR_FW_VER_MICRO(adapter->params.er_vers),
			 G_FW_HDR_FW_VER_BUILD(adapter->params.er_vers));
}

#define ADVERT_MASK (V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED) | \
		     FW_PORT_CAP_ANEG)

/**
 * t4_link_l1cfg - apply link configuration to MAC/PHY
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: the port to configure
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
	unsigned int fc, fec;

	fc = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	fec = 0;
	if (lc->requested_fec & FEC_RS)
		fec |= FW_PORT_CAP_FEC_RS;
	if (lc->requested_fec & FEC_BASER_RS)
		fec |= FW_PORT_CAP_FEC_BASER_RS;
	if (lc->requested_fec & FEC_RESERVED)
		fec |= FW_PORT_CAP_FEC_RESERVED;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 =
		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			    FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
					     fc | fec);
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = lc->requested_fec;
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc |
					     fec | mdi);
		lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
		lc->fec = lc->requested_fec;
	} else {
		c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | fec | mdi);
	}

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_flash_cfg_addr - return the address of the flash configuration file
 * @adapter: the adapter
 *
 * Return the address within the flash where the Firmware Configuration
 * File is stored, or an error if the device FLASH is too small to contain
 * a Firmware Configuration File.
 */
int t4_flash_cfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
		return -ENOSPC;

	return FLASH_CFG_START;
}

#define PF_INTR_MASK (F_PFSW | F_PFCIM)

/**
 * t4_intr_enable - enable interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Enable PF-specific interrupts for the calling function and the top-level
 * interrupt concentrator for global interrupts.  Interrupts are already
 * enabled at each module, here we just enable the roots of the interrupt
 * hierarchies.
 *
 * Note: this function should be called only when the driver manages
 * non PF-specific interrupts from the various HW modules.  Only one PCI
 * function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	u32 val = 0;
	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
		 G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);

	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
		     F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
		     F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
}

/**
 * t4_intr_disable - disable interrupts
 * @adapter: the adapter whose interrupts should be disabled
 *
 * Disable interrupts.  We only disable the top-level interrupt
 * concentrators.  The caller must be a PCI function managing global
 * interrupts.
 */
void t4_intr_disable(struct adapter *adapter)
{
	u32 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
		 G_SOURCEPF(whoami) : G_T6_SOURCEPF(whoami);

	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
}

/**
 * t4_get_port_type_description - return Port Type string description
 * @port_type: firmware Port Type enumeration
 */
const char *t4_get_port_type_description(enum fw_port_type port_type)
{
	static const char * const port_type_description[] = {
		/* (port type name strings elided) */
	};

	if (port_type < ARRAY_SIZE(port_type_description))
		return port_type_description[port_type];
	return "UNKNOWN";
}

/**
 * t4_get_mps_bg_map - return the buffer groups associated with a port
 * @adap: the adapter
 * @pidx: the port index
 *
 * Returns a bitmap indicating which MPS buffer groups are associated
 * with the given port.  Bit i is set if buffer group i is used by the
 * port.
 */
unsigned int t4_get_mps_bg_map(struct adapter *adap, unsigned int pidx)
{
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
	unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adap,
							  A_MPS_CMN_CTL));

	if (pidx >= nports) {
		dev_warn(adap, "MPS Port Index %d >= Nports %d\n",
			 pidx, nports);
		return 0;
	}

	switch (chip_version) {
	case CHELSIO_T4:
	case CHELSIO_T5:
		switch (nports) {
		case 1: return 0xf;
		case 2: return 3 << (2 * pidx);
		case 4: return 1 << pidx;
		}
		break;

	case CHELSIO_T6:
		switch (nports) {
		case 2: return 1 << (2 * pidx);
		}
		break;
	}

	dev_err(adap, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
		chip_version, nports);
	return 0;
}

/**
 * t4_get_tp_ch_map - return TP ingress channels associated with a port
 * @adapter: the adapter
 * @pidx: the port index
 *
 * Returns a bitmap indicating which TP Ingress Channels are associated with
 * a given Port.  Bit i is set if TP Ingress Channel i is used by the Port.
 */
unsigned int t4_get_tp_ch_map(struct adapter *adapter, unsigned int pidx)
{
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
	unsigned int nports = 1 << G_NUMPORTS(t4_read_reg(adapter,
							  A_MPS_CMN_CTL));

	if (pidx >= nports) {
		dev_warn(adapter, "TP Port Index %d >= Nports %d\n",
			 pidx, nports);
		return 0;
	}

	switch (chip_version) {
	case CHELSIO_T4:
	case CHELSIO_T5:
		/* Note that this happens to be the same values as the MPS
		 * Buffer Group Map for these Chips.  But we replicate the code
		 * here because they're really separate concepts.
		 */
		switch (nports) {
		case 1: return 0xf;
		case 2: return 3 << (2 * pidx);
		case 4: return 1 << pidx;
		}
		break;

	case CHELSIO_T6:
		switch (nports) {
		case 2: return 1 << pidx;
		}
		break;
	}

	dev_err(adapter, "Need TP Channel Map for Chip %0x, Nports %d\n",
		chip_version, nports);
	return 0;
}

/**
 * t4_get_port_stats - collect port statistics
 * @adap: the adapter
 * @idx: the port index
 * @p: the stats structure to fill
 *
 * Collect statistics related to the given port from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);

#define GET_STAT(name) \
	t4_read_reg64(adap, \
		      (is_t4(adap->params.chip) ? \
		       PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
		       T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->tx_octets = GET_STAT(TX_PORT_BYTES);
	p->tx_frames = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64 = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop = GET_STAT(TX_PORT_DROP);
	p->tx_pause = GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);

	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATTX) {
			p->tx_frames -= p->tx_pause;
			p->tx_octets -= p->tx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCTX)
			p->tx_mcast_frames -= p->tx_pause;
	}

	p->rx_octets = GET_STAT(RX_PORT_BYTES);
	p->rx_frames = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64 = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause = GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);

	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & F_COUNTPAUSESTATRX) {
			p->rx_frames -= p->rx_pause;
			p->rx_octets -= p->rx_pause * 64;
		}
		if (stat_ctl & F_COUNTPAUSEMCRX)
			p->rx_mcast_frames -= p->rx_pause;
	}

	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 * t4_get_port_stats_offset - collect port stats relative to a previous snapshot
 * @adap: The adapter
 * @idx: The port index
 * @stats: Current stats to fill
 * @offset: Previous stats snapshot
 */
void t4_get_port_stats_offset(struct adapter *adap, int idx,
			      struct port_stats *stats,
			      struct port_stats *offset)
{
	u64 *s, *o;
	unsigned int i;

	t4_get_port_stats(adap, idx, stats);
	for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
	     i < (sizeof(struct port_stats) / sizeof(u64));
	     i++, s++, o++)
		*s -= *o;
}

/**
 * t4_clr_port_stats - clear port statistics
 * @adap: the adapter
 * @idx: the port index
 *
 * Clear HW statistics for the given port.
 */
void t4_clr_port_stats(struct adapter *adap, int idx)
{
	unsigned int i;
	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	u32 port_base_addr;

	if (is_t4(adap->params.chip))
		port_base_addr = PORT_BASE(idx);
	else
		port_base_addr = T5_PORT_BASE(idx);

	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
	     i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
		t4_write_reg(adap, port_base_addr + i, 0);
	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
	     i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
		t4_write_reg(adap, port_base_addr + i, 0);
	for (i = 0; i < 4; i++)
		if (bgmap & (1 << i)) {
			t4_write_reg(adap,
				     A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     i * 8, 0);
			t4_write_reg(adap,
				     A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     i * 8, 0);
		}
}

/**
 * t4_fw_hello - establish communication with FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @evt_mbox: mailbox to receive async FW events
 * @master: specifies the caller's willingness to be the device master
 * @state: returns the current device state (if non-NULL)
 *
 * Issues a command to establish communication with FW.  Returns either
 * an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = cpu_to_be32(
			V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
			V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
			V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
						M_FW_HELLO_CMD_MBMASTER) |
			V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
			V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
			F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so ...
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
			t4_report_fw_error(adap);
		return ret;
	}

	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR | F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR | F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;
					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition;
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's now a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	return master_mbox;
}

/**
 * t4_fw_bye - end communication with FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *
 * Issues a command to terminate communication with FW.
 */
int t4_fw_bye(struct adapter *adap, unsigned int mbox)
{
	struct fw_bye_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, BYE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_fw_reset - issue a reset to FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @reset: specifies the type of reset to perform
 *
 * Issues a reset command of the specified type to FW.
 */
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, RESET, WRITE);
	c.val = cpu_to_be32(reset);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @force: force uP into RESET even if FW RESET command fails
 *
 * Issues a RESET command to firmware (if desired) with a HALT indication
 * and then puts the microprocessor into RESET state.  The RESET command
 * will only be issued if a legitimate mailbox is provided (mbox <=
 * M_PCIE_FW_MASTER).
 *
 * This is generally used in order for the host to safely manipulate the
 * adapter without fear of conflicting with whatever the firmware might
 * be doing.  The only way out of this state is to RESTART the firmware
 * ...
 */
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= M_PCIE_FW_MASTER) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
				 F_PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}

/**
 * t4_fw_restart - restart the firmware by taking the uP out of RESET
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @reset: if we want to do a RESET to restart things
 *
 * Restart firmware previously halted by t4_fw_halt().  On successful
 * return the previous PF Master remains as the new PF Master and there
 * is no need to issue a new HELLO command, etc.
 *
 * We do this in two ways:
 *
 * 1. If we're dealing with newer firmware we'll simply want to take
 *    the chip's microprocessor out of RESET.  This will cause the
 *    firmware to start up from its start vector.  And then we'll loop
 *    until the firmware indicates it's started again (PCIE_FW.HALT
 *    reset to 0) or we timeout.
 *
 * 2. If we're dealing with older firmware then we'll need to RESET
 *    the chip since older firmware won't recognize the PCIE_FW.HALT
 *    flag and automatically RESET itself on startup.
 */
int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= M_PCIE_FW_MASTER) {
			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
			msleep(100);
			if (t4_fw_reset(adap, mbox,
					F_PIORST | F_PIORSTMODE) == 0)
				return 0;
		}

		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
		msleep(2000);
	} else {
		int ms;

		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
				return FW_SUCCESS;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}
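
/*
 * Usage sketch (illustrative, not part of the original source): the
 * halt/restart pair is meant to bracket host-side manipulation of the
 * adapter, e.g. loading a new firmware image.  A hypothetical caller:
 *
 *	ret = t4_fw_halt(adap, mbox, 0);
 *	if (ret < 0)
 *		return ret;
 *	... manipulate the adapter, e.g. write new firmware to flash ...
 *	ret = t4_fw_restart(adap, mbox, 1);
 *
 * Passing reset=1 takes the PL_RST path above and therefore also works
 * with older firmware that doesn't understand PCIE_FW.HALT.
 */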
/**
 * t4_fl_pkt_align - return the fl packet alignment
 * @adap: the adapter
 *
 * T4 has a single field to specify the packing and padding boundary.
 * T5 onwards has separate fields for this and hence the alignment for
 * next packet offset is maximum of these two.
 */
int t4_fl_pkt_align(struct adapter *adap)
{
	u32 sge_control, sge_control2;
	unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;

	sge_control = t4_read_reg(adap, A_SGE_CONTROL);

	/* T4 uses a single control field to specify both the PCIe Padding and
	 * Packing Boundary.  T5 introduced the ability to specify these
	 * separately.  The actual Ingress Packet Data alignment boundary
	 * within Packed Buffer Mode is the maximum of these two
	 * specifications.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		ingpad_shift = X_INGPADBOUNDARY_SHIFT;
	else
		ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;

	ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);

	fl_align = ingpadboundary;
	if (!is_t4(adap->params.chip)) {
		sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
		ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
		if (ingpackboundary == X_INGPACKBOUNDARY_16B)
			ingpackboundary = 16;
		else
			ingpackboundary = 1 << (ingpackboundary +
						X_INGPACKBOUNDARY_SHIFT);

		fl_align = max(ingpadboundary, ingpackboundary);
	}
	return fl_align;
}
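
/*
 * Usage sketch (illustrative): SGE setup code would use this alignment
 * when sizing free-list buffers, rounding a buffer length up to the next
 * packet-offset boundary:
 *
 *	int fl_align = t4_fl_pkt_align(adap);
 *	len = (len + fl_align - 1) & ~(fl_align - 1);
 *
 * This mirrors the rounding applied to A_SGE_FL_BUFFER_SIZE[23] in
 * t4_fixup_host_params_compat() below.
 */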
/**
 * t4_fixup_host_params_compat - fix up host-dependent parameters
 * @adap: the adapter
 * @page_size: the host's Base Page Size
 * @cache_line_size: the host's Cache Line Size
 * @chip_compat: maintain compatibility with designated chip
 *
 * Various registers in the chip contain values which are dependent on the
 * host's Base Page and Cache Line Sizes.  This function will fix all of
 * those registers with the appropriate values as passed in ...
 *
 * @chip_compat is used to limit the set of changes that are made
 * to be compatible with the indicated chip release.  This is used by
 * drivers to maintain compatibility with chip register settings when
 * the drivers haven't [yet] been updated with new chip support.
 */
int t4_fixup_host_params_compat(struct adapter *adap,
				unsigned int page_size,
				unsigned int cache_line_size,
				enum chip_type chip_compat)
{
	unsigned int page_shift = cxgbe_fls(page_size) - 1;
	unsigned int sge_hps = page_shift - 10;
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = cxgbe_fls(fl_align) - 1;

	t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
		     V_HOSTPAGESIZEPF0(sge_hps) |
		     V_HOSTPAGESIZEPF1(sge_hps) |
		     V_HOSTPAGESIZEPF2(sge_hps) |
		     V_HOSTPAGESIZEPF3(sge_hps) |
		     V_HOSTPAGESIZEPF4(sge_hps) |
		     V_HOSTPAGESIZEPF5(sge_hps) |
		     V_HOSTPAGESIZEPF6(sge_hps) |
		     V_HOSTPAGESIZEPF7(sge_hps));

	if (is_t4(adap->params.chip) || is_t4(chip_compat)) {
		t4_set_reg_field(adap, A_SGE_CONTROL,
				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
				 F_EGRSTATUSPAGESIZE,
				 V_INGPADBOUNDARY(fl_align_log -
						  X_INGPADBOUNDARY_SHIFT) |
				 V_EGRSTATUSPAGESIZE(stat_len != 64));
	} else {
		unsigned int pack_align;
		unsigned int ingpad, ingpack;
		unsigned int pcie_cap;

		/*
		 * T5 introduced the separation of the Free List Padding and
		 * Packing Boundaries.  Thus, we can select a smaller Padding
		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
		 * Bandwidth, and use a Packing Boundary which is large enough
		 * to avoid false sharing between CPUs, etc.
		 *
		 * For the PCI Link, the smaller the Padding Boundary the
		 * better.  For the Memory Controller, a smaller Padding
		 * Boundary is better until we cross under the Memory Line
		 * Size (the minimum unit of transfer to/from Memory).  If we
		 * have a Padding Boundary which is smaller than the Memory
		 * Line Size, that'll involve a Read-Modify-Write cycle on the
		 * Memory Controller which is never good.
		 */

		/* We want the Packing Boundary to be based on the Cache Line
		 * Size in order to help avoid False Sharing performance
		 * issues between CPUs, etc.  We also want the Packing
		 * Boundary to incorporate the PCI-E Maximum Payload Size.  We
		 * get best performance when the Packing Boundary is a
		 * multiple of the Maximum Payload Size.
		 */
		pack_align = fl_align;
		pcie_cap = t4_os_find_pci_capability(adap, PCI_CAP_ID_EXP);
		if (pcie_cap) {
			unsigned int mps, mps_log;
			u16 devctl;

			/* The PCIe Device Control Maximum Payload Size field
			 * [bits 7:5] encodes sizes as powers of 2 starting at
			 * 128 bytes.
			 */
			t4_os_pci_read_cfg2(adap, pcie_cap + PCI_EXP_DEVCTL,
					    &devctl);
			mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
			mps = 1 << mps_log;
			if (mps > pack_align)
				pack_align = mps;
		}

		/*
		 * N.B. T5 has a different interpretation of the "0" value for
		 * the Packing Boundary.  This corresponds to 16 bytes instead
		 * of the expected 32 bytes.  We never have a Packing Boundary
		 * less than 32 bytes so we can't use that special value but
		 * on the other hand, if we wanted 32 bytes, the best we can
		 * really do is 64 bytes ...
		 */
		if (pack_align <= 16) {
			ingpack = X_INGPACKBOUNDARY_16B;
			fl_align = 16;
		} else if (pack_align == 32) {
			ingpack = X_INGPACKBOUNDARY_64B;
			fl_align = 64;
		} else {
			unsigned int pack_align_log = cxgbe_fls(pack_align) - 1;

			ingpack = pack_align_log - X_INGPACKBOUNDARY_SHIFT;
			fl_align = pack_align;
		}

		/* Use the smallest Ingress Padding which isn't smaller than
		 * the Memory Controller Read/Write Size.  We'll take that as
		 * being 8 bytes since we don't know of any system with a
		 * wider Memory Controller Bus Width.
		 */
		if (is_t5(adap->params.chip))
			ingpad = X_INGPADBOUNDARY_32B;
		else
			ingpad = X_T6_INGPADBOUNDARY_8B;

		t4_set_reg_field(adap, A_SGE_CONTROL,
				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
				 F_EGRSTATUSPAGESIZE,
				 V_INGPADBOUNDARY(ingpad) |
				 V_EGRSTATUSPAGESIZE(stat_len != 64));
		t4_set_reg_field(adap, A_SGE_CONTROL2,
				 V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
				 V_INGPACKBOUNDARY(ingpack));
	}

	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * The first four entries are:
	 *
	 *   0: Host Page Size
	 *   1: 64KB
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align - 1)
		     & ~(fl_align - 1));
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align - 1)
		     & ~(fl_align - 1));

	t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));

	return 0;
}
/**
 * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
 * @adap: the adapter
 * @page_size: the host's Base Page Size
 * @cache_line_size: the host's Cache Line Size
 *
 * Various registers in T4 contain values which are dependent on the
 * host's Base Page and Cache Line Sizes.  This function will fix all of
 * those registers with the appropriate values as passed in ...
 *
 * This routine makes changes which are compatible with T4 chips.
 */
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size)
{
	return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
					   T5_LAST_REV);
}
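
/*
 * Usage sketch (illustrative): a host driver would call this once during
 * bring-up with its own page and cache line sizes, e.g.:
 *
 *	err = t4_fixup_host_params(adap, 4096, 64);
 *
 * With page_size = 4096, page_shift is 12 and sge_hps is 2, i.e. the
 * HOST_PAGE_SIZE fields encode the page size as log2(size) - 10.
 */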
/**
 * t4_fw_initialize - ask FW to initialize the device
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *
 * Issues a command to FW to partially initialize the device.  This
 * performs initialization that generally doesn't depend on user input.
 */
int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
 * t4_query_params_rw - query FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 * @rw: Write and read flag
 *
 * Reads the value of FW or device parameters.  Up to 7 parameters can be
 * queried at once.
 */
static int t4_query_params_rw(struct adapter *adap, unsigned int mbox,
			      unsigned int pf, unsigned int vf,
			      unsigned int nparams, const u32 *params,
			      u32 *val, int rw)
{
	unsigned int i;
	int ret;
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	for (i = 0; i < nparams; i++) {
		*p++ = cpu_to_be32(*params++);
		if (rw)
			*p = cpu_to_be32(*(val + i));
		p++;
	}

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
			*val++ = be32_to_cpu(*p);
	return ret;
}
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
}
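
/*
 * Usage sketch (illustrative): querying a single device parameter.  The
 * mnemonic is composed from macros in t4fw_interface.h; the specific
 * names below are assumptions for illustration:
 *
 *	u32 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *		    V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
 *	u32 portvec;
 *	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
 *			      &param, &portvec);
 */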
/**
 * t4_set_params_timeout - sets FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 * @timeout: the timeout time
 *
 * Sets the value of FW or device parameters.  Up to 7 parameters can be
 * specified at once.
 */
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
			  unsigned int pf, unsigned int vf,
			  unsigned int nparams, const u32 *params,
			  const u32 *val, int timeout)
{
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	while (nparams--) {
		*p++ = cpu_to_be32(*params++);
		*p++ = cpu_to_be32(*val++);
	}

	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
}
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val)
{
	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
				     FW_CMD_MAX_TIMEOUT);
}
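
/*
 * Usage sketch (illustrative): setting a parameter mirrors the query
 * path, with @params and @val arrays of equal length:
 *
 *	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
 *			    &param, &val);
 */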
/**
 * t4_alloc_vi_func - allocate a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @nmac: number of MAC addresses needed (1 to 5)
 * @mac: the MAC addresses of the VI
 * @rss_size: size of RSS table slice associated with this VI
 * @portfunc: which Port Application Function MAC Address is desired
 * @idstype: Intrusion Detection Type
 *
 * Allocates a virtual interface for the given physical port.  If @mac is
 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
 * @mac should be large enough to hold @nmac Ethernet addresses, they are
 * stored consecutively so the space needed is @nmac * 6 bytes.
 * Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
		     unsigned int port, unsigned int pf, unsigned int vf,
		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
		     unsigned int portfunc, unsigned int idstype)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
				     V_FW_VI_CMD_FUNC(portfunc));
	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
	c.nmac = nmac - 1;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* FALLTHROUGH */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* FALLTHROUGH */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* FALLTHROUGH */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
	return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
}
/**
 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @nmac: number of MAC addresses needed (1 to 5)
 * @mac: the MAC addresses of the VI
 * @rss_size: size of RSS table slice associated with this VI
 *
 * Backwards compatible and convenience routine to allocate a Virtual
 * Interface with an Ethernet Port Application Function and Intrusion
 * Detection System disabled.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
				FW_VI_FUNC_ETH, 0);
}
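
/*
 * Usage sketch (illustrative): allocating one Ethernet VI on physical
 * port 0 and retrieving its firmware-assigned MAC address and RSS slice
 * size:
 *
 *	u8 mac[6];
 *	unsigned int rss_size;
 *	int viid = t4_alloc_vi(adap, mbox, 0, pf, vf, 1, mac, &rss_size);
 *	if (viid < 0)
 *		return viid;
 */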
/**
 * t4_free_vi - free a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @viid: virtual interface identifier
 *
 * Free a previously allocated virtual interface.
 */
int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int viid)
{
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_EXEC | V_FW_VI_CMD_PFN(pf) |
				  V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
}
/**
 * t4_set_rxmode - set Rx properties of a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @mtu: the new MTU or -1
 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
 *          -1 no change
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sets Rx properties of a virtual interface.
 */
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok)
{
	struct fw_vi_rxmode_cmd c;

	/* convert to FW values */
	if (mtu < 0)
		mtu = M_FW_VI_RXMODE_CMD_MTU;
	if (promisc < 0)
		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
	if (all_multi < 0)
		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
	if (bcast < 0)
		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
	if (vlanex < 0)
		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_RXMODE_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.mtu_to_vlanexen = cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
					V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
					V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
					V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
					V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
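
/*
 * Usage sketch (illustrative): enabling promiscuous mode while leaving
 * the MTU, all-multi, broadcast and VLAN-extraction settings untouched
 * (-1 means "no change" for each field):
 *
 *	ret = t4_set_rxmode(adap, adap->mbox, viid, -1, 1, -1, -1, -1,
 *			    true);
 */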
/**
 * t4_change_mac - modifies the exact-match filter for a MAC address
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: whether a new MAC allocation should be persistent
 * @add_smt: if true also add the address to the HW SMT
 *
 * Modifies an exact-match filter and sets it to the new MAC address if
 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0.  In the
 * latter case the address is added persistently if @persist is %true.
 *
 * Note that in general it is not possible to modify the value of a given
 * filter so the generic way to modify an address filter is to free the one
 * being used by the old address value and allocate a new filter for the
 * new address value.
 *
 * Returns a negative error number or the index of the filter with the new
 * MAC value.  Note that this index may differ from @idx.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	int max_mac_addr = adap->params.arch.mps_tcam_size;

	if (idx < 0)                             /* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				      V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
	}
	return ret;
}
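
/*
 * Usage sketch (illustrative): a first call with idx = -1 allocates a
 * filter for a new address; the returned index can be passed back later
 * to move that same filter to a different address:
 *
 *	int idx = t4_change_mac(adap, mbox, viid, -1, mac0, true, true);
 *	...
 *	idx = t4_change_mac(adap, mbox, viid, idx, mac1, true, true);
 */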
/**
 * t4_enable_vi_params - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
 *
 * Enables/disables a virtual interface.  Note that setting DCB Enable
 * only makes sense when enabling a Virtual Interface ...
 */
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
				     V_FW_VI_ENABLE_CMD_EEN(tx_en) |
				     V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
				     FW_LEN16(c));
	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}
/**
 * t4_enable_vi - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 *
 * Enables/disables a virtual interface.  Note that setting DCB Enable
 * only makes sense when enabling a Virtual Interface ...
 */
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en)
{
	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
}
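
/*
 * Usage sketch (illustrative): a device start path would enable both
 * directions and a stop path disable them:
 *
 *	ret = t4_enable_vi(adap, adap->mbox, pi->viid, true, true);
 *	...
 *	ret = t4_enable_vi(adap, adap->mbox, pi->viid, false, false);
 *
 * The pi->viid field is an assumption about the caller's port_info
 * bookkeeping.
 */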
/**
 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @start: %true to enable the queues, %false to disable them
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Starts or stops an ingress queue and its associated FLs, if any.
 */
int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
		     unsigned int pf, unsigned int vf, unsigned int iqid,
		     unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
				  V_FW_IQ_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) |
				       V_FW_IQ_CMD_IQSTOP(!start) |
				       FW_LEN16(c));
	c.iqid = cpu_to_be16(iqid);
	c.fl0id = cpu_to_be16(fl0id);
	c.fl1id = cpu_to_be16(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
 * t4_iq_free - free an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
				  V_FW_IQ_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
	c.iqid = cpu_to_be16(iqid);
	c.fl0id = cpu_to_be16(fl0id);
	c.fl1id = cpu_to_be16(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
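
/*
 * Usage sketch (illustrative): tearing down an ingress queue that has a
 * single attached free list; 0xffff marks the absent FL1.  The rxq field
 * names are assumptions about the caller's queue bookkeeping:
 *
 *	ret = t4_iq_free(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP,
 *			 rxq->rspq.cntxt_id, rxq->fl.cntxt_id, 0xffff);
 */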
/**
 * t4_eth_eq_free - free an Ethernet egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid)
{
	struct fw_eq_eth_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_ETH_CMD_PFN(pf) |
				  V_FW_EQ_ETH_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	/*
	 * This might be a port command ... this simplifies the following
	 * conditionals ...  We can get away with pre-dereferencing
	 * action_to_len16 because it's in the first 16 bytes and all messages
	 * will be at least that long.
	 */
	const struct fw_port_cmd *p = (const void *)rpl;
	unsigned int action =
		G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));

	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
		/* link/module state change message */
		unsigned int speed = 0, fc = 0, i;
		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
		struct port_info *pi = NULL;
		struct link_config *lc;
		u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);

		if (stat & F_FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & F_FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = ETH_SPEED_NUM_100M;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = ETH_SPEED_NUM_1G;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = ETH_SPEED_NUM_10G;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_25G))
			speed = ETH_SPEED_NUM_25G;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
			speed = ETH_SPEED_NUM_40G;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100G))
			speed = ETH_SPEED_NUM_100G;

		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->tx_chan == chan)
				break;
		}
		lc = &pi->link_cfg;

		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, i);
		}
		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {                 /* something changed */
			if (!link_ok && lc->link_ok) {
				static const char * const reason[] = {
					"Link Down",
					"Remote Fault",
					"Auto-negotiation Failure",
					"Reserved",
					"Insufficient Airflow",
					"Unable To Determine Reason",
					"No RX Signal Detected",
					"Reserved",
				};
				unsigned int rc = G_FW_PORT_CMD_LINKDNRC(stat);

				dev_warn(adap, "Port %d link down, reason: %s\n",
					 chan, reason[rc]);
			}
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			lc->supported = be16_to_cpu(p->u.info.pcap);
		}
	} else {
		dev_warn(adap, "Unknown firmware reply %d\n", opcode);
		return -EINVAL;
	}
	return 0;
}
void t4_reset_link_config(struct adapter *adap, int idx)
{
	struct port_info *pi = adap2pinfo(adap, idx);
	struct link_config *lc = &pi->link_cfg;

	lc->link_ok = 0;
	lc->requested_speed = 0;
	lc->requested_fc = 0;
	lc->speed = 0;
	lc->fc = 0;
}
/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @pcaps: link Port Capabilities
 * @acaps: link current Advertised Port Capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void init_link_config(struct link_config *lc, unsigned int pcaps,
			     unsigned int acaps)
{
	unsigned int fec = 0;

	lc->supported = pcaps;
	lc->requested_speed = 0;
	lc->speed = 0;
	lc->requested_fc = 0;
	lc->fc = 0;

	/*
	 * For Forward Error Control, we default to whatever the Firmware
	 * tells us the Link is currently advertising.
	 */
	if (acaps & FW_PORT_CAP_FEC_RS)
		fec |= FEC_RS;
	if (acaps & FW_PORT_CAP_FEC_BASER_RS)
		fec |= FEC_BASER_RS;
	if (acaps & FW_PORT_CAP_FEC_RESERVED)
		fec |= FEC_RESERVED;
	lc->requested_fec = fec;
	lc->fec = fec;

	if (lc->supported & FW_PORT_CAP_ANEG) {
		lc->advertising = lc->supported & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}
/**
 * t4_wait_dev_ready - wait until reads of registers work
 *
 * Right after the device is RESET it can take a small amount of time
 * for it to respond to register reads.  Until then, all reads will
 * return either 0xff...ff or 0xee...ee.  Return an error if reads
 * don't work within a reasonable time frame.
 */
static int t4_wait_dev_ready(struct adapter *adapter)
{
	u32 whoami;

	whoami = t4_read_reg(adapter, A_PL_WHOAMI);

	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
		return 0;

	msleep(500);
	whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
		return 0;

	dev_err(adapter, "Device didn't become ready for access, whoami = %#x\n",
		whoami);
	return -EIO;
}
struct flash_desc {
	u32 vendor_and_model_id;
	u32 size_mb;
};

int t4_get_flash_params(struct adapter *adapter)
{
	/*
	 * Table for non-Numonix supported flash parts.  Numonix parts are left
	 * to the preexisting well-tested code.  All flash parts have 64KB
	 * sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x00150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 flashid = 0;
	unsigned int part, manufacturer;
	unsigned int density, size;

	/*
	 * Issue a Read ID Command to the Flash part.  We decode supported
	 * Flash parts and their sizes from this.  There's a newer Query
	 * Command which can retrieve detailed geometry information but
	 * many Flash parts don't support it.
	 */
	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &flashid);
	t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
	if (ret < 0)
		return ret;

	for (part = 0; part < ARRAY_SIZE(supported_flash); part++) {
		if (supported_flash[part].vendor_and_model_id == flashid) {
			adapter->params.sf_size =
				supported_flash[part].size_mb;
			adapter->params.sf_nsec =
				adapter->params.sf_size / SF_SEC_SIZE;
			goto found;
		}
	}

	manufacturer = flashid & 0xff;
	switch (manufacturer) {
	case 0x20: { /* Micron/Numonix */
		/*
		 * This Density -> Size decoding table is taken from Micron
		 * Data Sheets.
		 */
		density = (flashid >> 16) & 0xff;
		switch (density) {
		case 0x14:
			size = 1 << 20; /* 1MB */
			break;
		case 0x15:
			size = 1 << 21; /* 2MB */
			break;
		case 0x16:
			size = 1 << 22; /* 4MB */
			break;
		case 0x17:
			size = 1 << 23; /* 8MB */
			break;
		case 0x18:
			size = 1 << 24; /* 16MB */
			break;
		case 0x19:
			size = 1 << 25; /* 32MB */
			break;
		case 0x20:
			size = 1 << 26; /* 64MB */
			break;
		case 0x21:
			size = 1 << 27; /* 128MB */
			break;
		case 0x22:
			size = 1 << 28; /* 256MB */
			break;
		default:
			dev_err(adapter, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
				flashid, density);
			return -EINVAL;
		}

		adapter->params.sf_size = size;
		adapter->params.sf_nsec = size / SF_SEC_SIZE;
		break;
	}
	default:
		dev_err(adapter, "Unsupported Flash Part, ID = %#x\n", flashid);
		return -EINVAL;
	}

found:
	/*
	 * We should reject adapters with FLASHes which are too small. So, emit
	 * a warning.
	 */
	if (adapter->params.sf_size < FLASH_MIN_SIZE)
		dev_warn(adapter, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
			 flashid, adapter->params.sf_size, FLASH_MIN_SIZE);

	return 0;
}
static void set_pcie_completion_timeout(struct adapter *adapter,
					u8 range)
{
	u32 pcie_cap;
	u16 val;

	pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
		val &= 0xfff0;
		val |= range;
		t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
	}
}
/**
 * t4_get_chip_type - Determine chip type from device ID
 * @adap: the adapter
 * @ver: adapter version
 */
int t4_get_chip_type(struct adapter *adap, int ver)
{
	enum chip_type chip = 0;
	u32 pl_rev = G_REV(t4_read_reg(adap, A_PL_REV));

	/* Retrieve adapter's device ID */
	switch (ver) {
	case CHELSIO_T5:
		chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		break;
	case CHELSIO_T6:
		chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
		break;
	default:
		dev_err(adap, "Device %d is not supported\n",
			adap->params.pci.device_id);
		return -EINVAL;
	}

	return chip;
}
/**
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter)
{
	int ret, ver;
	u32 pl_rev;

	ret = t4_wait_dev_ready(adapter);
	if (ret < 0)
		return ret;

	pl_rev = G_REV(t4_read_reg(adapter, A_PL_REV));
	adapter->params.pci.device_id = adapter->pdev->id.device_id;
	adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;

	/*
	 * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
	 * ADAPTER (VERSION << 4 | REVISION)
	 */
	ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
	adapter->params.chip = 0;
	switch (ver) {
	case CHELSIO_T5:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
		adapter->params.arch.mps_tcam_size =
			NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.vfcount = 128;
		break;
	case CHELSIO_T6:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
		adapter->params.arch.sge_fl_db = 0;
		adapter->params.arch.mps_tcam_size =
			NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 256;
		adapter->params.arch.nchan = 2;
		adapter->params.arch.vfcount = 256;
		break;
	default:
		dev_err(adapter, "%s: Device %d is not supported\n",
			__func__, adapter->params.pci.device_id);
		return -EINVAL;
	}

	adapter->params.pci.vpd_cap_addr =
		t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);

	ret = t4_get_flash_params(adapter);
	if (ret < 0) {
		dev_err(adapter, "Unable to retrieve Flash Parameters, ret = %d\n",
			-ret);
		return ret;
	}

	adapter->params.cim_la_size = CIMLA_SIZE;

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}
/**
 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
 * @adapter: the adapter
 * @qid: the Queue ID
 * @qtype: the Ingress or Egress type for @qid
 * @pbar2_qoffset: BAR2 Queue Offset
 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 * Returns the BAR2 SGE Queue Registers information associated with the
 * indicated Absolute Queue ID.  These are passed back in return value
 * pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 * This may return an error which indicates that BAR2 SGE Queue
 * registers aren't available.  If an error is not returned, then the
 * following values are returned:
 *
 *   *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *   *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 * require the "Inferred Queue ID" ability may be used.  E.g. the
 * Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 * then these "Inferred Queue ID" registers may not be used.
 */
int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
		      enum t4_bar2_qtype qtype, u64 *pbar2_qoffset,
		      unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/*
	 * T4 doesn't support BAR2 SGE Queue registers.
	 */
	if (is_t4(adapter->params.chip))
		return -EINVAL;

	/*
	 * Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.hps + 10;
	page_size = 1 << page_shift;

	/*
	 * Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS ?
		     adapter->params.sge.eq_qpp :
		     adapter->params.sge.iq_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/*
	 * Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/*
	 * If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}
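
/*
 * Worked example (illustrative): with a 4KB SGE Page Size (page_shift =
 * 12) and eq_qpp = 0 (one Egress Queue per page), Absolute Queue ID 7
 * gives bar2_page_offset = 7 << 12 = 0x7000, bar2_qid = 0 and
 * bar2_qid_offset = 0, so the queue's registers live at BAR2 offset
 * 0x7000 and the hardware can infer the Queue ID.  A hypothetical caller
 * mapping the doorbell:
 *
 *	u64 qoffset;
 *	unsigned int qid;
 *	ret = t4_bar2_sge_qregs(adap, eq_cntxt_id, T4_BAR2_QTYPE_EGRESS,
 *				&qoffset, &qid);
 *	if (ret == 0)
 *		db_reg = (u8 *)adap->bar2 + qoffset;
 *
 * adap->bar2 is an assumption about where the caller keeps its BAR2
 * mapping.
 */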
/**
 * t4_init_sge_params - initialize adap->params.sge
 * @adapter: the adapter
 *
 * Initialize various fields of the adapter's SGE Parameters structure.
 */
int t4_init_sge_params(struct adapter *adapter)
{
	struct sge_params *sge_params = &adapter->params.sge;
	u32 hps, qpp;
	unsigned int s_hps, s_qpp;

	/*
	 * Extract the SGE Page Size for our PF.
	 */
	hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
	s_hps = (S_HOSTPAGESIZEPF0 + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) *
		 adapter->pf);
	sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);

	/*
	 * Extract the SGE Egress and Ingress Queues Per Page for our PF.
	 */
	s_qpp = (S_QUEUESPERPAGEPF0 +
		 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
	qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
	sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
	qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
	sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);

	return 0;
}
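
/*
 * Worked example (illustrative): each PF has its own HOSTPAGESIZEPF
 * field, so s_hps selects this PF's field within the register.  A cached
 * hps value of 2 means a 2^(2 + 10) = 4KB SGE Page Size, which is
 * exactly the page_shift = hps + 10 computation used by
 * t4_bar2_sge_qregs() above.
 */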
/**
 * t4_init_tp_params - initialize adap->params.tp
 * @adap: the adapter
 *
 * Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap)
{
	int chan;
	u32 v;

	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
	adap->params.tp.tre = G_TIMERRESOLUTION(v);
	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < NCHAN; chan++)
		adap->params.tp.tx_modq[chan] = chan;

	/*
	 * Cache the adapter's Compressed Filter Mode and global Ingress
	 * Configuration.
	 */
	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			 &adap->params.tp.vlan_pri_map, 1, A_TP_VLAN_PRI_MAP);
	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			 &adap->params.tp.ingress_config, 1,
			 A_TP_INGRESS_CONFIG);

	/* For T6, cache the adapter's compressed error vector
	 * and passing outer header info for encapsulated packets.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
		v = t4_read_reg(adap, A_TP_OUT_CONFIG);
		adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
	}

	/*
	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
							       F_PROTOCOL);

	/*
	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
		adap->params.tp.vnic_shift = -1;

	return 0;
}
/**
 * t4_filter_field_shift - calculate filter field shift
 * @adap: the adapter
 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
 *
 * Return the shift position of a filter field within the Compressed
 * Filter Tuple.  The filter field is specified via its selection bit
 * within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
 */
int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel)
{
	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
	unsigned int sel;
	int field_shift;

	if ((filter_mode & filter_sel) == 0)
		return -1;

	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
		switch (filter_mode & sel) {
		case F_FCOE:
			field_shift += W_FT_FCOE;
			break;
		case F_PORT:
			field_shift += W_FT_PORT;
			break;
		case F_VNIC_ID:
			field_shift += W_FT_VNIC_ID;
			break;
		case F_VLAN:
			field_shift += W_FT_VLAN;
			break;
		case F_TOS:
			field_shift += W_FT_TOS;
			break;
		case F_PROTOCOL:
			field_shift += W_FT_PROTOCOL;
			break;
		case F_ETHERTYPE:
			field_shift += W_FT_ETHERTYPE;
			break;
		case F_MACMATCH:
			field_shift += W_FT_MACMATCH;
			break;
		case F_MPSHITTYPE:
			field_shift += W_FT_MPSHITTYPE;
			break;
		case F_FRAGMENTATION:
			field_shift += W_FT_FRAGMENTATION;
			break;
		}
	}
	return field_shift;
}
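
/*
 * Worked example (illustrative): if the filter mode enables only F_PORT
 * and F_PROTOCOL, then t4_filter_field_shift(adap, F_PROTOCOL) walks the
 * selector bits below F_PROTOCOL, finds only F_PORT set, and returns
 * W_FT_PORT: the Protocol field starts just above the Port field in the
 * Compressed Filter Tuple.
 */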
int t4_init_rss_mode(struct adapter *adap, int mbox)
{
	int i, ret;
	struct fw_rss_vi_config_cmd rvc;

	memset(&rvc, 0, sizeof(rvc));

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		rvc.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				       F_FW_CMD_REQUEST | F_FW_CMD_READ |
				       V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
		rvc.retval_len16 = htonl(FW_LEN16(rvc));
		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
		if (ret)
			return ret;
		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
	}
	return 0;
}
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j = 0;
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));

	for_each_port(adap, i) {
		unsigned int rss_size = 0;
		struct port_info *p = adap2pinfo(adap, i);

		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ |
					     V_FW_PORT_CMD_PORTID(j));
		c.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(
						FW_PORT_ACTION_GET_PORT_INFO) |
						FW_LEN16(c));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		if (ret)
			return ret;

		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
		if (ret < 0)
			return ret;

		p->viid = ret;
		p->tx_chan = j;
		p->rss_size = rss_size;
		t4_os_set_hw_addr(adap, i, addr);

		ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
		p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
			       G_FW_PORT_CMD_MDIOADDR(ret) : -1;
		p->port_type = G_FW_PORT_CMD_PTYPE(ret);
		p->mod_type = FW_PORT_MOD_TYPE_NA;

		init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap),
				 be16_to_cpu(c.u.info.acap));