4 * Copyright(c) 2014-2016 Chelsio Communications.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Chelsio Communications nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <netinet/in.h>
36 #include <rte_interrupts.h>
38 #include <rte_debug.h>
40 #include <rte_atomic.h>
41 #include <rte_branch_prediction.h>
42 #include <rte_memory.h>
43 #include <rte_memzone.h>
44 #include <rte_tailq.h>
46 #include <rte_alarm.h>
47 #include <rte_ether.h>
48 #include <rte_ethdev.h>
49 #include <rte_atomic.h>
50 #include <rte_malloc.h>
51 #include <rte_random.h>
53 #include <rte_byteorder.h>
57 #include "t4_regs_values.h"
58 #include "t4fw_interface.h"
60 static void init_link_config(struct link_config *lc, unsigned int caps);
63 * t4_read_mtu_tbl - returns the values in the HW path MTU table
65 * @mtus: where to store the MTU values
66 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
68 * Reads the HW path MTU table.
70 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
/* Reads all NMTUS entries of the HW path-MTU table into @mtus, and the
 * base-2 log widths into @mtu_log.
 * NOTE(review): extraction gap — the opening brace and the local
 * declarations (loop index "i", register value "v") are missing here.
 */
75 for (i = 0; i < NMTUS; ++i) {
/* Writing index 0xff with the entry number in the VALUE field appears to
 * select entry i for read-back — confirm against TP register docs. */
76 t4_write_reg(adap, A_TP_MTU_TABLE,
77 V_MTUINDEX(0xff) | V_MTUVALUE(i));
78 v = t4_read_reg(adap, A_TP_MTU_TABLE);
79 mtus[i] = G_MTUVALUE(v);
/* NOTE(review): @mtu_log may be NULL per the header comment above; the
 * original presumably guards this store with "if (mtu_log)" — that line
 * was lost in extraction, confirm upstream. */
81 mtu_log[i] = G_MTUWIDTH(v);
86 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
88 * @addr: the indirect TP register address
89 * @mask: specifies the field within the register to modify
90 * @val: new value for the field
92 * Sets a field of an indirect TP register to the given value.
94 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
95 unsigned int mask, unsigned int val)
/* Read-modify-write of an indirect TP register through the
 * A_TP_PIO_ADDR / A_TP_PIO_DATA address/data pair. */
97 t4_write_reg(adap, A_TP_PIO_ADDR, addr);
/* Preserve the bits outside @mask, merge in the caller's field value. */
98 val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
99 t4_write_reg(adap, A_TP_PIO_DATA, val);
102 /* The minimum additive increment value for the congestion control table */
103 #define CC_MIN_INCR 2U
106 * t4_load_mtus - write the MTU and congestion control HW tables
108 * @mtus: the values for the MTU table
109 * @alpha: the values for the congestion control alpha parameter
110 * @beta: the values for the congestion control beta parameter
112 * Write the HW MTU table with the supplied MTUs and the high-speed
113 * congestion control table with the supplied alpha, beta, and MTUs.
114 * We write the two tables together because the additive increments
115 * depend on the MTUs.
117 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
118 const unsigned short *alpha, const unsigned short *beta)
/* Average packet counts per congestion-control window, used to scale the
 * additive increment computed below. */
120 static const unsigned int avg_pkts[NCCTRL_WIN] = {
121 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
122 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
123 28672, 40960, 57344, 81920, 114688, 163840, 229376
/* NOTE(review): extraction gap — local declarations (i, w, inc) and the
 * closing of the table initializer are missing from this view. */
128 for (i = 0; i < NMTUS; ++i) {
129 unsigned int mtu = mtus[i];
130 unsigned int log2 = cxgbe_fls(mtu);
/* Round the width down when the MTU is closer to the next-lower power of
 * two. NOTE(review): the "log2--" body of this if was lost in extraction. */
132 if (!(mtu & ((1 << log2) >> 2))) /* round */
134 t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
135 V_MTUWIDTH(log2) | V_MTUVALUE(mtu));
/* Program the congestion-control table entry for each window. */
137 for (w = 0; w < NCCTRL_WIN; ++w) {
/* NOTE(review): the second max() argument (presumably CC_MIN_INCR, defined
 * above) was lost in extraction — confirm upstream. The "- 40" subtracts
 * the presumed IP+TCP header overhead from the MTU; verify. */
140 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
143 t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
144 (w << 16) | (beta[w] << 13) | inc);
150 * t4_wait_op_done_val - wait until an operation is completed
151 * @adapter: the adapter performing the operation
152 * @reg: the register to check for completion
153 * @mask: a single-bit field within @reg that indicates completion
154 * @polarity: the value of the field when the operation is completed
155 * @attempts: number of check iterations
156 * @delay: delay in usecs between iterations
157 * @valp: where to store the value of the register at completion time
159 * Wait until an operation is completed by checking a bit in a register
160 * up to @attempts times. If @valp is not NULL the value of the register
161 * at the time it indicated completion is stored there. Returns 0 if the
162 * operation completes and -EAGAIN otherwise.
164 int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
165 int polarity, int attempts, int delay, u32 *valp)
/* Polls @reg until the @mask bit matches @polarity, per the header comment
 * above. NOTE(review): extraction gap — the retry loop, the optional store
 * through @valp, the @delay handling and the -EAGAIN timeout return are all
 * missing from this view; only the first poll iteration is visible. */
168 u32 val = t4_read_reg(adapter, reg);
170 if (!!(val & mask) == polarity) {
183 * t4_set_reg_field - set a register field to a value
184 * @adapter: the adapter to program
185 * @addr: the register address
186 * @mask: specifies the portion of the register to modify
187 * @val: the new value for the register field
189 * Sets a register field specified by the supplied mask to the
192 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
/* NOTE(review): extraction gap — the "u32 val" parameter line and opening
 * brace are missing from this view. */
/* Classic read-modify-write: clear the masked field, then OR in @val. */
195 u32 v = t4_read_reg(adapter, addr) & ~mask;
197 t4_write_reg(adapter, addr, v | val);
/* Read back to flush the posted write to the device. */
198 (void)t4_read_reg(adapter, addr); /* flush */
202 * t4_read_indirect - read indirectly addressed registers
204 * @addr_reg: register holding the indirect address
205 * @data_reg: register holding the value of the indirect register
206 * @vals: where the read register values are stored
207 * @nregs: how many indirect registers to read
208 * @start_idx: index of first indirect register to read
210 * Reads registers that are accessed indirectly through an address/data
213 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
214 unsigned int data_reg, u32 *vals, unsigned int nregs,
215 unsigned int start_idx)
/* Reads @nregs registers through an address/data register pair.
 * NOTE(review): extraction gap — the enclosing loop header (presumably
 * "while (nregs--)") and the "start_idx++" advance are missing from this
 * view; only one iteration's body is visible. Confirm upstream. */
218 t4_write_reg(adap, addr_reg, start_idx);
219 *vals++ = t4_read_reg(adap, data_reg);
225 * t4_write_indirect - write indirectly addressed registers
227 * @addr_reg: register holding the indirect addresses
228 * @data_reg: register holding the value for the indirect registers
229 * @vals: values to write
230 * @nregs: how many indirect registers to write
231 * @start_idx: address of first indirect register to write
233 * Writes a sequential block of registers that are accessed indirectly
234 * through an address/data register pair.
236 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
237 unsigned int data_reg, const u32 *vals,
238 unsigned int nregs, unsigned int start_idx)
/* Writes @nregs sequential registers through an address/data pair.
 * NOTE(review): extraction gap — the enclosing loop header (presumably
 * "while (nregs--)") is missing from this view; only one iteration's body
 * (address select, then data write) is visible. */
241 t4_write_reg(adap, addr_reg, start_idx++);
242 t4_write_reg(adap, data_reg, *vals++);
247 * t4_report_fw_error - report firmware error
250 * The adapter firmware can indicate error conditions to the host.
251 * If the firmware has indicated an error, print out the reason for
252 * the firmware error.
254 static void t4_report_fw_error(struct adapter *adap)
/* Human-readable strings indexed by the PCIE_FW_EVAL field of A_PCIE_FW. */
256 static const char * const reason[] = {
257 "Crash", /* PCIE_FW_EVAL_CRASH */
258 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
259 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
260 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
261 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
262 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
263 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
264 "Reserved", /* reserved */
/* NOTE(review): extraction gap — the closing of the table and the "u32
 * pcie_fw" declaration are missing from this view. */
268 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
/* Only log when the firmware has actually latched an error indication. */
269 if (pcie_fw & F_PCIE_FW_ERR)
270 pr_err("%s: Firmware reports adapter error: %s\n",
271 __func__, reason[G_PCIE_FW_EVAL(pcie_fw)]);
275 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
277 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
/* Copies @nflit 64-bit flits of a mailbox reply into @rpl, converting each
 * to big-endian. NOTE(review): extraction gap — the "u32 mbox_addr"
 * parameter line and the braces are missing from this view. */
280 for ( ; nflit; nflit--, mbox_addr += 8)
281 *rpl++ = htobe64(t4_read_reg64(adap, mbox_addr));
285 * Handle a FW assertion reported in a mailbox.
287 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
/* Decodes a firmware assertion delivered as a FW_DEBUG_CMD in a mailbox
 * and logs its file/line/values. */
289 struct fw_debug_cmd asrt;
/* Pull the whole debug command (in 8-byte flits) out of the mailbox. */
291 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
292 pr_warn("FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
293 asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
294 be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
297 #define X_CIM_PF_NOACCESS 0xeeeeeeee
300 * If the Host OS Driver needs locking around accesses to the mailbox, this
301 * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
303 /* makes single-statement usage a bit cleaner ... */
304 #ifdef T4_OS_NEEDS_MBOX_LOCKING
305 #define T4_OS_MBOX_LOCKING(x) x
307 #define T4_OS_MBOX_LOCKING(x) do {} while (0)
311 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
313 * @mbox: index of the mailbox to use
314 * @cmd: the command to write
315 * @size: command length in bytes
316 * @rpl: where to optionally store the reply
317 * @sleep_ok: if true we may sleep while awaiting command completion
318 * @timeout: time to wait for command to finish before timing out
319 * (negative implies @sleep_ok=false)
321 * Sends the given command to FW through the selected mailbox and waits
322 * for the FW to execute the command. If @rpl is not %NULL it is used to
323 * store the FW's reply to the command. The command and its optional
324 * reply are of the same length. Some FW commands like RESET and
325 * INITIALIZE can take a considerable amount of time to execute.
326 * @sleep_ok determines whether we may sleep while awaiting the response.
327 * If sleeping is allowed we use progressive backoff otherwise we spin.
328 * Note that passing in a negative @timeout is an alternate mechanism
329 * for specifying @sleep_ok=false. This is useful when a higher level
330 * interface allows for specification of @timeout but not @sleep_ok ...
332 * Returns 0 on success or a negative errno on failure. A
333 * failure can happen either because we are not able to execute the
334 * command or FW executes it but signals an error. In the latter case
335 * the return value is the error code indicated by FW (negated).
337 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
338 const void __attribute__((__may_alias__)) *cmd,
339 int size, void *rpl, bool sleep_ok, int timeout)
342 * We delay in small increments at first in an effort to maintain
343 * responsiveness for simple, fast executing commands but then back
344 * off to larger delays to a maximum retry delay.
346 static const int delay[] = {
347 1, 1, 3, 5, 10, 10, 20, 50, 100
/* NOTE(review): extraction gap — several local declarations (i, ms, res,
 * ctl, v, pcie_fw, p, ...) are missing from this view. */
353 unsigned int delay_idx;
/* NOTE(review): malloc result is not checked before use and no matching
 * free() is visible on any return path in this view — possible NULL deref
 * and leak; confirm against upstream (which frees "temp" before returning).
 * Also, "* sizeof(char)" is a no-op (sizeof(char) == 1 by definition). */
354 __be64 *temp = (__be64 *)malloc(size * sizeof(char));
356 u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
357 u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
359 struct mbox_entry entry;
/* Commands must be a multiple of 16 bytes and fit in the mailbox. */
362 if ((size & 15) || size > MBOX_LEN) {
/* NOTE(review): "p" presumably aliases "temp"; the assignment was lost in
 * extraction. Stage the command locally before writing it to the HW. */
368 memcpy(p, (const __be64 *)cmd, size);
371 * If we have a negative timeout, that implies that we can't sleep.
378 #ifdef T4_OS_NEEDS_MBOX_LOCKING
380 * Queue ourselves onto the mailbox access list. When our entry is at
381 * the front of the list, we have rights to access the mailbox. So we
382 * wait [for a while] till we're at the front [or bail out with an
385 t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);
390 for (i = 0; ; i += ms) {
392 * If we've waited too long, return a busy indication. This
393 * really ought to be based on our initial position in the
394 * mailbox access list but this is a start. We very rarely
395 * contend on access to the mailbox ... Also check for a
396 * firmware error which we'll report as a device error.
398 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
399 if (i > 4 * timeout || (pcie_fw & F_PCIE_FW_ERR)) {
400 t4_os_atomic_list_del(&entry, &adap->mbox_list,
402 t4_report_fw_error(adap);
403 return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
407 * If we're at the head, break out and start the mailbox
410 if (t4_os_list_first_entry(&adap->mbox_list) == &entry)
414 * Delay for a bit before checking again ...
417 ms = delay[delay_idx]; /* last element may repeat */
418 if (delay_idx < ARRAY_SIZE(delay) - 1)
425 #endif /* T4_OS_NEEDS_MBOX_LOCKING */
428 * Attempt to gain access to the mailbox.
/* Up to 4 attempts to observe MBOWNER == NONE and claim ownership.
 * NOTE(review): the line extracting "v" from "ctl" (G_MBOWNER) was lost
 * in extraction. */
430 for (i = 0; i < 4; i++) {
431 ctl = t4_read_reg(adap, ctl_reg);
433 if (v != X_MBOWNER_NONE)
438 * If we were unable to gain access, dequeue ourselves from the
439 * mailbox atomic access list and report the error to our caller.
441 if (v != X_MBOWNER_PL) {
442 T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
445 t4_report_fw_error(adap);
446 return (v == X_MBOWNER_FW ? -EBUSY : -ETIMEDOUT);
450 * If we gain ownership of the mailbox and there's a "valid" message
451 * in it, this is likely an asynchronous error message from the
452 * firmware. So we'll report that and then proceed on with attempting
453 * to issue our own command ... which may well fail if the error
454 * presaged the firmware crashing ...
456 if (ctl & F_MBMSGVALID) {
457 dev_err(adap, "found VALID command in mbox %u: "
458 "%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
459 (unsigned long long)t4_read_reg64(adap, data_reg),
460 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
461 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
462 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
463 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
464 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
465 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
466 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
470 * Copy in the new mailbox command and send it on its way ...
472 for (i = 0; i < size; i += 8, p++)
473 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
475 CXGBE_DEBUG_MBOX(adap, "%s: mbox %u: %016llx %016llx %016llx %016llx "
476 "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
477 (unsigned long long)t4_read_reg64(adap, data_reg),
478 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
479 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
480 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
481 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
482 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
483 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
484 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
/* Hand the mailbox to firmware and flush the posted write. */
486 t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
487 t4_read_reg(adap, ctl_reg); /* flush write */
493 * Loop waiting for the reply; bail out if we time out or the firmware
496 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
497 for (i = 0; i < timeout && !(pcie_fw & F_PCIE_FW_ERR); i += ms) {
499 ms = delay[delay_idx]; /* last element may repeat */
500 if (delay_idx < ARRAY_SIZE(delay) - 1)
/* NOTE(review): the sleep/spin delay call and delay_idx increment were
 * lost in extraction. */
507 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
508 v = t4_read_reg(adap, ctl_reg);
/* X_CIM_PF_NOACCESS means the PF lost access to the mailbox registers. */
509 if (v == X_CIM_PF_NOACCESS)
511 if (G_MBOWNER(v) == X_MBOWNER_PL) {
/* Ownership returned but no message: release and keep waiting. */
512 if (!(v & F_MBMSGVALID)) {
513 t4_write_reg(adap, ctl_reg,
514 V_MBOWNER(X_MBOWNER_NONE));
518 CXGBE_DEBUG_MBOX(adap,
519 "%s: mbox %u: %016llx %016llx %016llx %016llx "
520 "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
521 (unsigned long long)t4_read_reg64(adap, data_reg),
522 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
523 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
524 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
525 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
526 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
527 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
528 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
530 CXGBE_DEBUG_MBOX(adap,
531 "command %#x completed in %d ms (%ssleeping)\n",
533 i + ms, sleep_ok ? "" : "non-");
/* First flit carries the FW opcode and return value. */
535 res = t4_read_reg64(adap, data_reg);
/* A FW_DEBUG_CMD reply is an in-band firmware assertion, not our reply. */
536 if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
537 fw_asrt(adap, data_reg);
538 res = V_FW_CMD_RETVAL(EIO);
/* NOTE(review): the "else if (rpl)" guard around this copy was
 * presumably lost in extraction — @rpl is optional per the contract. */
540 get_mbox_rpl(adap, rpl, size / 8, data_reg);
542 t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
544 t4_os_atomic_list_del(&entry, &adap->mbox_list,
/* Negated FW return value, per the header comment's error convention. */
546 return -G_FW_CMD_RETVAL((int)res);
551 * We timed out waiting for a reply to our mailbox command. Report
552 * the error and also check to see if the firmware reported any
555 dev_err(adap, "command %#x in mailbox %d timed out\n",
556 *(const u8 *)cmd, mbox);
557 T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
560 t4_report_fw_error(adap);
562 return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
565 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
566 void *rpl, bool sleep_ok)
/* Convenience wrapper: issue a mailbox command with the default timeout.
 * NOTE(review): the continuation line carrying the timeout argument
 * (presumably FW_CMD_MAX_TIMEOUT) was lost in extraction. */
568 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
572 /* EEPROM reads take a few tens of us while writes can take a bit over 5 ms. */
573 #define EEPROM_DELAY 10 /* 10us per poll spin */
574 #define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */
576 #define EEPROM_STAT_ADDR 0x7bfc
579 * Small utility function to wait till any outstanding VPD Access is complete.
580 * We have a per-adapter state variable "VPD Busy" to indicate when we have a
581 * VPD Access in flight. This allows us to handle the problem of having a
582 * previous VPD Access time out and prevent an attempt to inject a new VPD
583 * Request before any in-flight VPD request has completed.
585 static int t4_seeprom_wait(struct adapter *adapter)
587 unsigned int base = adapter->params.pci.vpd_cap_addr;
/* NOTE(review): extraction gap — locals (max_poll, val) and the "do {"
 * opening the poll loop are missing from this view. */
590 /* If no VPD Access is in flight, we can just return success right
593 if (!adapter->vpd_busy)
596 /* Poll the VPD Capability Address/Flag register waiting for it
597 * to indicate that the operation is complete.
599 max_poll = EEPROM_MAX_POLL;
603 udelay(EEPROM_DELAY);
604 t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val);
606 /* If the operation is complete, mark the VPD as no longer
607 * busy and return success.
/* vpd_flag records which PCI_VPD_ADDR_F polarity signals completion for
 * the in-flight operation (set for reads, clear for writes — see the
 * read/write issuers below). */
609 if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) {
610 adapter->vpd_busy = 0;
613 } while (--max_poll);
615 /* Failure! Note that we leave the VPD Busy status set in order to
616 * avoid pushing a new VPD Access request into the VPD Capability till
617 * the current operation eventually succeeds. It's a bug to issue a
618 * new request when an existing request is in flight and will result
619 * in corrupt hardware state.
625 * t4_seeprom_read - read a serial EEPROM location
626 * @adapter: adapter to read
627 * @addr: EEPROM virtual address
628 * @data: where to store the read data
630 * Read a 32-bit word from a location in serial EEPROM using the card's PCI
631 * VPD capability. Note that this function must be called with a virtual
634 int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
636 unsigned int base = adapter->params.pci.vpd_cap_addr;
/* NOTE(review): extraction gap — the "int ret" declaration and several
 * early-return bodies are missing from this view. */
639 /* VPD Accesses must always be 4-byte aligned!
641 if (addr >= EEPROMVSIZE || (addr & 3))
644 /* Wait for any previous operation which may still be in flight to
647 ret = t4_seeprom_wait(adapter);
649 dev_err(adapter, "VPD still busy from previous operation\n");
653 /* Issue our new VPD Read request, mark the VPD as being busy and wait
654 * for our request to complete. If it doesn't complete, note the
655 * error and return it to our caller. Note that we do not reset the
/* Writing the address with the F flag clear starts a VPD read; the flag
 * going high signals completion (hence vpd_flag = PCI_VPD_ADDR_F). */
658 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr);
659 adapter->vpd_busy = 1;
660 adapter->vpd_flag = PCI_VPD_ADDR_F;
661 ret = t4_seeprom_wait(adapter);
663 dev_err(adapter, "VPD read of address %#x failed\n", addr);
667 /* Grab the returned data, swizzle it into our endianness and
670 t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data);
671 *data = le32_to_cpu(*data);
676 * t4_seeprom_write - write a serial EEPROM location
677 * @adapter: adapter to write
678 * @addr: virtual EEPROM address
679 * @data: value to write
681 * Write a 32-bit word to a location in serial EEPROM using the card's PCI
682 * VPD capability. Note that this function must be called with a virtual
685 int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
687 unsigned int base = adapter->params.pci.vpd_cap_addr;
/* NOTE(review): extraction gap — locals (ret, max_poll, stats_reg) and
 * several early-return bodies are missing from this view. */
692 /* VPD Accesses must always be 4-byte aligned!
694 if (addr >= EEPROMVSIZE || (addr & 3))
697 /* Wait for any previous operation which may still be in flight to
700 ret = t4_seeprom_wait(adapter);
702 dev_err(adapter, "VPD still busy from previous operation\n");
706 /* Issue our new VPD Write request, mark the VPD as being busy and wait
707 * for our request to complete. If it doesn't complete, note the
708 * error and return it to our caller. Note that we do not reset the
/* Data first, then the address with the F flag set starts the write; the
 * flag going low signals completion (hence vpd_flag = 0). */
711 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA,
713 t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR,
714 (u16)addr | PCI_VPD_ADDR_F);
715 adapter->vpd_busy = 1;
716 adapter->vpd_flag = 0;
717 ret = t4_seeprom_wait(adapter);
719 dev_err(adapter, "VPD write of address %#x failed\n", addr);
723 /* Reset PCI_VPD_DATA register after a transaction and wait for our
724 * request to complete. If it doesn't complete, return error.
726 t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0);
/* Poll the EEPROM status word until the write-in-progress bit clears. */
727 max_poll = EEPROM_MAX_POLL;
729 udelay(EEPROM_DELAY);
730 t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg);
731 } while ((stats_reg & 0x1) && --max_poll);
735 /* Return success! */
740 * t4_seeprom_wp - enable/disable EEPROM write protection
741 * @adapter: the adapter
742 * @enable: whether to enable or disable write protection
744 * Enables or disables write protection on the serial EEPROM.
746 int t4_seeprom_wp(struct adapter *adapter, int enable)
/* Toggle EEPROM write protection by writing the status word: 0xc sets the
 * protection bits, 0 clears them. Returns t4_seeprom_write()'s result. */
748 return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
752 * t4_config_rss_range - configure a portion of the RSS mapping table
753 * @adapter: the adapter
754 * @mbox: mbox to use for the FW command
755 * @viid: virtual interface whose RSS subtable is to be written
756 * @start: start entry in the table to write
757 * @n: how many table entries to write
758 * @rspq: values for the "response queue" (Ingress Queue) lookup table
759 * @nrspq: number of values in @rspq
761 * Programs the selected part of the VI's RSS mapping table with the
762 * provided values. If @nrspq < @n the supplied values are used repeatedly
763 * until the full table range is populated.
765 * The caller must ensure the values in @rspq are in the range allowed for
768 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
769 int start, int n, const u16 *rspq, unsigned int nrspq)
/* NOTE(review): extraction gap — the "int ret" declaration and the outer
 * "while (n > 0)" loop structure are missing from this view. */
772 const u16 *rsp = rspq;
773 const u16 *rsp_end = rspq + nrspq;
774 struct fw_rss_ind_tbl_cmd cmd;
776 memset(&cmd, 0, sizeof(cmd));
777 cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
778 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
779 V_FW_RSS_IND_TBL_CMD_VIID(viid));
780 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
783 * Each firmware RSS command can accommodate up to 32 RSS Ingress
784 * Queue Identifiers. These Ingress Queue IDs are packed three to
785 * a 32-bit word as 10-bit values with the upper remaining 2 bits
/* NOTE(review): locals nq, nq_packed and qbuf[] are declared in lost
 * lines — confirm upstream. */
791 __be32 *qp = &cmd.iq0_to_iq2;
794 * Set up the firmware RSS command header to send the next
795 * "nq" Ingress Queue IDs to the firmware.
797 cmd.niqid = cpu_to_be16(nq);
798 cmd.startidx = cpu_to_be16(start);
801 * "nq" more done for the start of the next loop.
807 * While there are still Ingress Queue IDs to stuff into the
808 * current firmware RSS command, retrieve them from the
809 * Ingress Queue ID array and insert them into the command.
813 * Grab up to the next 3 Ingress Queue IDs (wrapping
814 * around the Ingress Queue ID array if necessary) and
815 * insert them into the firmware RSS command at the
816 * current 3-tuple position within the command.
820 int nqbuf = min(3, nq);
/* Wrap "rsp" back to the start of @rspq when it reaches rsp_end, per the
 * "repeat until full" contract in the header comment. */
826 while (nqbuf && nq_packed < 32) {
833 *qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
834 V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
835 V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
839 * Send this portion of the RSS table update to the firmware;
840 * bail out on any errors.
842 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
851 * t4_config_vi_rss - configure per VI RSS settings
852 * @adapter: the adapter
853 * @mbox: mbox to use for the FW command
856 * @defq: id of the default RSS queue for the VI.
858 * Configures VI-specific RSS properties.
860 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
861 unsigned int flags, unsigned int defq)
/* Issues an FW_RSS_VI_CONFIG command setting the VI's RSS flags and its
 * default Ingress Queue; returns the mailbox command's result. */
863 struct fw_rss_vi_config_cmd c;
865 memset(&c, 0, sizeof(c));
866 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
867 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
868 V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
869 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* @flags is OR'd in raw — caller supplies pre-shifted FW_RSS_VI_CONFIG
 * field values. */
870 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
871 V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
872 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
876 * init_cong_ctrl - initialize congestion control parameters
877 * @a: the alpha values for congestion control
878 * @b: the beta values for congestion control
880 * Initialize the congestion control parameters.
882 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
/* Fills the alpha (@a) and beta (@b) congestion-control parameter arrays.
 * NOTE(review): extraction gap — essentially the whole body (the table of
 * assignments following this loop header) is missing from this view;
 * confirm values against upstream before relying on them. */
886 for (i = 0; i < 9; i++) {
940 #define INIT_CMD(var, cmd, rd_wr) do { \
941 (var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
942 F_FW_CMD_REQUEST | F_FW_CMD_##rd_wr); \
943 (var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
946 int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p)
948 u32 cclk_param, cclk_val;
/* NOTE(review): extraction gap — the "int ret" declaration, the error
 * return after the dev_err, and the "p->cclk = cclk_val" store implied by
 * the debug print below are missing from this view. */
952 * Ask firmware for the Core Clock since it knows how to translate the
953 * Reference Clock ('V2') VPD field into a Core Clock value ...
955 cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
956 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
957 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
958 1, &cclk_param, &cclk_val);
960 dev_err(adapter, "%s: error in fetching from coreclock - %d\n",
966 dev_debug(adapter, "%s: p->cclk = %u\n", __func__, p->cclk);
970 /* serial flash and firmware constants and flash config file constants */
972 SF_ATTEMPTS = 10, /* max retries for SF operations */
974 /* flash command opcodes */
975 SF_PROG_PAGE = 2, /* program page */
976 SF_WR_DISABLE = 4, /* disable writes */
977 SF_RD_STATUS = 5, /* read status register */
978 SF_WR_ENABLE = 6, /* enable writes */
979 SF_RD_DATA_FAST = 0xb, /* read flash */
980 SF_RD_ID = 0x9f, /* read ID */
981 SF_ERASE_SECTOR = 0xd8, /* erase sector */
985 * sf1_read - read data from the serial flash
986 * @adapter: the adapter
987 * @byte_cnt: number of bytes to read
988 * @cont: whether another operation will be chained
989 * @lock: whether to lock SF for PL access only
990 * @valp: where to store the read data
992 * Reads up to 4 bytes of data from the serial flash. The location of
993 * the read needs to be specified prior to calling this by issuing the
994 * appropriate commands to the serial flash.
996 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
/* NOTE(review): extraction gap — the second parameter line (lock, u32
 * *valp), the "int ret" declaration and the final "return ret" are missing
 * from this view. */
1001 if (!byte_cnt || byte_cnt > 4)
/* Bail out if a previous serial-flash operation is still in progress. */
1003 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
/* BYTECNT is encoded as count-1 in the SF_OP register. */
1005 t4_write_reg(adapter, A_SF_OP,
1006 V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
1007 ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
1009 *valp = t4_read_reg(adapter, A_SF_DATA);
1014 * sf1_write - write data to the serial flash
1015 * @adapter: the adapter
1016 * @byte_cnt: number of bytes to write
1017 * @cont: whether another operation will be chained
1018 * @lock: whether to lock SF for PL access only
1019 * @val: value to write
1021 * Writes up to 4 bytes of data to the serial flash. The location of
1022 * the write needs to be specified prior to calling this by issuing the
1023 * appropriate commands to the serial flash.
1025 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
/* NOTE(review): extraction gap — the second parameter line (lock, u32 val)
 * is missing from this view. */
1028 if (!byte_cnt || byte_cnt > 4)
1030 if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
/* Stage the data, then kick off the write (V_OP(1) = write direction). */
1032 t4_write_reg(adapter, A_SF_DATA, val);
1033 t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
1034 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
1035 return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
1039 * t4_read_flash - read words from serial flash
1040 * @adapter: the adapter
1041 * @addr: the start address for the read
1042 * @nwords: how many 32-bit words to read
1043 * @data: where to store the read data
1044 * @byte_oriented: whether to store data as bytes or as words
1046 * Read the specified number of 32-bit words from the serial flash.
1047 * If @byte_oriented is set the read data is stored as a byte array
1048 * (i.e., big-endian), otherwise as 32-bit words in the platform's
1049 * natural endianness.
1051 int t4_read_flash(struct adapter *adapter, unsigned int addr,
1052 unsigned int nwords, u32 *data, int byte_oriented)
/* NOTE(review): extraction gap — the "int ret" declaration, the bounds
 * check's second condition/return, and several error-return bodies are
 * missing from this view. */
1056 if (((addr + nwords * sizeof(u32)) > adapter->params.sf_size) ||
/* Build the SF_RD_DATA_FAST command: opcode in the low byte, 24-bit
 * address byte-swapped into the upper bytes. */
1060 addr = rte_constant_bswap32(addr) | SF_RD_DATA_FAST;
1062 ret = sf1_write(adapter, 4, 1, 0, addr);
/* Fast-read requires one dummy byte after the address before data. */
1066 ret = sf1_read(adapter, 1, 1, 0, data);
1070 for ( ; nwords; nwords--, data++) {
/* Continue the chained read; unlock SF on the final word. */
1071 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
1073 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
/* In byte-oriented mode keep the data big-endian (byte-array order). */
1077 *data = cpu_to_be32(*data);
1083 * t4_get_fw_version - read the firmware version
1084 * @adapter: the adapter
1085 * @vers: where to place the version
1087 * Reads the FW version from flash.
1089 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
/* Reads the one-word fw_ver field of the firmware header from flash. */
1091 return t4_read_flash(adapter, FLASH_FW_START +
1092 offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
1096 * t4_get_tp_version - read the TP microcode version
1097 * @adapter: the adapter
1098 * @vers: where to place the version
1100 * Reads the TP microcode version from flash.
1102 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
/* Reads the one-word TP microcode version from the firmware header in
 * flash. NOTE(review): the trailing argument line ("1, vers, 0)") was
 * lost in extraction. */
1104 return t4_read_flash(adapter, FLASH_FW_START +
1105 offsetof(struct fw_hdr, tp_microcode_ver),
1109 #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1110 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1111 FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
1114 * t4_link_l1cfg - apply link configuration to MAC/PHY
1115 * @phy: the PHY to setup
1116 * @mac: the MAC to setup
1117 * @lc: the requested link configuration
1119 * Set up a port's MAC and PHY according to a desired link configuration.
1120 * - If the PHY can auto-negotiate first decide what to advertise, then
1121 * enable/disable auto-negotiation as desired, and reset.
1122 * - If the PHY does not auto-negotiate just reset it.
1123 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1124 * otherwise do it later based on the outcome of auto-negotiation.
1126 int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
1127 struct link_config *lc)
1129 struct fw_port_cmd c;
1130 unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
/* Translate the requested pause settings into FW capability bits. */
1133 if (lc->requested_fc & PAUSE_RX)
1134 fc |= FW_PORT_CAP_FC_RX;
1135 if (lc->requested_fc & PAUSE_TX)
1136 fc |= FW_PORT_CAP_FC_TX;
1138 memset(&c, 0, sizeof(c));
1139 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
1140 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
1141 V_FW_PORT_CMD_PORTID(port));
/* NOTE(review): the l-value of this assignment (action_to_len16) was lost
 * in extraction. */
1143 cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
/* No autoneg capability: advertise supported caps with fixed MDI/FC.
 * NOTE(review): the trailing "fc | mdi" of this rcap expression appears
 * to be on a lost continuation line. */
1146 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1147 c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
1149 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
/* Autoneg capable but disabled: force the requested speed. */
1150 } else if (lc->autoneg == AUTONEG_DISABLE) {
1151 c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
1152 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
/* Autoneg enabled: advertise and let negotiation settle speed/FC. */
1154 c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
1157 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1161 * t4_flash_cfg_addr - return the address of the flash configuration file
1162 * @adapter: the adapter
1164 * Return the address within the flash where the Firmware Configuration
1165 * File is stored, or an error if the device FLASH is too small to contain
1166 * a Firmware Configuration File.
1168 int t4_flash_cfg_addr(struct adapter *adapter)
1171 * If the device FLASH isn't large enough to hold a Firmware
1172 * Configuration File, return an error.
1174 if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
1177 return FLASH_CFG_START;
1180 #define PF_INTR_MASK (F_PFSW | F_PFCIM)
1183 * t4_intr_enable - enable interrupts
1184 * @adapter: the adapter whose interrupts should be enabled
1186 * Enable PF-specific interrupts for the calling function and the top-level
1187 * interrupt concentrator for global interrupts. Interrupts are already
1188 * enabled at each module, here we just enable the roots of the interrupt
1191 * Note: this function should be called only when the driver manages
1192 * non PF-specific interrupts from the various HW modules. Only one PCI
1193 * function at a time should be doing this.
1195 void t4_intr_enable(struct adapter *adapter)
1198 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
1200 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
1201 val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
1202 t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
1203 F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
1204 F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
1205 F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
1206 F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
1207 F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
1208 F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
1209 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
1210 t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
1214 * t4_intr_disable - disable interrupts
1215 * @adapter: the adapter whose interrupts should be disabled
1217 * Disable interrupts. We only disable the top-level interrupt
1218 * concentrators. The caller must be a PCI function managing global
1221 void t4_intr_disable(struct adapter *adapter)
1223 u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));
1225 t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
1226 t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
1230 * t4_get_port_type_description - return Port Type string description
1231 * @port_type: firmware Port Type enumeration
1233 const char *t4_get_port_type_description(enum fw_port_type port_type)
1235 static const char * const port_type_description[] = {
1254 if (port_type < ARRAY_SIZE(port_type_description))
1255 return port_type_description[port_type];
1260 * t4_get_mps_bg_map - return the buffer groups associated with a port
1261 * @adap: the adapter
1262 * @idx: the port index
1264 * Returns a bitmap indicating which MPS buffer groups are associated
1265 * with the given port. Bit i is set if buffer group i is used by the
1268 unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
1270 u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));
1273 return idx == 0 ? 0xf : 0;
1275 return idx < 2 ? (3 << (2 * idx)) : 0;
1280 * t4_get_port_stats - collect port statistics
1281 * @adap: the adapter
1282 * @idx: the port index
1283 * @p: the stats structure to fill
1285 * Collect statistics related to the given port from HW.
1287 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
1289 u32 bgmap = t4_get_mps_bg_map(adap, idx);
1291 #define GET_STAT(name) \
1292 t4_read_reg64(adap, \
1293 (is_t4(adap->params.chip) ? \
1294 PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) :\
1295 T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
1296 #define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
1298 p->tx_octets = GET_STAT(TX_PORT_BYTES);
1299 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
1300 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
1301 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
1302 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
1303 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
1304 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
1305 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
1306 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
1307 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
1308 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
1309 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
1310 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
1311 p->tx_drop = GET_STAT(TX_PORT_DROP);
1312 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
1313 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
1314 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
1315 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
1316 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
1317 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
1318 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
1319 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
1320 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
1322 p->rx_octets = GET_STAT(RX_PORT_BYTES);
1323 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
1324 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
1325 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
1326 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
1327 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
1328 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
1329 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
1330 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
1331 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
1332 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
1333 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
1334 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
1335 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
1336 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
1337 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
1338 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
1339 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
1340 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
1341 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
1342 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
1343 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
1344 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
1345 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
1346 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
1347 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
1348 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
1349 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
1350 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
1351 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
1352 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
1353 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
1354 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
1355 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
1356 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
1363 * t4_get_port_stats_offset - collect port stats relative to a previous snapshot
1364 * @adap: The adapter
1366 * @stats: Current stats to fill
1367 * @offset: Previous stats snapshot
1369 void t4_get_port_stats_offset(struct adapter *adap, int idx,
1370 struct port_stats *stats,
1371 struct port_stats *offset)
1376 t4_get_port_stats(adap, idx, stats);
1377 for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
1378 i < (sizeof(struct port_stats) / sizeof(u64));
1384 * t4_clr_port_stats - clear port statistics
1385 * @adap: the adapter
1386 * @idx: the port index
1388 * Clear HW statistics for the given port.
1390 void t4_clr_port_stats(struct adapter *adap, int idx)
1393 u32 bgmap = t4_get_mps_bg_map(adap, idx);
1396 if (is_t4(adap->params.chip))
1397 port_base_addr = PORT_BASE(idx);
1399 port_base_addr = T5_PORT_BASE(idx);
1401 for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
1402 i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
1403 t4_write_reg(adap, port_base_addr + i, 0);
1404 for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
1405 i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
1406 t4_write_reg(adap, port_base_addr + i, 0);
1407 for (i = 0; i < 4; i++)
1408 if (bgmap & (1 << i)) {
1410 A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
1413 A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
1419 * t4_fw_hello - establish communication with FW
1420 * @adap: the adapter
1421 * @mbox: mailbox to use for the FW command
1422 * @evt_mbox: mailbox to receive async FW events
1423 * @master: specifies the caller's willingness to be the device master
1424 * @state: returns the current device state (if non-NULL)
1426 * Issues a command to establish communication with FW. Returns either
1427 * an error (negative integer) or the mailbox of the Master PF.
1429 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
1430 enum dev_master master, enum dev_state *state)
1433 struct fw_hello_cmd c;
1435 unsigned int master_mbox;
1436 int retries = FW_CMD_HELLO_RETRIES;
1439 memset(&c, 0, sizeof(c));
1440 INIT_CMD(c, HELLO, WRITE);
1441 c.err_to_clearinit = cpu_to_be32(
1442 V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
1443 V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
1444 V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
1445 M_FW_HELLO_CMD_MBMASTER) |
1446 V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
1447 V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
1448 F_FW_HELLO_CMD_CLEARINIT);
1451 * Issue the HELLO command to the firmware. If it's not successful
1452 * but indicates that we got a "busy" or "timeout" condition, retry
1453 * the HELLO until we exhaust our retry limit. If we do exceed our
1454 * retry limit, check to see if the firmware left us any error
1455 * information and report that if so ...
1457 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
1458 if (ret != FW_SUCCESS) {
1459 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
1461 if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
1462 t4_report_fw_error(adap);
1466 v = be32_to_cpu(c.err_to_clearinit);
1467 master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
1469 if (v & F_FW_HELLO_CMD_ERR)
1470 *state = DEV_STATE_ERR;
1471 else if (v & F_FW_HELLO_CMD_INIT)
1472 *state = DEV_STATE_INIT;
1474 *state = DEV_STATE_UNINIT;
1478 * If we're not the Master PF then we need to wait around for the
1479 * Master PF Driver to finish setting up the adapter.
1481 * Note that we also do this wait if we're a non-Master-capable PF and
1482 * there is no current Master PF; a Master PF may show up momentarily
1483 * and we wouldn't want to fail pointlessly. (This can happen when an
1484 * OS loads lots of different drivers rapidly at the same time). In
1485 * this case, the Master PF returned by the firmware will be
1486 * M_PCIE_FW_MASTER so the test below will work ...
1488 if ((v & (F_FW_HELLO_CMD_ERR | F_FW_HELLO_CMD_INIT)) == 0 &&
1489 master_mbox != mbox) {
1490 int waiting = FW_CMD_HELLO_TIMEOUT;
1493 * Wait for the firmware to either indicate an error or
1494 * initialized state. If we see either of these we bail out
1495 * and report the issue to the caller. If we exhaust the
1496 * "hello timeout" and we haven't exhausted our retries, try
1497 * again. Otherwise bail with a timeout error.
1506 * If neither Error nor Initialialized are indicated
1507 * by the firmware keep waiting till we exaust our
1508 * timeout ... and then retry if we haven't exhausted
1511 pcie_fw = t4_read_reg(adap, A_PCIE_FW);
1512 if (!(pcie_fw & (F_PCIE_FW_ERR | F_PCIE_FW_INIT))) {
1523 * We either have an Error or Initialized condition
1524 * report errors preferentially.
1527 if (pcie_fw & F_PCIE_FW_ERR)
1528 *state = DEV_STATE_ERR;
1529 else if (pcie_fw & F_PCIE_FW_INIT)
1530 *state = DEV_STATE_INIT;
1534 * If we arrived before a Master PF was selected and
1535 * there's not a valid Master PF, grab its identity
1538 if (master_mbox == M_PCIE_FW_MASTER &&
1539 (pcie_fw & F_PCIE_FW_MASTER_VLD))
1540 master_mbox = G_PCIE_FW_MASTER(pcie_fw);
1549 * t4_fw_bye - end communication with FW
1550 * @adap: the adapter
1551 * @mbox: mailbox to use for the FW command
1553 * Issues a command to terminate communication with FW.
1555 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
1557 struct fw_bye_cmd c;
1559 memset(&c, 0, sizeof(c));
1560 INIT_CMD(c, BYE, WRITE);
1561 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1565 * t4_fw_reset - issue a reset to FW
1566 * @adap: the adapter
1567 * @mbox: mailbox to use for the FW command
1568 * @reset: specifies the type of reset to perform
1570 * Issues a reset command of the specified type to FW.
1572 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
1574 struct fw_reset_cmd c;
1576 memset(&c, 0, sizeof(c));
1577 INIT_CMD(c, RESET, WRITE);
1578 c.val = cpu_to_be32(reset);
1579 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1583 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
1584 * @adap: the adapter
1585 * @mbox: mailbox to use for the FW RESET command (if desired)
1586 * @force: force uP into RESET even if FW RESET command fails
1588 * Issues a RESET command to firmware (if desired) with a HALT indication
1589 * and then puts the microprocessor into RESET state. The RESET command
1590 * will only be issued if a legitimate mailbox is provided (mbox <=
1591 * M_PCIE_FW_MASTER).
1593 * This is generally used in order for the host to safely manipulate the
1594 * adapter without fear of conflicting with whatever the firmware might
1595 * be doing. The only way out of this state is to RESTART the firmware
1598 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
1603 * If a legitimate mailbox is provided, issue a RESET command
1604 * with a HALT indication.
1606 if (mbox <= M_PCIE_FW_MASTER) {
1607 struct fw_reset_cmd c;
1609 memset(&c, 0, sizeof(c));
1610 INIT_CMD(c, RESET, WRITE);
1611 c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
1612 c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
1613 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1617 * Normally we won't complete the operation if the firmware RESET
1618 * command fails but if our caller insists we'll go ahead and put the
1619 * uP into RESET. This can be useful if the firmware is hung or even
1620 * missing ... We'll have to take the risk of putting the uP into
1621 * RESET without the cooperation of firmware in that case.
1623 * We also force the firmware's HALT flag to be on in case we bypassed
1624 * the firmware RESET command above or we're dealing with old firmware
1625 * which doesn't have the HALT capability. This will serve as a flag
1626 * for the incoming firmware to know that it's coming out of a HALT
1627 * rather than a RESET ... if it's new enough to understand that ...
1629 if (ret == 0 || force) {
1630 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
1631 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
1636 * And we always return the result of the firmware RESET command
1637 * even when we force the uP into RESET ...
1643 * t4_fw_restart - restart the firmware by taking the uP out of RESET
1644 * @adap: the adapter
1645 * @mbox: mailbox to use for the FW RESET command (if desired)
1646 * @reset: if we want to do a RESET to restart things
1648 * Restart firmware previously halted by t4_fw_halt(). On successful
1649 * return the previous PF Master remains as the new PF Master and there
1650 * is no need to issue a new HELLO command, etc.
1652 * We do this in two ways:
1654 * 1. If we're dealing with newer firmware we'll simply want to take
1655 * the chip's microprocessor out of RESET. This will cause the
1656 * firmware to start up from its start vector. And then we'll loop
1657 * until the firmware indicates it's started again (PCIE_FW.HALT
1658 * reset to 0) or we timeout.
1660 * 2. If we're dealing with older firmware then we'll need to RESET
1661 * the chip since older firmware won't recognize the PCIE_FW.HALT
1662 * flag and automatically RESET itself on startup.
1664 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
1668 * Since we're directing the RESET instead of the firmware
1669 * doing it automatically, we need to clear the PCIE_FW.HALT
1672 t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);
1675 * If we've been given a valid mailbox, first try to get the
1676 * firmware to do the RESET. If that works, great and we can
1677 * return success. Otherwise, if we haven't been given a
1678 * valid mailbox or the RESET command failed, fall back to
1679 * hitting the chip with a hammer.
1681 if (mbox <= M_PCIE_FW_MASTER) {
1682 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
1684 if (t4_fw_reset(adap, mbox,
1685 F_PIORST | F_PIORSTMODE) == 0)
1689 t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
1694 t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
1695 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
1696 if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
1707 * t4_fixup_host_params_compat - fix up host-dependent parameters
1708 * @adap: the adapter
1709 * @page_size: the host's Base Page Size
1710 * @cache_line_size: the host's Cache Line Size
1711 * @chip_compat: maintain compatibility with designated chip
1713 * Various registers in the chip contain values which are dependent on the
1714 * host's Base Page and Cache Line Sizes. This function will fix all of
1715 * those registers with the appropriate values as passed in ...
1717 * @chip_compat is used to limit the set of changes that are made
1718 * to be compatible with the indicated chip release. This is used by
1719 * drivers to maintain compatibility with chip register settings when
1720 * the drivers haven't [yet] been updated with new chip support.
1722 int t4_fixup_host_params_compat(struct adapter *adap,
1723 unsigned int page_size,
1724 unsigned int cache_line_size,
1725 enum chip_type chip_compat)
1727 unsigned int page_shift = cxgbe_fls(page_size) - 1;
1728 unsigned int sge_hps = page_shift - 10;
1729 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
1730 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
1731 unsigned int fl_align_log = cxgbe_fls(fl_align) - 1;
1733 t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
1734 V_HOSTPAGESIZEPF0(sge_hps) |
1735 V_HOSTPAGESIZEPF1(sge_hps) |
1736 V_HOSTPAGESIZEPF2(sge_hps) |
1737 V_HOSTPAGESIZEPF3(sge_hps) |
1738 V_HOSTPAGESIZEPF4(sge_hps) |
1739 V_HOSTPAGESIZEPF5(sge_hps) |
1740 V_HOSTPAGESIZEPF6(sge_hps) |
1741 V_HOSTPAGESIZEPF7(sge_hps));
1743 if (is_t4(adap->params.chip) || is_t4(chip_compat))
1744 t4_set_reg_field(adap, A_SGE_CONTROL,
1745 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
1746 F_EGRSTATUSPAGESIZE,
1747 V_INGPADBOUNDARY(fl_align_log -
1748 X_INGPADBOUNDARY_SHIFT) |
1749 V_EGRSTATUSPAGESIZE(stat_len != 64));
1752 * T5 introduced the separation of the Free List Padding and
1753 * Packing Boundaries. Thus, we can select a smaller Padding
1754 * Boundary to avoid uselessly chewing up PCIe Link and Memory
1755 * Bandwidth, and use a Packing Boundary which is large enough
1756 * to avoid false sharing between CPUs, etc.
1758 * For the PCI Link, the smaller the Padding Boundary the
1759 * better. For the Memory Controller, a smaller Padding
1760 * Boundary is better until we cross under the Memory Line
1761 * Size (the minimum unit of transfer to/from Memory). If we
1762 * have a Padding Boundary which is smaller than the Memory
1763 * Line Size, that'll involve a Read-Modify-Write cycle on the
1764 * Memory Controller which is never good. For T5 the smallest
1765 * Padding Boundary which we can select is 32 bytes which is
1766 * larger than any known Memory Controller Line Size so we'll
1771 * N.B. T5 has a different interpretation of the "0" value for
1772 * the Packing Boundary. This corresponds to 16 bytes instead
1773 * of the expected 32 bytes. We never have a Packing Boundary
1774 * less than 32 bytes so we can't use that special value but
1775 * on the other hand, if we wanted 32 bytes, the best we can
1776 * really do is 64 bytes ...
1778 if (fl_align <= 32) {
1782 t4_set_reg_field(adap, A_SGE_CONTROL,
1783 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
1784 F_EGRSTATUSPAGESIZE,
1785 V_INGPADBOUNDARY(X_INGPCIEBOUNDARY_32B) |
1786 V_EGRSTATUSPAGESIZE(stat_len != 64));
1787 t4_set_reg_field(adap, A_SGE_CONTROL2,
1788 V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
1789 V_INGPACKBOUNDARY(fl_align_log -
1790 X_INGPACKBOUNDARY_SHIFT));
1794 * Adjust various SGE Free List Host Buffer Sizes.
1796 * The first four entries are:
1800 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
1801 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
1803 * For the single-MTU buffers in unpacked mode we need to include
1804 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
1805 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
1806 * Padding boundary. All of these are accommodated in the Factory
1807 * Default Firmware Configuration File but we need to adjust it for
1808 * this host's cache line size.
1810 t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
1811 t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
1812 (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align - 1)
1814 t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
1815 (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align - 1)
1818 t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));
1824 * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
1825 * @adap: the adapter
1826 * @page_size: the host's Base Page Size
1827 * @cache_line_size: the host's Cache Line Size
1829 * Various registers in T4 contain values which are dependent on the
1830 * host's Base Page and Cache Line Sizes. This function will fix all of
1831 * those registers with the appropriate values as passed in ...
1833 * This routine makes changes which are compatible with T4 chips.
1835 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
1836 unsigned int cache_line_size)
1838 return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
1843 * t4_fw_initialize - ask FW to initialize the device
1844 * @adap: the adapter
1845 * @mbox: mailbox to use for the FW command
1847 * Issues a command to FW to partially initialize the device. This
1848 * performs initialization that generally doesn't depend on user input.
1850 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
1852 struct fw_initialize_cmd c;
1854 memset(&c, 0, sizeof(c));
1855 INIT_CMD(c, INITIALIZE, WRITE);
1856 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
1860 * t4_query_params_rw - query FW or device parameters
1861 * @adap: the adapter
1862 * @mbox: mailbox to use for the FW command
1865 * @nparams: the number of parameters
1866 * @params: the parameter names
1867 * @val: the parameter values
1868 * @rw: Write and read flag
1870 * Reads the value of FW or device parameters. Up to 7 parameters can be
1873 static int t4_query_params_rw(struct adapter *adap, unsigned int mbox,
1874 unsigned int pf, unsigned int vf,
1875 unsigned int nparams, const u32 *params,
1880 struct fw_params_cmd c;
1881 __be32 *p = &c.param[0].mnem;
1886 memset(&c, 0, sizeof(c));
1887 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
1888 F_FW_CMD_REQUEST | F_FW_CMD_READ |
1889 V_FW_PARAMS_CMD_PFN(pf) |
1890 V_FW_PARAMS_CMD_VFN(vf));
1891 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
1893 for (i = 0; i < nparams; i++) {
1894 *p++ = cpu_to_be32(*params++);
1896 *p = cpu_to_be32(*(val + i));
1900 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
1902 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
1903 *val++ = be32_to_cpu(*p);
1907 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
1908 unsigned int vf, unsigned int nparams, const u32 *params,
1911 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
1915 * t4_set_params_timeout - sets FW or device parameters
1916 * @adap: the adapter
1917 * @mbox: mailbox to use for the FW command
1920 * @nparams: the number of parameters
1921 * @params: the parameter names
1922 * @val: the parameter values
1923 * @timeout: the timeout time
1925 * Sets the value of FW or device parameters. Up to 7 parameters can be
1926 * specified at once.
1928 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
1929 unsigned int pf, unsigned int vf,
1930 unsigned int nparams, const u32 *params,
1931 const u32 *val, int timeout)
1933 struct fw_params_cmd c;
1934 __be32 *p = &c.param[0].mnem;
1939 memset(&c, 0, sizeof(c));
1940 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
1941 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
1942 V_FW_PARAMS_CMD_PFN(pf) |
1943 V_FW_PARAMS_CMD_VFN(vf));
1944 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
1947 *p++ = cpu_to_be32(*params++);
1948 *p++ = cpu_to_be32(*val++);
1951 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
1954 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
1955 unsigned int vf, unsigned int nparams, const u32 *params,
1958 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
1959 FW_CMD_MAX_TIMEOUT);
1963 * t4_alloc_vi_func - allocate a virtual interface
1964 * @adap: the adapter
1965 * @mbox: mailbox to use for the FW command
1966 * @port: physical port associated with the VI
1967 * @pf: the PF owning the VI
1968 * @vf: the VF owning the VI
1969 * @nmac: number of MAC addresses needed (1 to 5)
1970 * @mac: the MAC addresses of the VI
1971 * @rss_size: size of RSS table slice associated with this VI
1972 * @portfunc: which Port Application Function MAC Address is desired
1973 * @idstype: Intrusion Detection Type
1975 * Allocates a virtual interface for the given physical port. If @mac is
1976 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
1977 * @mac should be large enough to hold @nmac Ethernet addresses, they are
1978 * stored consecutively so the space needed is @nmac * 6 bytes.
1979 * Returns a negative error number or the non-negative VI id.
1981 int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
1982 unsigned int port, unsigned int pf, unsigned int vf,
1983 unsigned int nmac, u8 *mac, unsigned int *rss_size,
1984 unsigned int portfunc, unsigned int idstype)
1989 memset(&c, 0, sizeof(c));
1990 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
1991 F_FW_CMD_WRITE | F_FW_CMD_EXEC |
1992 V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
1993 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
1994 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
1995 V_FW_VI_CMD_FUNC(portfunc));
1996 c.portid_pkd = V_FW_VI_CMD_PORTID(port);
1999 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2004 memcpy(mac, c.mac, sizeof(c.mac));
2007 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
2010 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
2013 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
2016 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
2021 *rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
2022 return G_FW_VI_CMD_VIID(cpu_to_be16(c.type_to_viid));
2026 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
2027 * @adap: the adapter
2028 * @mbox: mailbox to use for the FW command
2029 * @port: physical port associated with the VI
2030 * @pf: the PF owning the VI
2031 * @vf: the VF owning the VI
2032 * @nmac: number of MAC addresses needed (1 to 5)
2033 * @mac: the MAC addresses of the VI
2034 * @rss_size: size of RSS table slice associated with this VI
2036 * Backwards compatible and convieniance routine to allocate a Virtual
2037 * Interface with a Ethernet Port Application Function and Intrustion
2038 * Detection System disabled.
2040 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
2041 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
2042 unsigned int *rss_size)
2044 return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
2049 * t4_free_vi - free a virtual interface
2050 * @adap: the adapter
2051 * @mbox: mailbox to use for the FW command
2052 * @pf: the PF owning the VI
2053 * @vf: the VF owning the VI
2054 * @viid: virtual interface identifiler
2056 * Free a previously allocated virtual interface.
2058 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
2059 unsigned int vf, unsigned int viid)
2063 memset(&c, 0, sizeof(c));
2064 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
2065 F_FW_CMD_EXEC | V_FW_VI_CMD_PFN(pf) |
2066 V_FW_VI_CMD_VFN(vf));
2067 c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
2068 c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));
2070 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2074 * t4_set_rxmode - set Rx properties of a virtual interface
2075 * @adap: the adapter
2076 * @mbox: mailbox to use for the FW command
2078 * @mtu: the new MTU or -1
2079 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2080 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2081 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
2082 * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
2084 * @sleep_ok: if true we may sleep while awaiting command completion
2086 * Sets Rx properties of a virtual interface.
2088 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
2089 int mtu, int promisc, int all_multi, int bcast, int vlanex,
2092 struct fw_vi_rxmode_cmd c;
2094 /* convert to FW values */
2096 mtu = M_FW_VI_RXMODE_CMD_MTU;
2098 promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
2100 all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
2102 bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
2104 vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;
2106 memset(&c, 0, sizeof(c));
2107 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
2108 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2109 V_FW_VI_RXMODE_CMD_VIID(viid));
2110 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
2111 c.mtu_to_vlanexen = cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
2112 V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2113 V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2114 V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
2115 V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
2116 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2120 * t4_change_mac - modifies the exact-match filter for a MAC address
2121 * @adap: the adapter
2122 * @mbox: mailbox to use for the FW command
2124 * @idx: index of existing filter for old value of MAC address, or -1
2125 * @addr: the new MAC address value
2126 * @persist: whether a new MAC allocation should be persistent
2127 * @add_smt: if true also add the address to the HW SMT
2129 * Modifies an exact-match filter and sets it to the new MAC address if
2130 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
2131 * latter case the address is added persistently if @persist is %true.
2133 * Note that in general it is not possible to modify the value of a given
2134 * filter so the generic way to modify an address filter is to free the one
2135 * being used by the old address value and allocate a new filter for the
2136 * new address value.
2138 * Returns a negative error number or the index of the filter with the new
2139 * MAC value. Note that this index may differ from @idx.
2141 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2142 int idx, const u8 *addr, bool persist, bool add_smt)
2145 struct fw_vi_mac_cmd c;
2146 struct fw_vi_mac_exact *p = c.u.exact;
/* Size of the MPS TCAM bounds the range of valid filter indices
 * returned by the firmware for this chip generation.
 */
2147 int max_mac_addr = adap->params.arch.mps_tcam_size;
/* Negative @idx requests a fresh allocation; the allocation policy
 * (persistent across VI teardown or not) is encoded in a magic index
 * value understood by the firmware.
 */
2149 if (idx < 0) /* new allocation */
2150 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
2151 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
2153 memset(&c, 0, sizeof(c));
2154 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
2155 F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
2156 V_FW_VI_MAC_CMD_VIID(viid));
2157 c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
2158 p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
2159 V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
2160 V_FW_VI_MAC_CMD_IDX(idx));
2161 memcpy(p->macaddr, addr, sizeof(p->macaddr));
/* The command is issued with &c as the reply buffer too: on success
 * the firmware writes the actual filter index back into the same
 * exact-match entry, which is extracted below.
 */
2163 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2165 ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
/* NOTE(review): an index at or beyond the TCAM size indicates the
 * firmware could not place the address; presumably mapped to an
 * error code on a line missing from this extraction — verify.
 */
2166 if (ret >= max_mac_addr)
2173 * t4_enable_vi_params - enable/disable a virtual interface
2174 * @adap: the adapter
2175 * @mbox: mailbox to use for the FW command
2177 * @rx_en: 1=enable Rx, 0=disable Rx
2178 * @tx_en: 1=enable Tx, 0=disable Tx
2179 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
2181 * Enables/disables a virtual interface. Note that setting DCB Enable
2182 * only makes sense when enabling a Virtual Interface ...
2184 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
2185 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
2187 struct fw_vi_enable_cmd c;
2189 memset(&c, 0, sizeof(c));
/* Build an EXEC command targeting the given virtual interface. */
2190 c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
2191 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
2192 V_FW_VI_ENABLE_CMD_VIID(viid));
/* IEN/EEN carry the Rx/Tx enable bits; DCB_INFO requests delivery
 * of Data Center Bridging messages for this VI.
 */
2193 c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
2194 V_FW_VI_ENABLE_CMD_EEN(tx_en) |
2195 V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
/* Non-sleeping mailbox variant so this can be called from contexts
 * that must not block.
 */
2197 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
2201 * t4_enable_vi - enable/disable a virtual interface
2202 * @adap: the adapter
2203 * @mbox: mailbox to use for the FW command
2205 * @rx_en: 1=enable Rx, 0=disable Rx
2206 * @tx_en: 1=enable Tx, 0=disable Tx
2208 * Enables/disables a virtual interface. Note that setting DCB Enable
2209 * only makes sense when enabling a Virtual Interface ...
2211 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
2212 bool rx_en, bool tx_en)
/* Convenience wrapper: same as t4_enable_vi_params() with DCB disabled. */
2214 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
2218 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
2219 * @adap: the adapter
2220 * @mbox: mailbox to use for the FW command
2221 * @start: %true to enable the queues, %false to disable them
2222 * @pf: the PF owning the queues
2223 * @vf: the VF owning the queues
2224 * @iqid: ingress queue id
2225 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2226 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2228 * Starts or stops an ingress queue and its associated FLs, if any.
2230 int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
2231 unsigned int pf, unsigned int vf, unsigned int iqid,
2232 unsigned int fl0id, unsigned int fl1id)
2236 memset(&c, 0, sizeof(c));
/* EXEC FW_IQ_CMD on behalf of the owning PF/VF. */
2237 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
2238 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
2239 V_FW_IQ_CMD_VFN(vf));
/* Exactly one of IQSTART/IQSTOP is set, derived from @start. */
2240 c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) |
2241 V_FW_IQ_CMD_IQSTOP(!start) |
2243 c.iqid = cpu_to_be16(iqid);
2244 c.fl0id = cpu_to_be16(fl0id);
2245 c.fl1id = cpu_to_be16(fl1id);
2246 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2250 * t4_iq_free - free an ingress queue and its FLs
2251 * @adap: the adapter
2252 * @mbox: mailbox to use for the FW command
2253 * @pf: the PF owning the queues
2254 * @vf: the VF owning the queues
2255 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
2256 * @iqid: ingress queue id
2257 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2258 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2260 * Frees an ingress queue and its associated FLs, if any.
2262 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2263 unsigned int vf, unsigned int iqtype, unsigned int iqid,
2264 unsigned int fl0id, unsigned int fl1id)
2268 memset(&c, 0, sizeof(c));
2269 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
2270 F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
2271 V_FW_IQ_CMD_VFN(vf));
/* FREE (rather than IQSTART/IQSTOP) releases the queue resources. */
2272 c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
2273 c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
2274 c.iqid = cpu_to_be16(iqid);
2275 c.fl0id = cpu_to_be16(fl0id);
2276 c.fl1id = cpu_to_be16(fl1id);
2277 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2281 * t4_eth_eq_free - free an Ethernet egress queue
2282 * @adap: the adapter
2283 * @mbox: mailbox to use for the FW command
2284 * @pf: the PF owning the queue
2285 * @vf: the VF owning the queue
2286 * @eqid: egress queue id
2288 * Frees an Ethernet egress queue.
2290 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2291 unsigned int vf, unsigned int eqid)
2293 struct fw_eq_eth_cmd c;
2295 memset(&c, 0, sizeof(c));
/* EXEC FW_EQ_ETH_CMD with the FREE flag to release the egress queue. */
2296 c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
2297 F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
2298 V_FW_EQ_ETH_CMD_PFN(pf) |
2299 V_FW_EQ_ETH_CMD_VFN(vf));
2300 c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
2301 c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
2302 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2306 * t4_handle_fw_rpl - process a FW reply message
2307 * @adap: the adapter
2308 * @rpl: start of the FW message
2310 * Processes a FW message, such as link state change messages.
2312 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
/* The opcode is always the first byte of any FW message. */
2314 u8 opcode = *(const u8 *)rpl;
2317 * This might be a port command ... this simplifies the following
2318 * conditionals ... We can get away with pre-dereferencing
2319 * action_to_len16 because it's in the first 16 bytes and all messages
2320 * will be at least that long.
2322 const struct fw_port_cmd *p = (const void *)rpl;
2323 unsigned int action =
2324 G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));
2326 if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
2327 /* link/module state change message */
2328 int speed = 0, fc = 0, i;
2329 int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
2330 struct port_info *pi = NULL;
2331 struct link_config *lc;
2332 u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
2333 int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
2334 u32 mod = G_FW_PORT_CMD_MODTYPE(stat);
/* Accumulate pause (flow-control) capabilities advertised by FW. */
2336 if (stat & F_FW_PORT_CMD_RXPAUSE)
2338 if (stat & F_FW_PORT_CMD_TXPAUSE)
/* Decode the highest link speed bit reported in the status word into
 * the DPDK ETH_SPEED_NUM_* value.
 */
2340 if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
2341 speed = ETH_SPEED_NUM_100M;
2342 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
2343 speed = ETH_SPEED_NUM_1G;
2344 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
2345 speed = ETH_SPEED_NUM_10G;
2346 else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
2347 speed = ETH_SPEED_NUM_40G;
/* Find the port_info whose Tx channel matches the reporting port. */
2349 for_each_port(adap, i) {
2350 pi = adap2pinfo(adap, i);
2351 if (pi->tx_chan == chan)
/* Module (transceiver) type changed: notify the OS layer. */
2356 if (mod != pi->mod_type) {
2358 t4_os_portmod_changed(adap, i);
2360 if (link_ok != lc->link_ok || speed != lc->speed ||
2361 fc != lc->fc) { /* something changed */
2362 if (!link_ok && lc->link_ok) {
/* Human-readable link-down reason strings, indexed by the
 * FW LINKDNRC code. NOTE(review): some table entries appear
 * to be missing from this extraction — verify the indices
 * line up with the firmware's reason codes.
 */
2363 static const char * const reason[] = {
2366 "Auto-negotiation Failure",
2368 "Insufficient Airflow",
2369 "Unable To Determine Reason",
2370 "No RX Signal Detected",
2373 unsigned int rc = G_FW_PORT_CMD_LINKDNRC(stat);
2375 dev_warn(adap, "Port %d link down, reason: %s\n",
/* Record the new link state in the cached link_config. */
2378 lc->link_ok = link_ok;
2381 lc->supported = be16_to_cpu(p->u.info.pcap);
2384 dev_warn(adap, "Unknown firmware reply %d\n", opcode);
/* Reset the software link configuration of port @idx back to defaults
 * (no requested speed or flow control). NOTE(review): the remainder of
 * this function is not visible in this extraction.
 */
2390 void t4_reset_link_config(struct adapter *adap, int idx)
2392 struct port_info *pi = adap2pinfo(adap, idx);
2393 struct link_config *lc = &pi->link_cfg;
2396 lc->requested_speed = 0;
2397 lc->requested_fc = 0;
2403 * init_link_config - initialize a link's SW state
2404 * @lc: structure holding the link state
2405 * @caps: link capabilities
2407 * Initializes the SW state maintained for each link, including the link's
2408 * capabilities and default speed/flow-control/autonegotiation settings.
2410 static void init_link_config(struct link_config *lc,
2413 lc->supported = caps;
2414 lc->requested_speed = 0;
2416 lc->requested_fc = 0;
/* Enable autonegotiation only if the port advertises the ANEG
 * capability; otherwise force it off and advertise nothing.
 */
2418 if (lc->supported & FW_PORT_CAP_ANEG) {
2419 lc->advertising = lc->supported & ADVERT_MASK;
2420 lc->autoneg = AUTONEG_ENABLE;
2422 lc->advertising = 0;
2423 lc->autoneg = AUTONEG_DISABLE;
2428 * t4_wait_dev_ready - wait till to reads of registers work
2430 * Right after the device is RESET is can take a small amount of time
2431 * for it to respond to register reads. Until then, all reads will
2432 * return either 0xff...ff or 0xee...ee. Return an error if reads
2433 * don't work within a reasonable time frame.
2435 static int t4_wait_dev_ready(struct adapter *adapter)
/* First probe: PL_WHOAMI returns all-ones (no PCI response) or the
 * CIM "no access" pattern while the chip is still coming out of reset.
 */
2439 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
2441 if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
/* Retry once after a delay (delay line not visible in this
 * extraction); succeed iff the register now reads a sane value.
 */
2445 whoami = t4_read_reg(adapter, A_PL_WHOAMI);
2446 return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
/* Part of struct flash_desc: JEDEC manufacturer/device ID used to match
 * a supported serial flash part.
 */
2451 u32 vendor_and_model_id;
2455 int t4_get_flash_params(struct adapter *adapter)
2458 * Table for non-Numonix supported flash parts. Numonix parts are left
2459 * to the preexisting well-tested code. All flash parts have 64KB
2462 static struct flash_desc supported_flash[] = {
2463 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
/* Issue the serial-flash READ ID command and fetch the 3-byte JEDEC
 * ID, then release the SF interface lock.
 */
2470 ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
2472 ret = sf1_read(adapter, 3, 0, 1, &info);
2473 t4_write_reg(adapter, A_SF_OP, 0); /* unlock SF */
/* Known non-Numonix part: take size/sector count from the table. */
2477 for (i = 0; i < ARRAY_SIZE(supported_flash); ++i)
2478 if (supported_flash[i].vendor_and_model_id == info) {
2479 adapter->params.sf_size = supported_flash[i].size_mb;
2480 adapter->params.sf_nsec =
2481 adapter->params.sf_size / SF_SEC_SIZE;
/* Fallback path handles only Numonix parts (manufacturer ID 0x20). */
2485 if ((info & 0xff) != 0x20) /* not a Numonix flash */
2487 info >>= 16; /* log2 of size */
/* Numonix encodes log2(size) in the device-ID byte: 0x14..0x17 map
 * to 1..8 MB with 64KB sectors; 0x18 is a 16MB part with 64 sectors.
 */
2488 if (info >= 0x14 && info < 0x18)
2489 adapter->params.sf_nsec = 1 << (info - 16);
2490 else if (info == 0x18)
2491 adapter->params.sf_nsec = 64;
2494 adapter->params.sf_size = 1 << info;
2497 * We should reject adapters with FLASHes which are too small. So, emit
2500 if (adapter->params.sf_size < FLASH_MIN_SIZE) {
2501 dev_warn(adapter, "WARNING!!! FLASH size %#x < %#x!!!\n",
2502 adapter->params.sf_size, FLASH_MIN_SIZE);
/* Program the PCIe Completion Timeout range via the Device Control 2
 * register in the PCI Express capability, if that capability exists.
 * The timeout value itself is supplied by the caller (range parameter
 * not visible in this extraction).
 */
2508 static void set_pcie_completion_timeout(struct adapter *adapter,
2514 pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP);
/* Read-modify-write DEVCTL2 so unrelated bits are preserved. */
2516 t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val);
2519 t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val);
2524 * t4_prep_adapter - prepare SW and HW for operation
2525 * @adapter: the adapter
2527 * Initialize adapter SW state for the various HW modules, set initial
2528 * values for some adapter tunables, take PHYs out of reset, and
2529 * initialize the MDIO interface.
2531 int t4_prep_adapter(struct adapter *adapter)
/* Device must answer register reads before anything else is done. */
2536 ret = t4_wait_dev_ready(adapter)
2540 pl_rev = G_REV(t4_read_reg(adapter, A_PL_REV));
2541 adapter->params.pci.device_id = adapter->pdev->id.device_id;
2542 adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;
2545 * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
2546 * ADAPTER (VERSION << 4 | REVISION)
2548 ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
2549 adapter->params.chip = 0;
/* T5 branch: record chip code and the T5-specific architecture
 * parameters (SGE doorbell flags, MPS TCAM/replication sizes, channel
 * and VF counts). Other chip branches are not visible in this
 * extraction.
 */
2552 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
2553 adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
2554 adapter->params.arch.mps_tcam_size =
2555 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
2556 adapter->params.arch.mps_rplc_size = 128;
2557 adapter->params.arch.nchan = NCHAN;
2558 adapter->params.arch.vfcount = 128;
/* Unrecognized device ID: reject the adapter. */
2561 dev_err(adapter, "%s: Device %d is not supported\n",
2562 __func__, adapter->params.pci.device_id);
2566 adapter->params.pci.vpd_cap_addr =
2567 t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD);
2569 ret = t4_get_flash_params(adapter);
2573 adapter->params.cim_la_size = CIMLA_SIZE;
2575 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
2578 * Default port and clock for debugging in case we can't reach FW.
2580 adapter->params.nports = 1;
2581 adapter->params.portvec = 1;
2582 adapter->params.vpd.cclk = 50000;
2584 /* Set pci completion timeout value to 4 seconds. */
2585 set_pcie_completion_timeout(adapter, 0xd);
2590 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
2591 * @adapter: the adapter
2592 * @qid: the Queue ID
2593 * @qtype: the Ingress or Egress type for @qid
2594 * @pbar2_qoffset: BAR2 Queue Offset
2595 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
2597 * Returns the BAR2 SGE Queue Registers information associated with the
2598 * indicated Absolute Queue ID. These are passed back in return value
2599 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
2600 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
2602 * This may return an error which indicates that BAR2 SGE Queue
2603 * registers aren't available. If an error is not returned, then the
2604 * following values are returned:
2606 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
2607 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
2609 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
2610 * require the "Inferred Queue ID" ability may be used. E.g. the
2611 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
2612 * then these "Inferred Queue ID" register may not be used.
2614 int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
2615 enum t4_bar2_qtype qtype, u64 *pbar2_qoffset,
2616 unsigned int *pbar2_qid)
2618 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
2619 u64 bar2_page_offset, bar2_qoffset;
2620 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
2623 * T4 doesn't support BAR2 SGE Queue registers.
2625 if (is_t4(adapter->params.chip))
2629 * Get our SGE Page Size parameters.
2631 page_shift = adapter->params.sge.hps + 10;
2632 page_size = 1 << page_shift;
2635 * Get the right Queues per Page parameters for our Queue.
2637 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS ?
2638 adapter->params.sge.eq_qpp :
2639 adapter->params.sge.iq_qpp);
2640 qpp_mask = (1 << qpp_shift) - 1;
2643 * Calculate the basics of the BAR2 SGE Queue register area:
2644 * o The BAR2 page the Queue registers will be in.
2645 * o The BAR2 Queue ID.
2646 * o The BAR2 Queue ID Offset into the BAR2 page.
2648 bar2_page_offset = ((qid >> qpp_shift) << page_shift);
2649 bar2_qid = qid & qpp_mask;
2650 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
2653 * If the BAR2 Queue ID Offset is less than the Page Size, then the
2654 * hardware will infer the Absolute Queue ID simply from the writes to
2655 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
2656 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
2657 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
2658 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
2659 * from the BAR2 Page and BAR2 Queue ID.
2661 * One important censequence of this is that some BAR2 SGE registers
2662 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
2663 * there. But other registers synthesize the SGE Queue ID purely
2664 * from the writes to the registers -- the Write Combined Doorbell
2665 * Buffer is a good example. These BAR2 SGE Registers are only
2666 * available for those BAR2 SGE Register areas where the SGE Absolute
2667 * Queue ID can be inferred from simple writes.
2669 bar2_qoffset = bar2_page_offset;
2670 bar2_qinferred = (bar2_qid_offset < page_size);
/* Inferred case: fold the per-queue offset into the BAR2 offset and
 * report BAR2 Queue ID 0 (the ID-assignment line is not visible in
 * this extraction).
 */
2671 if (bar2_qinferred) {
2672 bar2_qoffset += bar2_qid_offset;
2676 *pbar2_qoffset = bar2_qoffset;
2677 *pbar2_qid = bar2_qid;
2682 * t4_init_sge_params - initialize adap->params.sge
2683 * @adapter: the adapter
2685 * Initialize various fields of the adapter's SGE Parameters structure.
2687 int t4_init_sge_params(struct adapter *adapter)
2689 struct sge_params *sge_params = &adapter->params.sge;
2691 unsigned int s_hps, s_qpp;
2694 * Extract the SGE Page Size for our PF.
2696 hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
/* Each PF has its own HOSTPAGESIZE field; compute this PF's shift
 * from the per-PF field stride.
 */
2697 s_hps = (S_HOSTPAGESIZEPF0 + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) *
2699 sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);
2702 * Extract the SGE Egress and Ingess Queues Per Page for our PF.
2704 s_qpp = (S_QUEUESPERPAGEPF0 +
2705 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
2706 qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
2707 sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
2708 qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
2709 sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
2715 * t4_init_tp_params - initialize adap->params.tp
2716 * @adap: the adapter
2718 * Initialize various fields of the adapter's TP Parameters structure.
2720 int t4_init_tp_params(struct adapter *adap)
/* Cache the TP timer and delayed-ACK resolutions. */
2725 v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
2726 adap->params.tp.tre = G_TIMERRESOLUTION(v);
2727 adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);
2729 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
2730 for (chan = 0; chan < NCHAN; chan++)
2731 adap->params.tp.tx_modq[chan] = chan;
2734 * Cache the adapter's Compressed Filter Mode and global Incress
2737 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2738 &adap->params.tp.vlan_pri_map, 1, A_TP_VLAN_PRI_MAP);
2739 t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
2740 &adap->params.tp.ingress_config, 1,
2741 A_TP_INGRESS_CONFIG);
2744 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
2745 * shift positions of several elements of the Compressed Filter Tuple
2746 * for this adapter which we need frequently ...
2748 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
2749 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
2750 adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
2751 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
2755 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
2756 * represents the presense of an Outer VLAN instead of a VNIC ID.
2758 if ((adap->params.tp.ingress_config & F_VNIC) == 0)
2759 adap->params.tp.vnic_shift = -1;
2765 * t4_filter_field_shift - calculate filter field shift
2766 * @adap: the adapter
2767 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
2769 * Return the shift position of a filter field within the Compressed
2770 * Filter Tuple. The filter field is specified via its selection bit
2771 * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
2773 int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel)
2775 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
/* Field not enabled in the current filter mode: no valid shift. */
2779 if ((filter_mode & filter_sel) == 0)
/* Walk the selection bits below @filter_sel, accumulating the widths
 * of every field that is enabled — their sum is the target field's
 * shift. NOTE(review): the case labels preceding each W_FT_* width
 * are not visible in this extraction; each body presumably belongs
 * to the matching F_* selector — verify against the full file.
 */
2782 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
2783 switch (filter_mode & sel) {
2785 field_shift += W_FT_FCOE;
2788 field_shift += W_FT_PORT;
2791 field_shift += W_FT_VNIC_ID;
2794 field_shift += W_FT_VLAN;
2797 field_shift += W_FT_TOS;
2800 field_shift += W_FT_PROTOCOL;
2803 field_shift += W_FT_ETHERTYPE;
2806 field_shift += W_FT_MACMATCH;
2809 field_shift += W_FT_MPSHITTYPE;
2811 case F_FRAGMENTATION:
2812 field_shift += W_FT_FRAGMENTATION;
/* Read back each port's RSS Virtual Interface configuration from the
 * firmware and cache the default-queue/UDP-enable mode word in the
 * per-port state.
 */
2819 int t4_init_rss_mode(struct adapter *adap, int mbox)
2822 struct fw_rss_vi_config_cmd rvc;
2824 memset(&rvc, 0, sizeof(rvc));
2826 for_each_port(adap, i) {
2827 struct port_info *p = adap2pinfo(adap, i);
/* READ request for this port's VI RSS configuration. */
2829 rvc.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2830 F_FW_CMD_REQUEST | F_FW_CMD_READ |
2831 V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
2832 rvc.retval_len16 = htonl(FW_LEN16(rvc));
2833 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
2836 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
2841 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
2845 struct fw_port_cmd c;
2847 memset(&c, 0, sizeof(c));
2849 for_each_port(adap, i) {
2850 unsigned int rss_size = 0;
2851 struct port_info *p = adap2pinfo(adap, i);
2853 while ((adap->params.portvec & (1 << j)) == 0)
2856 c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
2857 F_FW_CMD_REQUEST | F_FW_CMD_READ |
2858 V_FW_PORT_CMD_PORTID(j));
2859 c.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(
2860 FW_PORT_ACTION_GET_PORT_INFO) |
2862 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2866 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
2872 p->rss_size = rss_size;
2873 t4_os_set_hw_addr(adap, i, addr);
2875 ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
2876 p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
2877 G_FW_PORT_CMD_MDIOADDR(ret) : -1;
2878 p->port_type = G_FW_PORT_CMD_PTYPE(ret);
2879 p->mod_type = FW_PORT_MOD_TYPE_NA;
2881 init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));