/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2014-2015 Chelsio Communications.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Chelsio Communications nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <netinet/in.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_byteorder.h>

#include "common.h"
#include "t4_regs.h"
#include "t4_regs_values.h"
#include "t4fw_interface.h"

static void init_link_config(struct link_config *lc, unsigned int caps);

/**
 * t4_read_mtu_tbl - returns the values in the HW path MTU table
 * @adap: the adapter
 * @mtus: where to store the MTU values
 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 * Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
	u32 v;
	int i;

	for (i = 0; i < NMTUS; ++i) {
		t4_write_reg(adap, A_TP_MTU_TABLE,
			     V_MTUINDEX(0xff) | V_MTUVALUE(i));
		v = t4_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = G_MTUVALUE(v);
		if (mtu_log)
			mtu_log[i] = G_MTUWIDTH(v);
	}
}

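/*
 * Example usage (an illustrative sketch, not part of the driver proper):
 * dumping the current path-MTU table of an already-initialized adapter
 * "adap":
 *
 *	u16 mtus[NMTUS];
 *	u8 mtu_log[NMTUS];
 *	int i;
 *
 *	t4_read_mtu_tbl(adap, mtus, mtu_log);
 *	for (i = 0; i < NMTUS; i++)
 *		dev_debug(adap, "MTU[%d] = %u (log2 %u)\n",
 *			  i, mtus[i], mtu_log[i]);
 */
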
/**
 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 * @adap: the adapter
 * @addr: the indirect TP register address
 * @mask: specifies the field within the register to modify
 * @val: new value for the field
 *
 * Sets a field of an indirect TP register to the given value.
 */
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
			    unsigned int mask, unsigned int val)
{
	t4_write_reg(adap, A_TP_PIO_ADDR, addr);
	val |= t4_read_reg(adap, A_TP_PIO_DATA) & ~mask;
	t4_write_reg(adap, A_TP_PIO_DATA, val);
}

/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 * t4_load_mtus - write the MTU and congestion control HW tables
 * @adap: the adapter
 * @mtus: the values for the MTU table
 * @alpha: the values for the congestion control alpha parameter
 * @beta: the values for the congestion control beta parameter
 *
 * Write the HW MTU table with the supplied MTUs and the high-speed
 * congestion control table with the supplied alpha, beta, and MTUs.
 * We write the two tables together because the additive increments
 * depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		unsigned int log2 = cxgbe_fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, A_TP_MTU_TABLE, V_MTUINDEX(i) |
			     V_MTUWIDTH(log2) | V_MTUVALUE(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t4_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}

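/*
 * Example usage (an illustrative sketch): reloading the MTU and congestion
 * control tables with the default alpha/beta values produced by the
 * init_cong_ctrl() helper defined later in this file:
 *
 *	unsigned short alpha[NCCTRL_WIN], beta[NCCTRL_WIN];
 *	u16 mtus[NMTUS];
 *
 *	t4_read_mtu_tbl(adap, mtus, NULL);
 *	init_cong_ctrl(alpha, beta);
 *	t4_load_mtus(adap, mtus, alpha, beta);
 */
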
/**
 * t4_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times. If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there. Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

/**
 * t4_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void)t4_read_reg(adapter, addr);      /* flush */
}

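/*
 * Example usage (an illustrative sketch): a read-modify-write of a single
 * register field; only the bits in the mask are touched, all other bits
 * keep their value. Here, hypothetically, clearing bit 0 of A_PL_INT_MAP0:
 *
 *	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1, 0);
 */
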
/**
 * t4_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
		      unsigned int data_reg, u32 *vals, unsigned int nregs,
		      unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t4_write_indirect - write indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect addresses
 * @data_reg: register holding the value for the indirect registers
 * @vals: values to write
 * @nregs: how many indirect registers to write
 * @start_idx: address of first indirect register to write
 *
 * Writes a sequential block of registers that are accessed indirectly
 * through an address/data register pair.
 */
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
		       unsigned int data_reg, const u32 *vals,
		       unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx++);
		t4_write_reg(adap, data_reg, *vals++);
	}
}

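/*
 * Example usage (an illustrative sketch): reading four consecutive TP PIO
 * registers through the A_TP_PIO_ADDR/A_TP_PIO_DATA pair, starting at a
 * hypothetical indirect address "start":
 *
 *	u32 vals[4];
 *
 *	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA, vals, 4, start);
 */
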
/**
 * t4_report_fw_error - report firmware error
 * @adap: the adapter
 *
 * The adapter firmware can indicate error conditions to the host.
 * If the firmware has indicated an error, print out the reason for
 * the firmware error.
 */
static void t4_report_fw_error(struct adapter *adap)
{
	static const char * const reason[] = {
		"Crash",			/* PCIE_FW_EVAL_CRASH */
		"During Device Preparation",	/* PCIE_FW_EVAL_PREP */
		"During Device Configuration",	/* PCIE_FW_EVAL_CONF */
		"During Device Initialization",	/* PCIE_FW_EVAL_INIT */
		"Unexpected Event",	/* PCIE_FW_EVAL_UNEXPECTEDEVENT */
		"Insufficient Airflow",		/* PCIE_FW_EVAL_OVERHEAT */
		"Device Shutdown",	/* PCIE_FW_EVAL_DEVICESHUTDOWN */
		"Reserved",			/* reserved */
	};
	u32 pcie_fw;

	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	if (pcie_fw & F_PCIE_FW_ERR)
		pr_err("%s: Firmware reports adapter error: %s\n",
		       __func__, reason[G_PCIE_FW_EVAL(pcie_fw)]);
}

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = htobe64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	pr_warn("FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
		be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
}

#define X_CIM_PF_NOACCESS 0xeeeeeeee

/*
 * If the Host OS Driver needs locking around accesses to the mailbox, this
 * can be turned on via the T4_OS_NEEDS_MBOX_LOCKING CPP define ...
 */
/* makes single-statement usage a bit cleaner ... */
#ifdef T4_OS_NEEDS_MBOX_LOCKING
#define T4_OS_MBOX_LOCKING(x) x
#else
#define T4_OS_MBOX_LOCKING(x) do {} while (0)
#endif

/**
 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 * @adap: the adapter
 * @mbox: index of the mailbox to use
 * @cmd: the command to write
 * @size: command length in bytes
 * @rpl: where to optionally store the reply
 * @sleep_ok: if true we may sleep while awaiting command completion
 * @timeout: time to wait for command to finish before timing out
 *           (negative implies @sleep_ok=false)
 *
 * Sends the given command to FW through the selected mailbox and waits
 * for the FW to execute the command. If @rpl is not %NULL it is used to
 * store the FW's reply to the command. The command and its optional
 * reply are of the same length. Some FW commands like RESET and
 * INITIALIZE can take a considerable amount of time to execute.
 * @sleep_ok determines whether we may sleep while awaiting the response.
 * If sleeping is allowed we use progressive backoff otherwise we spin.
 * Note that passing in a negative @timeout is an alternate mechanism
 * for specifying @sleep_ok=false. This is useful when a higher level
 * interface allows for specification of @timeout but not @sleep_ok ...
 *
 * Returns 0 on success or a negative errno on failure. A
 * failure can happen either because we are not able to execute the
 * command or FW executes it but signals an error. In the latter case
 * the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
			    const void __attribute__((__may_alias__)) *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/*
	 * We delay in small increments at first in an effort to maintain
	 * responsiveness for simple, fast executing commands but then back
	 * off to larger delays to a maximum retry delay.
	 */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100
	};

	u32 v;
	u64 res;
	int i, ms;
	unsigned int delay_idx;
	__be64 *temp = (__be64 *)malloc(size * sizeof(char));
	__be64 *p = temp;
	u32 data_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, A_CIM_PF_MAILBOX_CTRL);
	u32 ctl;
	struct mbox_entry entry;
	u32 pcie_fw = 0;

	if (!temp)
		return -ENOMEM;

	if ((size & 15) || size > MBOX_LEN) {
		free(temp);
		return -EINVAL;
	}

	memset(p, 0, size);
	memcpy(p, (const __be64 *)cmd, size);

	/*
	 * If we have a negative timeout, that implies that we can't sleep.
	 */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

#ifdef T4_OS_NEEDS_MBOX_LOCKING
	/*
	 * Queue ourselves onto the mailbox access list. When our entry is at
	 * the front of the list, we have rights to access the mailbox. So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	t4_os_atomic_add_tail(&entry, &adap->mbox_list, &adap->mbox_lock);

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; ; i += ms) {
		/*
		 * If we've waited too long, return a busy indication. This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start. We very rarely
		 * contend on access to the mailbox ... Also check for a
		 * firmware error which we'll report as a device error.
		 */
		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		if (i > 4 * timeout || (pcie_fw & F_PCIE_FW_ERR)) {
			t4_os_atomic_list_del(&entry, &adap->mbox_list,
					      &adap->mbox_lock);
			t4_report_fw_error(adap);
			free(temp);
			return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -EBUSY;
		}

		/*
		 * If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (t4_os_list_first_entry(&adap->mbox_list) == &entry)
			break;

		/*
		 * Delay for a bit before checking again ...
		 */
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			rte_delay_ms(ms);
		}
	}
#endif /* T4_OS_NEEDS_MBOX_LOCKING */

	/*
	 * Attempt to gain access to the mailbox.
	 */
	for (i = 0; i < 4; i++) {
		ctl = t4_read_reg(adap, ctl_reg);
		v = G_MBOWNER(ctl);
		if (v != X_MBOWNER_NONE)
			break;
	}

	/*
	 * If we were unable to gain access, dequeue ourselves from the
	 * mailbox atomic access list and report the error to our caller.
	 */
	if (v != X_MBOWNER_PL) {
		T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
							 &adap->mbox_list,
							 &adap->mbox_lock));
		t4_report_fw_error(adap);
		free(temp);
		return (v == X_MBOWNER_FW ? -EBUSY : -ETIMEDOUT);
	}

	/*
	 * If we gain ownership of the mailbox and there's a "valid" message
	 * in it, this is likely an asynchronous error message from the
	 * firmware. So we'll report that and then proceed on with attempting
	 * to issue our own command ... which may well fail if the error
	 * presaged the firmware crashing ...
	 */
	if (ctl & F_MBMSGVALID) {
		dev_err(adap, "found VALID command in mbox %u: "
			"%llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
			(unsigned long long)t4_read_reg64(adap, data_reg),
			(unsigned long long)t4_read_reg64(adap, data_reg + 8),
			(unsigned long long)t4_read_reg64(adap, data_reg + 16),
			(unsigned long long)t4_read_reg64(adap, data_reg + 24),
			(unsigned long long)t4_read_reg64(adap, data_reg + 32),
			(unsigned long long)t4_read_reg64(adap, data_reg + 40),
			(unsigned long long)t4_read_reg64(adap, data_reg + 48),
			(unsigned long long)t4_read_reg64(adap, data_reg + 56));
	}

	/*
	 * Copy in the new mailbox command and send it on its way ...
	 */
	for (i = 0; i < size; i += 8, p++)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

	CXGBE_DEBUG_MBOX(adap, "%s: mbox %u: %016llx %016llx %016llx %016llx "
			 "%016llx %016llx %016llx %016llx\n", __func__, (mbox),
			 (unsigned long long)t4_read_reg64(adap, data_reg),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
			 (unsigned long long)t4_read_reg64(adap, data_reg + 56));

	t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	/*
	 * Loop waiting for the reply; bail out if we time out or the firmware
	 * reports an error.
	 */
	pcie_fw = t4_read_reg(adap, A_PCIE_FW);
	for (i = 0; i < timeout && !(pcie_fw & F_PCIE_FW_ERR); i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			rte_delay_ms(ms);
		}

		pcie_fw = t4_read_reg(adap, A_PCIE_FW);
		v = t4_read_reg(adap, ctl_reg);
		if (v == X_CIM_PF_NOACCESS)
			continue;
		if (G_MBOWNER(v) == X_MBOWNER_PL) {
			if (!(v & F_MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg,
					     V_MBOWNER(X_MBOWNER_NONE));
				continue;
			}

			CXGBE_DEBUG_MBOX(adap,
			"%s: mbox %u: %016llx %016llx %016llx %016llx "
			"%016llx %016llx %016llx %016llx\n", __func__, (mbox),
			(unsigned long long)t4_read_reg64(adap, data_reg),
			(unsigned long long)t4_read_reg64(adap, data_reg + 8),
			(unsigned long long)t4_read_reg64(adap, data_reg + 16),
			(unsigned long long)t4_read_reg64(adap, data_reg + 24),
			(unsigned long long)t4_read_reg64(adap, data_reg + 32),
			(unsigned long long)t4_read_reg64(adap, data_reg + 40),
			(unsigned long long)t4_read_reg64(adap, data_reg + 48),
			(unsigned long long)t4_read_reg64(adap, data_reg + 56));

			CXGBE_DEBUG_MBOX(adap,
				"command %#x completed in %d ms (%ssleeping)\n",
				*(const u8 *)cmd,
				i + ms, sleep_ok ? "" : "non-");

			res = t4_read_reg64(adap, data_reg);
			if (G_FW_CMD_OP(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = V_FW_CMD_RETVAL(EIO);
			} else if (rpl) {
				get_mbox_rpl(adap, rpl, size / 8, data_reg);
			}
			t4_write_reg(adap, ctl_reg, V_MBOWNER(X_MBOWNER_NONE));
			T4_OS_MBOX_LOCKING(
				t4_os_atomic_list_del(&entry, &adap->mbox_list,
						      &adap->mbox_lock));
			free(temp);
			return -G_FW_CMD_RETVAL((int)res);
		}
	}

	/*
	 * We timed out waiting for a reply to our mailbox command. Report
	 * the error and also check to see if the firmware reported any
	 * errors ...
	 */
	dev_err(adap, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	T4_OS_MBOX_LOCKING(t4_os_atomic_list_del(&entry,
						 &adap->mbox_list,
						 &adap->mbox_lock));
	t4_report_fw_error(adap);
	free(temp);
	return (pcie_fw & F_PCIE_FW_ERR) ? -ENXIO : -ETIMEDOUT;
}

int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
				       FW_CMD_MAX_TIMEOUT);
}

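/*
 * Example usage (an illustrative sketch): issuing a firmware RESET command
 * through mailbox "mbox" of an adapter "adap"; this is essentially what the
 * t4_fw_reset() helper later in this file does:
 *
 *	struct fw_reset_cmd c;
 *	int ret;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_RESET_CMD) |
 *				    F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
 *	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
 *	c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
 *	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, true);
 */
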
/**
 * t4_config_rss_range - configure a portion of the RSS mapping table
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: virtual interface whose RSS subtable is to be written
 * @start: start entry in the table to write
 * @n: how many table entries to write
 * @rspq: values for the "response queue" (Ingress Queue) lookup table
 * @nrspq: number of values in @rspq
 *
 * Programs the selected part of the VI's RSS mapping table with the
 * provided values. If @nrspq < @n the supplied values are used repeatedly
 * until the full table range is populated.
 *
 * The caller must ensure the values in @rspq are in the range allowed for
 * @viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				     V_FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/*
	 * Each firmware RSS command can accommodate up to 32 RSS Ingress
	 * Queue Identifiers. These Ingress Queue IDs are packed three to
	 * a 32-bit word as 10-bit values with the upper remaining 2 bits
	 * reserved.
	 */
	while (n > 0) {
		int nq = min(n, 32);
		int nq_packed = 0;
		__be32 *qp = &cmd.iq0_to_iq2;

		/*
		 * Set up the firmware RSS command header to send the next
		 * "nq" Ingress Queue IDs to the firmware.
		 */
		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		/*
		 * "nq" more done for the start of the next loop.
		 */
		start += nq;
		n -= nq;

		/*
		 * While there are still Ingress Queue IDs to stuff into the
		 * current firmware RSS command, retrieve them from the
		 * Ingress Queue ID array and insert them into the command.
		 */
		while (nq > 0) {
			/*
			 * Grab up to the next 3 Ingress Queue IDs (wrapping
			 * around the Ingress Queue ID array if necessary) and
			 * insert them into the firmware RSS command at the
			 * current 3-tuple position within the command.
			 */
			u16 qbuf[3];
			u16 *qbp = qbuf;
			int nqbuf = min(3, nq);

			nq -= nqbuf;
			qbuf[0] = 0;
			qbuf[1] = 0;
			qbuf[2] = 0;
			while (nqbuf && nq_packed < 32) {
				nqbuf--;
				nq_packed++;
				*qbp++ = *rsp++;
				if (rsp >= rsp_end)
					rsp = rspq;
			}
			*qp++ = cpu_to_be32(V_FW_RSS_IND_TBL_CMD_IQ0(qbuf[0]) |
					    V_FW_RSS_IND_TBL_CMD_IQ1(qbuf[1]) |
					    V_FW_RSS_IND_TBL_CMD_IQ2(qbuf[2]));
		}

		/*
		 * Send this portion of the RSS table update to the firmware;
		 * bail out on any errors.
		 */
		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}

	return 0;
}

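/*
 * Example usage (an illustrative sketch): spreading an RSS slice of
 * "rss_size" entries for VI "viid" across four ingress queue IDs (the
 * hypothetical iq0..iq3 below); the IDs are repeated until the whole
 * range is filled:
 *
 *	u16 rss[4] = { iq0, iq1, iq2, iq3 };
 *	int ret;
 *
 *	ret = t4_config_rss_range(adapter, adapter->mbox, viid, 0,
 *				  rss_size, rss, 4);
 */
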
/**
 * t4_config_vi_rss - configure per VI RSS settings
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: the VI id
 * @flags: RSS flags
 * @defq: id of the default RSS queue for the VI.
 *
 * Configures VI-specific RSS properties.
 */
int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
		     unsigned int flags, unsigned int defq)
{
	struct fw_rss_vi_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_RSS_VI_CONFIG_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
			V_FW_RSS_VI_CONFIG_CMD_DEFAULTQ(defq));
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/**
 * init_cong_ctrl - initialize congestion control parameters
 * @a: the alpha values for congestion control
 * @b: the beta values for congestion control
 *
 * Initialize the congestion control parameters.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	int i;

	for (i = 0; i < 9; i++) {
		a[i] = 1;
		b[i] = 0;
	}
	a[9] = 2; a[10] = 3; a[11] = 4; a[12] = 5; a[13] = 6;
	a[14] = 7; a[15] = 8; a[16] = 9; a[17] = 10; a[18] = 14;
	a[19] = 17; a[20] = 21; a[21] = 25; a[22] = 30; a[23] = 35;
	a[24] = 45; a[25] = 60; a[26] = 80; a[27] = 100;
	a[28] = 200; a[29] = 300; a[30] = 400; a[31] = 500;

	b[9] = b[10] = 1;
	b[11] = b[12] = 2;
	b[13] = b[14] = b[15] = b[16] = 3;
	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
	b[28] = b[29] = b[30] = b[31] = 6;
}

#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(V_FW_CMD_OP(FW_##cmd##_CMD) | \
					F_FW_CMD_REQUEST | \
					F_FW_CMD_##rd_wr); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)

int t4_get_core_clock(struct adapter *adapter, struct vpd_params *p)
{
	u32 cclk_param, cclk_val;
	int ret;

	/*
	 * Ask firmware for the Core Clock since it knows how to translate the
	 * Reference Clock ('V2') VPD field into a Core Clock value ...
	 */
	cclk_param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		      V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      1, &cclk_param, &cclk_val);
	if (ret) {
		dev_err(adapter, "%s: error in fetching from coreclock - %d\n",
			__func__, ret);
		return ret;
	}

	p->cclk = cclk_val;
	dev_debug(adapter, "%s: p->cclk = %u\n", __func__, p->cclk);
	return 0;
}

/* serial flash and firmware constants and flash config file constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash. The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_OP,
		     V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @lock: whether to lock SF for PL access only
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash. The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t4_write_reg(adapter, A_SF_DATA, val);
	t4_write_reg(adapter, A_SF_OP, V_SF_LOCK(lock) |
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
}

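/*
 * Example usage (an illustrative sketch): the chained write/read protocol
 * used to query the flash ID. A 1-byte SF_RD_ID opcode is written with the
 * @cont flag set so the device keeps the transfer open, then the ID bytes
 * are shifted out:
 *
 *	u32 id;
 *	int ret;
 *
 *	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
 *	if (!ret)
 *		ret = sf1_read(adapter, 3, 0, 0, &id);
 */
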
/**
 * t4_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (((addr + nwords * sizeof(u32)) > adapter->params.sf_size) ||
	    (addr & 3))
		return -EINVAL;

	addr = rte_constant_bswap32(addr) | SF_RD_DATA_FAST;

	ret = sf1_write(adapter, 4, 1, 0, addr);
	if (ret != 0)
		return ret;

	ret = sf1_read(adapter, 1, 1, 0, data);
	if (ret != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, A_SF_OP, 0);    /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = cpu_to_be32(*data);
	}
	return 0;
}

/**
 * t4_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
}

/**
 * t4_get_tp_version - read the TP microcode version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the TP microcode version from flash.
 */
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, FLASH_FW_START +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G | \
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)

/**
 * t4_link_l1cfg - apply link configuration to MAC/PHY
 * @adap: the adapter
 * @mbox: the Firmware Mailbox to use
 * @port: the Port ID
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
				     F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				     V_FW_PORT_CMD_PORTID(port));
	c.action_to_len16 =
		cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			    FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
					     fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else {
		c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
	}

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

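/*
 * Example usage (an illustrative sketch, assuming a port_info-style
 * structure "pi" holding a port index and its link_config "lc"):
 * requesting symmetric pause frames and reapplying the link configuration:
 *
 *	lc->requested_fc = PAUSE_RX | PAUSE_TX;
 *	ret = t4_link_l1cfg(adap, adap->mbox, pi->port_id, lc);
 */
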
/**
 * t4_flash_cfg_addr - return the address of the flash configuration file
 * @adapter: the adapter
 *
 * Return the address within the flash where the Firmware Configuration
 * File is stored, or an error if the device FLASH is too small to contain
 * a Firmware Configuration File.
 */
int t4_flash_cfg_addr(struct adapter *adapter)
{
	/*
	 * If the device FLASH isn't large enough to hold a Firmware
	 * Configuration File, return an error.
	 */
	if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
		return -ENOSPC;

	return FLASH_CFG_START;
}

#define PF_INTR_MASK (F_PFSW | F_PFCIM)

/**
 * t4_intr_enable - enable interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Enable PF-specific interrupts for the calling function and the top-level
 * interrupt concentrator for global interrupts. Interrupts are already
 * enabled at each module, here we just enable the roots of the interrupt
 * hierarchies.
 *
 * Note: this function should be called only when the driver manages
 * non PF-specific interrupts from the various HW modules. Only one PCI
 * function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	u32 val = 0;
	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));

	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
	t4_write_reg(adapter, A_SGE_INT_ENABLE3, F_ERR_CPL_EXCEED_IQE_SIZE |
		     F_ERR_INVALID_CIDX_INC | F_ERR_CPL_OPCODE_0 |
		     F_ERR_DATA_CPL_ON_HIGH_QID1 | F_INGRESS_SIZE_ERR |
		     F_ERR_DATA_CPL_ON_HIGH_QID0 | F_ERR_BAD_DB_PIDX3 |
		     F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
		     F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO |
		     F_DBFIFO_LP_INT | F_EGRESS_SIZE_ERR | val);
	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 0, 1 << pf);
}

/**
 * t4_intr_disable - disable interrupts
 * @adapter: the adapter whose interrupts should be disabled
 *
 * Disable interrupts. We only disable the top-level interrupt
 * concentrators. The caller must be a PCI function managing global
 * interrupts.
 */
void t4_intr_disable(struct adapter *adapter)
{
	u32 pf = G_SOURCEPF(t4_read_reg(adapter, A_PL_WHOAMI));

	t4_write_reg(adapter, MYPF_REG(A_PL_PF_INT_ENABLE), 0);
	t4_set_reg_field(adapter, A_PL_INT_MAP0, 1 << pf, 0);
}

/**
 * t4_get_port_type_description - return Port Type string description
 * @port_type: firmware Port Type enumeration
 */
const char *t4_get_port_type_description(enum fw_port_type port_type)
{
	static const char * const port_type_description[] = {
		"R XFI",
		"R XAUI",
		"T SGMII",
		"T XFI",
		"T XAUI",
		"KX4",
		"CX4",
		"KX",
		"KR",
		"R SFP+",
		"KR/KX",
		"KR/KX/KX4",
		"R QSFP_10G",
		"R QSA",
		"R QSFP",
		"R BP40_BA",
	};

	if (port_type < ARRAY_SIZE(port_type_description))
		return port_type_description[port_type];
	return "UNKNOWN";
}

/**
 * t4_get_mps_bg_map - return the buffer groups associated with a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Returns a bitmap indicating which MPS buffer groups are associated
 * with the given port. Bit i is set if buffer group i is used by the
 * port.
 */
unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
{
	u32 n = G_NUMPORTS(t4_read_reg(adap, A_MPS_CMN_CTL));

	if (n == 0)
		return idx == 0 ? 0xf : 0;
	if (n == 1)
		return idx < 2 ? (3 << (2 * idx)) : 0;
	return 1 << idx;
}

/**
 * t4_get_port_stats - collect port statistics
 * @adap: the adapter
 * @idx: the port index
 * @p: the stats structure to fill
 *
 * Collect statistics related to the given port from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);

#define GET_STAT(name) \
	t4_read_reg64(adap, \
		      (is_t4(adap->params.chip) ? \
		       PORT_REG(idx, A_MPS_PORT_STAT_##name##_L) : \
		       T5_PORT_REG(idx, A_MPS_PORT_STAT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)

	p->tx_octets = GET_STAT(TX_PORT_BYTES);
	p->tx_frames = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64 = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop = GET_STAT(TX_PORT_DROP);
	p->tx_pause = GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);

	p->rx_octets = GET_STAT(RX_PORT_BYTES);
	p->rx_frames = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64 = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause = GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);

	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 * t4_get_port_stats_offset - collect port stats relative to a previous snapshot
 * @adap: The adapter
 * @idx: The port
 * @stats: Current stats to fill
 * @offset: Previous stats snapshot
 */
void t4_get_port_stats_offset(struct adapter *adap, int idx,
			      struct port_stats *stats,
			      struct port_stats *offset)
{
	u64 *s, *o;
	unsigned int i;

	t4_get_port_stats(adap, idx, stats);
	for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
	     i < (sizeof(struct port_stats) / sizeof(u64));
	     i++, s++, o++)
		*s -= *o;
}

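/*
 * Example usage (an illustrative sketch): taking a baseline snapshot of the
 * hardware counters once, and later reading values relative to it:
 *
 *	struct port_stats snap, cur;
 *
 *	t4_get_port_stats(adap, idx, &snap);
 *	... some traffic passes ...
 *	t4_get_port_stats_offset(adap, idx, &cur, &snap);
 */
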
/**
 * t4_clr_port_stats - clear port statistics
 * @adap: the adapter
 * @idx: the port index
 *
 * Clear HW statistics for the given port.
 */
void t4_clr_port_stats(struct adapter *adap, int idx)
{
	unsigned int i;
	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	u32 port_base_addr;

	if (is_t4(adap->params.chip))
		port_base_addr = PORT_BASE(idx);
	else
		port_base_addr = T5_PORT_BASE(idx);

	for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
	     i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
		t4_write_reg(adap, port_base_addr + i, 0);
	for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
	     i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
		t4_write_reg(adap, port_base_addr + i, 0);
	for (i = 0; i < 4; i++)
		if (bgmap & (1 << i)) {
			t4_write_reg(adap,
				     A_MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L +
				     i * 8, 0);
			t4_write_reg(adap,
				     A_MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L +
				     i * 8, 0);
		}
}

/**
 * t4_fw_hello - establish communication with FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @evt_mbox: mailbox to receive async FW events
 * @master: specifies the caller's willingness to be the device master
 * @state: returns the current device state (if non-NULL)
 *
 * Issues a command to establish communication with FW. Returns either
 * an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = cpu_to_be32(
			V_FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
			V_FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
			V_FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
						M_FW_HELLO_CMD_MBMASTER) |
			V_FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
			V_FW_HELLO_CMD_STAGE(FW_HELLO_CMD_STAGE_OS) |
			F_FW_HELLO_CMD_CLEARINIT);

	/*
	 * Issue the HELLO command to the firmware. If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit. If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so ...
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret != FW_SUCCESS) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_ERR)
			t4_report_fw_error(adap);
		return ret;
	}

	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = G_FW_HELLO_CMD_MBMASTER(v);
	if (state) {
		if (v & F_FW_HELLO_CMD_ERR)
			*state = DEV_STATE_ERR;
		else if (v & F_FW_HELLO_CMD_INIT)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly. (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time). In
	 * this case, the Master PF returned by the firmware will be
	 * M_PCIE_FW_MASTER so the test below will work ...
	 */
	if ((v & (F_FW_HELLO_CMD_ERR | F_FW_HELLO_CMD_INIT)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state. If we see either of these we bail out
		 * and report the issue to the caller. If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again. Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, A_PCIE_FW);
			if (!(pcie_fw & (F_PCIE_FW_ERR | F_PCIE_FW_INIT))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & F_PCIE_FW_ERR)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & F_PCIE_FW_INIT)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == M_PCIE_FW_MASTER &&
			    (pcie_fw & F_PCIE_FW_MASTER_VLD))
				master_mbox = G_PCIE_FW_MASTER(pcie_fw);
			break;
		}
	}

	return master_mbox;
}

/**
 * t4_fw_bye - end communication with FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *
 * Issues a command to terminate communication with FW.
 */
int t4_fw_bye(struct adapter *adap, unsigned int mbox)
{
	struct fw_bye_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, BYE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_fw_reset - issue a reset to FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @reset: specifies the type of reset to perform
 *
 * Issues a reset command of the specified type to FW.
 */
int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
{
	struct fw_reset_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, RESET, WRITE);
	c.val = cpu_to_be32(reset);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @force: force uP into RESET even if FW RESET command fails
 *
 * Issues a RESET command to firmware (if desired) with a HALT indication
 * and then puts the microprocessor into RESET state. The RESET command
 * will only be issued if a legitimate mailbox is provided (mbox <=
 * M_PCIE_FW_MASTER).
 *
 * This is generally used in order for the host to safely manipulate the
 * adapter without fear of conflicting with whatever the firmware might
 * be doing. The only way out of this state is to RESTART the firmware
 * ...
 */
int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= M_PCIE_FW_MASTER) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = cpu_to_be32(F_PIORST | F_PIORSTMODE);
		c.halt_pkd = cpu_to_be32(F_FW_RESET_CMD_HALT);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET. This can be useful if the firmware is hung or even
	 * missing ... We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability. This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, F_UPCRST);
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT,
				 F_PCIE_FW_HALT);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}

/**
 * t4_fw_restart - restart the firmware by taking the uP out of RESET
 * @adap: the adapter
 * @mbox: mailbox to use for the FW RESET command (if desired)
 * @reset: if we want to do a RESET to restart things
 *
 * Restart firmware previously halted by t4_fw_halt(). On successful
 * return the previous PF Master remains as the new PF Master and there
 * is no need to issue a new HELLO command, etc.
 *
 * We do this in two ways:
 *
 * 1. If we're dealing with newer firmware we'll simply want to take
 *    the chip's microprocessor out of RESET. This will cause the
 *    firmware to start up from its start vector. And then we'll loop
 *    until the firmware indicates it's started again (PCIE_FW.HALT
 *    reset to 0) or we timeout.
 *
 * 2. If we're dealing with older firmware then we'll need to RESET
 *    the chip since older firmware won't recognize the PCIE_FW.HALT
 *    flag and automatically RESET itself on startup.
 */
int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		t4_set_reg_field(adap, A_PCIE_FW, F_PCIE_FW_HALT, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET. If that works, great and we can
		 * return success. Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= M_PCIE_FW_MASTER) {
			t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
			msleep(100);
			if (t4_fw_reset(adap, mbox,
					F_PIORST | F_PIORSTMODE) == 0)
				return 0;
		}

		t4_write_reg(adap, A_PL_RST, F_PIORST | F_PIORSTMODE);
		msleep(2000);
	} else {
		int ms;

		t4_set_reg_field(adap, A_CIM_BOOT_CFG, F_UPCRST, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(t4_read_reg(adap, A_PCIE_FW) & F_PCIE_FW_HALT))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}

/**
 * t4_fixup_host_params_compat - fix up host-dependent parameters
 * @adap: the adapter
 * @page_size: the host's Base Page Size
 * @cache_line_size: the host's Cache Line Size
 * @chip_compat: maintain compatibility with designated chip
 *
 * Various registers in the chip contain values which are dependent on the
 * host's Base Page and Cache Line Sizes. This function will fix all of
 * those registers with the appropriate values as passed in ...
 *
 * @chip_compat is used to limit the set of changes that are made
 * to be compatible with the indicated chip release. This is used by
 * drivers to maintain compatibility with chip register settings when
 * the drivers haven't [yet] been updated with new chip support.
 */
int t4_fixup_host_params_compat(struct adapter *adap,
				unsigned int page_size,
				unsigned int cache_line_size,
				enum chip_type chip_compat)
{
	unsigned int page_shift = cxgbe_fls(page_size) - 1;
	unsigned int sge_hps = page_shift - 10;
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = cxgbe_fls(fl_align) - 1;

	t4_write_reg(adap, A_SGE_HOST_PAGE_SIZE,
		     V_HOSTPAGESIZEPF0(sge_hps) |
		     V_HOSTPAGESIZEPF1(sge_hps) |
		     V_HOSTPAGESIZEPF2(sge_hps) |
		     V_HOSTPAGESIZEPF3(sge_hps) |
		     V_HOSTPAGESIZEPF4(sge_hps) |
		     V_HOSTPAGESIZEPF5(sge_hps) |
		     V_HOSTPAGESIZEPF6(sge_hps) |
		     V_HOSTPAGESIZEPF7(sge_hps));

	if (is_t4(adap->params.chip) || is_t4(chip_compat)) {
		t4_set_reg_field(adap, A_SGE_CONTROL,
				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
				 F_EGRSTATUSPAGESIZE,
				 V_INGPADBOUNDARY(fl_align_log -
						  X_INGPADBOUNDARY_SHIFT) |
				 V_EGRSTATUSPAGESIZE(stat_len != 64));
	} else {
		/*
		 * T5 introduced the separation of the Free List Padding and
		 * Packing Boundaries. Thus, we can select a smaller Padding
		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
		 * Bandwidth, and use a Packing Boundary which is large enough
		 * to avoid false sharing between CPUs, etc.
		 *
		 * For the PCI Link, the smaller the Padding Boundary the
		 * better. For the Memory Controller, a smaller Padding
		 * Boundary is better until we cross under the Memory Line
		 * Size (the minimum unit of transfer to/from Memory). If we
		 * have a Padding Boundary which is smaller than the Memory
		 * Line Size, that'll involve a Read-Modify-Write cycle on the
		 * Memory Controller which is never good. For T5 the smallest
		 * Padding Boundary which we can select is 32 bytes which is
		 * larger than any known Memory Controller Line Size so we'll
		 * start with that ... 32 bytes.
		 *
		 * N.B. T5 has a different interpretation of the "0" value for
		 * the Packing Boundary. This corresponds to 16 bytes instead
		 * of the expected 32 bytes. We never have a Packing Boundary
		 * less than 32 bytes so we can't use that special value but
		 * on the other hand, if we wanted 32 bytes, the best we can
		 * really do is 64 bytes ...
		 */
		if (fl_align <= 32) {
			fl_align = 64;
			fl_align_log = 6;
		}
		t4_set_reg_field(adap, A_SGE_CONTROL,
				 V_INGPADBOUNDARY(M_INGPADBOUNDARY) |
				 F_EGRSTATUSPAGESIZE,
				 V_INGPADBOUNDARY(X_INGPCIEBOUNDARY_32B) |
				 V_EGRSTATUSPAGESIZE(stat_len != 64));
		t4_set_reg_field(adap, A_SGE_CONTROL2,
				 V_INGPACKBOUNDARY(M_INGPACKBOUNDARY),
				 V_INGPACKBOUNDARY(fl_align_log -
						   X_INGPACKBOUNDARY_SHIFT));
	}

	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * The first four entries are:
	 *
	 *   0: Host Page Size
	 *   1: 64KB
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary. All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE0, page_size);
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE2,
		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE2) + fl_align - 1)
		     & ~(fl_align - 1));
	t4_write_reg(adap, A_SGE_FL_BUFFER_SIZE3,
		     (t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE3) + fl_align - 1)
		     & ~(fl_align - 1));

	t4_write_reg(adap, A_ULP_RX_TDDP_PSZ, V_HPZ0(page_shift - 12));

	return 0;
}

/**
 * t4_fixup_host_params - fix up host-dependent parameters (T4 compatible)
 * @adap: the adapter
 * @page_size: the host's Base Page Size
 * @cache_line_size: the host's Cache Line Size
 *
 * Various registers in T4 contain values which are dependent on the
 * host's Base Page and Cache Line Sizes. This function will fix all of
 * those registers with the appropriate values as passed in ...
 *
 * This routine makes changes which are compatible with T4 chips.
 */
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size)
{
	return t4_fixup_host_params_compat(adap, page_size, cache_line_size,
					   T4_LAST_REV);
}

/**
 * t4_fw_initialize - ask FW to initialize the device
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 *
 * Issues a command to FW to partially initialize the device. This
 * performs initialization that generally doesn't depend on user input.
 */
int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_query_params_rw - query FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 * @rw: Write and read flag
 *
 * Reads the value of FW or device parameters. Up to 7 parameters can be
 * queried at once.
 */
static int t4_query_params_rw(struct adapter *adap, unsigned int mbox,
			      unsigned int pf, unsigned int vf,
			      unsigned int nparams, const u32 *params,
			      u32 *val, int rw)
{
	unsigned int i;
	int ret;
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_READ |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	for (i = 0; i < nparams; i++) {
		*p++ = cpu_to_be32(*params++);
		if (rw)
			*p = cpu_to_be32(*(val + i));
		p++;
	}

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0)
		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
			*val++ = be32_to_cpu(*p);
	return ret;
}

int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
}

/**
 * t4_set_params_timeout - sets FW or device parameters
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF
 * @vf: the VF
 * @nparams: the number of parameters
 * @params: the parameter names
 * @val: the parameter values
 * @timeout: the timeout time
 *
 * Sets the value of FW or device parameters. Up to 7 parameters can be
 * specified at once.
 */
int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
			  unsigned int pf, unsigned int vf,
			  unsigned int nparams, const u32 *params,
			  const u32 *val, int timeout)
{
	struct fw_params_cmd c;
	__be32 *p = &c.param[0].mnem;

	if (nparams > 7)
		return -EINVAL;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_PARAMS_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				  V_FW_PARAMS_CMD_PFN(pf) |
				  V_FW_PARAMS_CMD_VFN(vf));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));

	while (nparams--) {
		*p++ = cpu_to_be32(*params++);
		*p++ = cpu_to_be32(*val++);
	}

	return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
}

int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val)
{
	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
				     FW_CMD_MAX_TIMEOUT);
}

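/*
 * Example usage (an illustrative sketch): writing a single device parameter,
 * mirroring the read in t4_get_core_clock() above. "param_x" and "new_value"
 * are hypothetical placeholders for an actual FW_PARAMS_PARAM_DEV_* mnemonic
 * and its value:
 *
 *	u32 param, val;
 *	int ret;
 *
 *	param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *		V_FW_PARAMS_PARAM_X(param_x);
 *	val = new_value;
 *	ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
 */
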
/**
 * t4_alloc_vi_func - allocate a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @nmac: number of MAC addresses needed (1 to 5)
 * @mac: the MAC addresses of the VI
 * @rss_size: size of RSS table slice associated with this VI
 * @portfunc: which Port Application Function MAC Address is desired
 * @idstype: Intrusion Detection Type
 *
 * Allocates a virtual interface for the given physical port. If @mac is
 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
 * @mac should be large enough to hold @nmac Ethernet addresses, they are
 * stored consecutively so the space needed is @nmac * 6 bytes.
 * Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi_func(struct adapter *adap, unsigned int mbox,
		     unsigned int port, unsigned int pf, unsigned int vf,
		     unsigned int nmac, u8 *mac, unsigned int *rss_size,
		     unsigned int portfunc, unsigned int idstype)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_WRITE | F_FW_CMD_EXEC |
				  V_FW_VI_CMD_PFN(pf) | V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_ALLOC | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_TYPE(idstype) |
				     V_FW_VI_CMD_FUNC(portfunc));
	c.portid_pkd = V_FW_VI_CMD_PORTID(port);
	c.nmac = nmac - 1;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* FALLTHROUGH */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* FALLTHROUGH */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* FALLTHROUGH */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
			break;
		}
	}
	if (rss_size)
		*rss_size = G_FW_VI_CMD_RSSSIZE(be16_to_cpu(c.norss_rsssize));
	return G_FW_VI_CMD_VIID(be16_to_cpu(c.type_to_viid));
}

/**
 * t4_alloc_vi - allocate an [Ethernet Function] virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @nmac: number of MAC addresses needed (1 to 5)
 * @mac: the MAC addresses of the VI
 * @rss_size: size of RSS table slice associated with this VI
 *
 * Backwards-compatible convenience routine to allocate a Virtual
 * Interface with an Ethernet Port Application Function and Intrusion
 * Detection System disabled.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	return t4_alloc_vi_func(adap, mbox, port, pf, vf, nmac, mac, rss_size,
				FW_VI_FUNC_ETH, 0);
}

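/*
 * Example usage (an illustrative sketch): bringing up an Ethernet VI on
 * physical port "port" and fetching its firmware-assigned MAC address:
 *
 *	u8 mac[ETHER_ADDR_LEN];
 *	unsigned int rss_size;
 *	int viid;
 *
 *	viid = t4_alloc_vi(adap, adap->mbox, port, adap->pf, 0, 1, mac,
 *			   &rss_size);
 *	if (viid < 0)
 *		return viid;
 */
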
/**
 * t4_free_vi - free a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @viid: virtual interface identifier
 *
 * Free a previously allocated virtual interface.
 */
int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int viid)
{
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_VI_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_EXEC | V_FW_VI_CMD_PFN(pf) |
				  V_FW_VI_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_VI_CMD_FREE | FW_LEN16(c));
	c.type_to_viid = cpu_to_be16(V_FW_VI_CMD_VIID(viid));

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
}


/**
 * t4_set_rxmode - set Rx properties of a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @mtu: the new MTU or -1
 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 * @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
 *          -1 no change
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Sets Rx properties of a virtual interface.
 */
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int mtu, int promisc, int all_multi, int bcast, int vlanex,
		  bool sleep_ok)
{
	struct fw_vi_rxmode_cmd c;

	/* convert to FW values */
	if (mtu < 0)
		mtu = M_FW_VI_RXMODE_CMD_MTU;
	if (promisc < 0)
		promisc = M_FW_VI_RXMODE_CMD_PROMISCEN;
	if (all_multi < 0)
		all_multi = M_FW_VI_RXMODE_CMD_ALLMULTIEN;
	if (bcast < 0)
		bcast = M_FW_VI_RXMODE_CMD_BROADCASTEN;
	if (vlanex < 0)
		vlanex = M_FW_VI_RXMODE_CMD_VLANEXEN;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_RXMODE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_RXMODE_CMD_VIID(viid));
	c.retval_len16 = cpu_to_be32(FW_LEN16(c));
	c.mtu_to_vlanexen = cpu_to_be32(V_FW_VI_RXMODE_CMD_MTU(mtu) |
					V_FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
					V_FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
					V_FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
					V_FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
}
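
/*
 * Illustrative usage sketch (not part of the original sources): enable
 * promiscuous mode on a VI while leaving the MTU, all-multi, broadcast and
 * VLAN-extraction settings untouched (-1 selects "no change" for each).
 *
 *	int ret = t4_set_rxmode(adap, adap->mbox, viid, -1, 1, -1, -1, -1,
 *				true);
 */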

/**
 * t4_change_mac - modifies the exact-match filter for a MAC address
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @idx: index of existing filter for old value of MAC address, or -1
 * @addr: the new MAC address value
 * @persist: whether a new MAC allocation should be persistent
 * @add_smt: if true also add the address to the HW SMT
 *
 * Modifies an exact-match filter and sets it to the new MAC address if
 * @idx >= 0, or adds the MAC address to a new filter if @idx < 0. In the
 * latter case the address is added persistently if @persist is %true.
 *
 * Note that in general it is not possible to modify the value of a given
 * filter so the generic way to modify an address filter is to free the one
 * being used by the old address value and allocate a new filter for the
 * new address value.
 *
 * Returns a negative error number or the index of the filter with the new
 * MAC value. Note that this index may differ from @idx.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_exact *p = c.u.exact;
	int max_mac_addr = adap->params.arch.mps_tcam_size;

	if (idx < 0)		/* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_MAC_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_WRITE |
				   V_FW_VI_MAC_CMD_VIID(viid));
	c.freemacs_to_len16 = cpu_to_be32(V_FW_CMD_LEN16(1));
	p->valid_to_idx = cpu_to_be16(F_FW_VI_MAC_CMD_VALID |
				      V_FW_VI_MAC_CMD_SMAC_RESULT(mode) |
				      V_FW_VI_MAC_CMD_IDX(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		ret = G_FW_VI_MAC_CMD_IDX(be16_to_cpu(p->valid_to_idx));
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
	}
	return ret;
}
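
/*
 * Illustrative usage sketch (not part of the original sources): install a
 * unicast address into a freshly allocated filter slot (idx == -1) and
 * remember the slot the firmware picked so it can be updated later.
 * "new_mac" is a hypothetical caller-supplied 6-byte array.
 *
 *	int filt_idx = t4_change_mac(adap, adap->mbox, viid, -1, new_mac,
 *				     true, true);
 *
 * filt_idx is negative on error (including -ENOMEM when the MPS TCAM is
 * full) and otherwise holds the index of the filter that was written.
 */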

/**
 * t4_enable_vi_params - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
 *
 * Enables/disables a virtual interface. Note that setting DCB Enable
 * only makes sense when enabling a Virtual Interface ...
 */
int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
			unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(V_FW_CMD_OP(FW_VI_ENABLE_CMD) |
				   F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				   V_FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = cpu_to_be32(V_FW_VI_ENABLE_CMD_IEN(rx_en) |
				     V_FW_VI_ENABLE_CMD_EEN(tx_en) |
				     V_FW_VI_ENABLE_CMD_DCB_INFO(dcb_en) |
				     FW_LEN16(c));
	return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_enable_vi - enable/disable a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @rx_en: 1=enable Rx, 0=disable Rx
 * @tx_en: 1=enable Tx, 0=disable Tx
 *
 * Enables/disables a virtual interface. Note that setting DCB Enable
 * only makes sense when enabling a Virtual Interface ...
 */
int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
		 bool rx_en, bool tx_en)
{
	return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
}
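
/*
 * Illustrative usage sketch (not part of the original sources): bring a
 * VI's Rx and Tx paths up without DCB message delivery, then later quiesce
 * Tx while keeping Rx alive.
 *
 *	t4_enable_vi(adap, adap->mbox, viid, 1, 1);
 *	t4_enable_vi(adap, adap->mbox, viid, 1, 0);
 */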

/**
 * t4_iq_start_stop - enable/disable an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @start: %true to enable the queues, %false to disable them
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Starts or stops an ingress queue and its associated FLs, if any.
 */
int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
		     unsigned int pf, unsigned int vf, unsigned int iqid,
		     unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
				  V_FW_IQ_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(V_FW_IQ_CMD_IQSTART(start) |
				       V_FW_IQ_CMD_IQSTOP(!start) |
				       FW_LEN16(c));
	c.iqid = cpu_to_be16(iqid);
	c.fl0id = cpu_to_be16(fl0id);
	c.fl1id = cpu_to_be16(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_iq_free - free an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
				  F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(pf) |
				  V_FW_IQ_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_IQ_CMD_FREE | FW_LEN16(c));
	c.type_to_iqandstindex = cpu_to_be32(V_FW_IQ_CMD_TYPE(iqtype));
	c.iqid = cpu_to_be16(iqid);
	c.fl0id = cpu_to_be16(fl0id);
	c.fl1id = cpu_to_be16(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
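
/*
 * Illustrative usage sketch (not part of the original sources): free an
 * ingress queue that has a single attached free list; 0xffff marks the
 * absent FL1.  "iqid" and "fl0id" are assumed to be the IDs the firmware
 * returned when the queues were created.
 *
 *	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
 *		   iqid, fl0id, 0xffff);
 */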

/**
 * t4_eth_eq_free - free an Ethernet egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid)
{
	struct fw_eq_eth_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(V_FW_CMD_OP(FW_EQ_ETH_CMD) |
				  F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
				  V_FW_EQ_ETH_CMD_PFN(pf) |
				  V_FW_EQ_ETH_CMD_VFN(vf));
	c.alloc_to_len16 = cpu_to_be32(F_FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(V_FW_EQ_ETH_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	/*
	 * This might be a port command ... this simplifies the following
	 * conditionals ... We can get away with pre-dereferencing
	 * action_to_len16 because it's in the first 16 bytes and all messages
	 * will be at least that long.
	 */
	const struct fw_port_cmd *p = (const void *)rpl;
	unsigned int action =
		G_FW_PORT_CMD_ACTION(be32_to_cpu(p->action_to_len16));

	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
		/* link/module state change message */
		int speed = 0, fc = 0, i;
		int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
		struct port_info *pi = NULL;
		struct link_config *lc;
		u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & F_FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = G_FW_PORT_CMD_MODTYPE(stat);

		if (stat & F_FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & F_FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = ETH_LINK_SPEED_100;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = ETH_LINK_SPEED_1000;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = ETH_LINK_SPEED_10000;
		else if (stat & V_FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
			speed = ETH_LINK_SPEED_40G;

		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->tx_chan == chan)
				break;
		}
		lc = &pi->link_cfg;

		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, i);
		}
		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {	/* something changed */
			if (!link_ok && lc->link_ok) {
				static const char * const reason[] = {
					"Link Down",
					"Remote Fault",
					"Auto-negotiation Failure",
					"Reserved",
					"Insufficient Airflow",
					"Unable To Determine Reason",
					"No RX Signal Detected",
					"Reserved",
				};
				unsigned int rc = G_FW_PORT_CMD_LINKDNRC(stat);

				dev_warn(adap, "Port %d link down, reason: %s\n",
					 chan, reason[rc]);
			}
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			lc->supported = be16_to_cpu(p->u.info.pcap);
		}
	} else {
		dev_warn(adap, "Unknown firmware reply %d\n", opcode);
		return -EINVAL;
	}
	return 0;
}

void t4_reset_link_config(struct adapter *adap, int idx)
{
	struct port_info *pi = adap2pinfo(adap, idx);
	struct link_config *lc = &pi->link_cfg;

	lc->link_ok = 0;
	lc->requested_speed = 0;
	lc->requested_fc = 0;
	lc->speed = 0;
	lc->fc = 0;
}

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void init_link_config(struct link_config *lc,
			     unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = 0;
	lc->speed = 0;
	lc->requested_fc = 0;
	lc->fc = 0;
	if (lc->supported & FW_PORT_CAP_ANEG) {
		lc->advertising = lc->supported & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

/**
 * t4_wait_dev_ready - wait till device reads of registers work
 *
 * Right after the device is RESET it can take a small amount of time
 * for it to respond to register reads. Until then, all reads will
 * return either 0xff...ff or 0xee...ee. Return an error if reads
 * don't work within a reasonable time frame.
 */
static int t4_wait_dev_ready(struct adapter *adapter)
{
	u32 whoami;

	whoami = t4_read_reg(adapter, A_PL_WHOAMI);

	if (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS)
		return 0;

	msleep(500);
	whoami = t4_read_reg(adapter, A_PL_WHOAMI);
	return (whoami != 0xffffffff && whoami != X_CIM_PF_NOACCESS
			? 0 : -EIO);
}

struct flash_desc {
	u32 vendor_and_model_id;
	u32 size_mb;
};

int t4_get_flash_params(struct adapter *adapter)
{
	/*
	 * Table for non-Numonix supported flash parts. Numonix parts are left
	 * to the preexisting well-tested code. All flash parts have 64KB
	 * sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x150201, 4 << 20 },	/* Spansion 4MB S25FL032P */
	};

	int ret;
	unsigned int i;
	u32 info = 0;

	ret = sf1_write(adapter, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adapter, 3, 0, 1, &info);
	t4_write_reg(adapter, A_SF_OP, 0);	/* unlock SF */
	if (ret < 0)
		return ret;

	for (i = 0; i < ARRAY_SIZE(supported_flash); ++i)
		if (supported_flash[i].vendor_and_model_id == info) {
			adapter->params.sf_size = supported_flash[i].size_mb;
			adapter->params.sf_nsec =
				adapter->params.sf_size / SF_SEC_SIZE;
			return 0;
		}

	if ((info & 0xff) != 0x20)		/* not a Numonix flash */
		return -EINVAL;
	info >>= 16;				/* log2 of size */
	if (info >= 0x14 && info < 0x18)
		adapter->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		adapter->params.sf_nsec = 64;
	else
		return -EINVAL;
	adapter->params.sf_size = 1 << info;

	/*
	 * We should reject adapters with FLASHes which are too small. So, emit
	 * a warning.
	 */
	if (adapter->params.sf_size < FLASH_MIN_SIZE) {
		dev_warn(adapter, "WARNING!!! FLASH size %#x < %#x!!!\n",
			 adapter->params.sf_size, FLASH_MIN_SIZE);
	}

	return 0;
}
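
/*
 * Worked example of the Numonix decode above (illustrative only): an ID
 * response of 0x17ba20 has manufacturer byte 0x20 in its low byte and size
 * code 0x17 in bits 23:16, so sf_size = 1 << 0x17 = 8MB and
 * sf_nsec = 1 << (0x17 - 16) = 128, which matches 8MB divided into the
 * 64KB sectors assumed by this routine.
 */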

/**
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter)
{
	int ret, ver;
	u32 pl_rev;

	ret = t4_wait_dev_ready(adapter);
	if (ret < 0)
		return ret;

	pl_rev = G_REV(t4_read_reg(adapter, A_PL_REV));
	adapter->params.pci.device_id = adapter->pdev->id.device_id;
	adapter->params.pci.vendor_id = adapter->pdev->id.vendor_id;

	/*
	 * WE DON'T NEED adapter->params.chip CODE ONCE PL_REV CONTAINS
	 * ADAPTER (VERSION << 4 | REVISION)
	 */
	ver = CHELSIO_PCI_ID_VER(adapter->params.pci.device_id);
	adapter->params.chip = 0;
	switch (ver) {
	case CHELSIO_T5:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		adapter->params.arch.sge_fl_db = F_DBPRIO | F_DBTYPE;
		adapter->params.arch.mps_tcam_size =
					NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.vfcount = 128;
		break;
	default:
		dev_err(adapter, "%s: Device %d is not supported\n",
			__func__, adapter->params.pci.device_id);
		return -EINVAL;
	}

	ret = t4_get_flash_params(adapter);
	if (ret < 0)
		return ret;

	adapter->params.cim_la_size = CIMLA_SIZE;

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port and clock for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	return 0;
}

/**
 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
 * @adapter: the adapter
 * @qid: the Queue ID
 * @qtype: the Ingress or Egress type for @qid
 * @pbar2_qoffset: BAR2 Queue Offset
 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 * Returns the BAR2 SGE Queue Registers information associated with the
 * indicated Absolute Queue ID. These are passed back in return value
 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 * This may return an error which indicates that BAR2 SGE Queue
 * registers aren't available. If an error is not returned, then the
 * following values are returned:
 *
 *   *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *   *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 * require the "Inferred Queue ID" ability may be used. E.g. the
 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
 * then these "Inferred Queue ID" registers may not be used.
 */
int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
		      enum t4_bar2_qtype qtype, u64 *pbar2_qoffset,
		      unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/*
	 * T4 doesn't support BAR2 SGE Queue registers.
	 */
	if (is_t4(adapter->params.chip))
		return -EINVAL;

	/*
	 * Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.hps + 10;
	page_size = 1 << page_shift;

	/*
	 * Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS ?
		     adapter->params.sge.eq_qpp :
		     adapter->params.sge.iq_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/*
	 * Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/*
	 * If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there. But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example. These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;

	return 0;
}
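
/*
 * Worked example (illustrative only): with 4KB SGE Host Pages
 * (page_shift == 12) and one Egress Queue per Page (qpp_shift == 0),
 * Absolute Queue ID 7 yields bar2_page_offset = 7 << 12 = 0x7000,
 * bar2_qid = 0 and bar2_qid_offset = 0.  The offset fits in the page, so
 * the Queue ID is inferred and the caller gets *pbar2_qoffset = 0x7000
 * with *pbar2_qid = 0.
 */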

/**
 * t4_init_sge_params - initialize adap->params.sge
 * @adapter: the adapter
 *
 * Initialize various fields of the adapter's SGE Parameters structure.
 */
int t4_init_sge_params(struct adapter *adapter)
{
	struct sge_params *sge_params = &adapter->params.sge;
	u32 hps, qpp;
	unsigned int s_hps, s_qpp;

	/*
	 * Extract the SGE Page Size for our PF.
	 */
	hps = t4_read_reg(adapter, A_SGE_HOST_PAGE_SIZE);
	s_hps = (S_HOSTPAGESIZEPF0 + (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) *
		 adapter->pf);
	sge_params->hps = ((hps >> s_hps) & M_HOSTPAGESIZEPF0);

	/*
	 * Extract the SGE Egress and Ingress Queues Per Page for our PF.
	 */
	s_qpp = (S_QUEUESPERPAGEPF0 +
		 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adapter->pf);
	qpp = t4_read_reg(adapter, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
	sge_params->eq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);
	qpp = t4_read_reg(adapter, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
	sge_params->iq_qpp = ((qpp >> s_qpp) & M_QUEUESPERPAGEPF0);

	return 0;
}

/**
 * t4_init_tp_params - initialize adap->params.tp
 * @adap: the adapter
 *
 * Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap)
{
	int chan;
	u32 v;

	v = t4_read_reg(adap, A_TP_TIMER_RESOLUTION);
	adap->params.tp.tre = G_TIMERRESOLUTION(v);
	adap->params.tp.dack_re = G_DELAYEDACKRESOLUTION(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < NCHAN; chan++)
		adap->params.tp.tx_modq[chan] = chan;

	/*
	 * Cache the adapter's Compressed Filter Mode and global Ingress
	 * Configuration.
	 */
	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			 &adap->params.tp.vlan_pri_map, 1, A_TP_VLAN_PRI_MAP);
	t4_read_indirect(adap, A_TP_PIO_ADDR, A_TP_PIO_DATA,
			 &adap->params.tp.ingress_config, 1,
			 A_TP_INGRESS_CONFIG);

	/*
	 * Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
							       F_PROTOCOL);

	/*
	 * If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
		adap->params.tp.vnic_shift = -1;

	return 0;
}

/**
 * t4_filter_field_shift - calculate filter field shift
 * @adap: the adapter
 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
 *
 * Return the shift position of a filter field within the Compressed
 * Filter Tuple. The filter field is specified via its selection bit
 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
 */
int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel)
{
	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
	unsigned int sel;
	int field_shift;

	if ((filter_mode & filter_sel) == 0)
		return -1;

	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
		switch (filter_mode & sel) {
		case F_FCOE:
			field_shift += W_FT_FCOE;
			break;
		case F_PORT:
			field_shift += W_FT_PORT;
			break;
		case F_VNIC_ID:
			field_shift += W_FT_VNIC_ID;
			break;
		case F_VLAN:
			field_shift += W_FT_VLAN;
			break;
		case F_TOS:
			field_shift += W_FT_TOS;
			break;
		case F_PROTOCOL:
			field_shift += W_FT_PROTOCOL;
			break;
		case F_ETHERTYPE:
			field_shift += W_FT_ETHERTYPE;
			break;
		case F_MACMATCH:
			field_shift += W_FT_MACMATCH;
			break;
		case F_MPSHITTYPE:
			field_shift += W_FT_MPSHITTYPE;
			break;
		case F_FRAGMENTATION:
			field_shift += W_FT_FRAGMENTATION;
			break;
		}
	}
	return field_shift;
}
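
/*
 * Worked example (illustrative only): if the cached filter mode selects
 * only F_PORT and F_VLAN, then t4_filter_field_shift(adap, F_VLAN) walks
 * the selection bits below F_VLAN, finds only the Port field compressed
 * into the tuple, and returns W_FT_PORT, while
 * t4_filter_field_shift(adap, F_PORT) returns 0 because no selected field
 * sits below it.
 */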

int t4_init_rss_mode(struct adapter *adap, int mbox)
{
	int i, ret;
	struct fw_rss_vi_config_cmd rvc;

	memset(&rvc, 0, sizeof(rvc));

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		rvc.op_to_viid = htonl(V_FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				       F_FW_CMD_REQUEST | F_FW_CMD_READ |
				       V_FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
		rvc.retval_len16 = htonl(FW_LEN16(rvc));
		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
		if (ret)
			return ret;
		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
	}
	return 0;
}

int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j = 0;
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));

	for_each_port(adap, i) {
		unsigned int rss_size = 0;
		struct port_info *p = adap2pinfo(adap, i);

		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
					     F_FW_CMD_REQUEST | F_FW_CMD_READ |
					     V_FW_PORT_CMD_PORTID(j));
		c.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(
						FW_PORT_ACTION_GET_PORT_INFO) |
						FW_LEN16(c));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		if (ret)
			return ret;

		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
		if (ret < 0)
			return ret;

		p->viid = ret;
		p->tx_chan = j;
		p->rss_size = rss_size;
		t4_os_set_hw_addr(adap, i, addr);

		ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
		p->mdio_addr = (ret & F_FW_PORT_CMD_MDIOCAP) ?
				G_FW_PORT_CMD_MDIOADDR(ret) : -1;
		p->port_type = G_FW_PORT_CMD_PTYPE(ret);
		p->mod_type = FW_PORT_MOD_TYPE_NA;

		init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
		j++;
	}
	return 0;
}
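
/*
 * Illustrative bring-up sketch (not part of the original sources): one
 * plausible ordering of the routines above during device probe, under the
 * assumption that firmware has already been initialized via the mailbox.
 *
 *	ret = t4_prep_adapter(adap);
 *	ret = t4_init_sge_params(adap);
 *	ret = t4_init_tp_params(adap);
 *	ret = t4_port_init(adap, adap->mbox, adap->pf, 0);
 *	ret = t4_init_rss_mode(adap, adap->mbox);
 *
 * Each call returns 0 on success or a negative error code; a real caller
 * checks "ret" after every step.
 */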