1 /*******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include "ixgbe_common.h"
35 #include "ixgbe_phy.h"
36 #include "ixgbe_dcb.h"
37 #include "ixgbe_dcb_82599.h"
38 #include "ixgbe_api.h"
/*
 * Forward declarations for file-local (STATIC) helpers defined later in this
 * file: EEPROM semaphore acquisition/release, SPI bit-bang shift in/out and
 * clocking, multicast table vector hashing, SAN MAC address lookup, and the
 * buffered bit-bang read/write paths.
 * NOTE(review): this listing is truncated — the continuation lines carrying
 * the trailing parameters of ixgbe_shift_out_eeprom_bits,
 * ixgbe_get_san_mac_addr_offset and ixgbe_detect_eeprom_page_size_generic
 * are missing; confirm against the full source.
 */
40 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
41 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
42 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
43 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
44 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
45 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
47 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
48 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
49 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
50 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw);
52 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
53 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
55 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
56 u16 words, u16 *data);
57 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
58 u16 words, u16 *data);
59 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
63 * ixgbe_init_ops_generic - Inits function ptrs
64 * @hw: pointer to the hardware structure
66 * Initialize the function pointers.
68 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
70 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
71 	struct ixgbe_mac_info *mac = &hw->mac;
72 	u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
74 	DEBUGFUNC("ixgbe_init_ops_generic");
/* EEPROM ops: pick the fast EERD read path only when the EEC register
 * reports an EEPROM present; otherwise fall back to SPI bit-banging. */
77 	eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
78 	/* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
79 	if (eec & IXGBE_EEC_PRES) {
80 		eeprom->ops.read = ixgbe_read_eerd_generic;
81 		eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
/* NOTE(review): the else-branch brace appears dropped from this listing;
 * the two assignments below are the bit-bang fallback. */
83 		eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
84 		eeprom->ops.read_buffer =
85 				 ixgbe_read_eeprom_buffer_bit_bang_generic;
87 	eeprom->ops.write = ixgbe_write_eeprom_generic;
88 	eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
89 	eeprom->ops.validate_checksum =
90 				      ixgbe_validate_eeprom_checksum_generic;
91 	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
92 	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
/* MAC ops: generic implementations; NULL entries must be filled in by the
 * device-specific init (82598/82599/X540/X550) before use. */
95 	mac->ops.init_hw = ixgbe_init_hw_generic;
96 	mac->ops.reset_hw = NULL;
97 	mac->ops.start_hw = ixgbe_start_hw_generic;
98 	mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
99 	mac->ops.get_media_type = NULL;
100 	mac->ops.get_supported_physical_layer = NULL;
101 	mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
102 	mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
103 	mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
104 	mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
105 	mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
106 	mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
107 	mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
108 	mac->ops.prot_autoc_read = prot_autoc_read_generic;
109 	mac->ops.prot_autoc_write = prot_autoc_write_generic;
/* LED ops */
112 	mac->ops.led_on = ixgbe_led_on_generic;
113 	mac->ops.led_off = ixgbe_led_off_generic;
114 	mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
115 	mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
117 	/* RAR, Multicast, VLAN */
118 	mac->ops.set_rar = ixgbe_set_rar_generic;
119 	mac->ops.clear_rar = ixgbe_clear_rar_generic;
120 	mac->ops.insert_mac_addr = NULL;
121 	mac->ops.set_vmdq = NULL;
122 	mac->ops.clear_vmdq = NULL;
123 	mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
124 	mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
125 	mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
126 	mac->ops.enable_mc = ixgbe_enable_mc_generic;
127 	mac->ops.disable_mc = ixgbe_disable_mc_generic;
128 	mac->ops.clear_vfta = NULL;
129 	mac->ops.set_vfta = NULL;
130 	mac->ops.set_vlvf = NULL;
131 	mac->ops.init_uta_tables = NULL;
132 	mac->ops.enable_rx = ixgbe_enable_rx_generic;
133 	mac->ops.disable_rx = ixgbe_disable_rx_generic;
/* Flow control */
136 	mac->ops.fc_enable = ixgbe_fc_enable_generic;
137 	mac->ops.setup_fc = ixgbe_setup_fc_generic;
/* Link: all device-specific, left NULL here */
140 	mac->ops.get_link_capabilities = NULL;
141 	mac->ops.setup_link = NULL;
142 	mac->ops.check_link = NULL;
143 	mac->ops.dmac_config = NULL;
144 	mac->ops.dmac_update_tcs = NULL;
145 	mac->ops.dmac_config_tcs = NULL;
147 	return IXGBE_SUCCESS;
151 * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
153 * @hw: pointer to hardware structure
155 * This function returns true if the device supports flow control
156 * autonegotiation, and false if it does not.
159 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
161 	bool supported = false;
162 	ixgbe_link_speed speed;
/* NOTE(review): the declaration of link_up (used below) is missing from this
 * truncated listing — confirm against the full source. */
165 	DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
/* Decision is per media type: fiber only at 1G, backplane and a whitelist of
 * copper device IDs; the error report below fires for everything else. */
167 	switch (hw->phy.media_type) {
168 	case ixgbe_media_type_fiber_qsfp:
169 	case ixgbe_media_type_fiber:
170 		hw->mac.ops.check_link(hw, &speed, &link_up, false);
171 		/* if link is down, assume supported */
173 			supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
/* NOTE(review): the if (link_up) guard and the ternary's false arm are
 * missing lines in this listing. */
178 	case ixgbe_media_type_backplane:
181 	case ixgbe_media_type_copper:
182 		/* only some copper devices support flow control autoneg */
183 		switch (hw->device_id) {
184 		case IXGBE_DEV_ID_82599_T3_LOM:
185 		case IXGBE_DEV_ID_X540T:
186 		case IXGBE_DEV_ID_X540T1:
187 		case IXGBE_DEV_ID_X550T:
188 		case IXGBE_DEV_ID_X550EM_X_10G_T:
198 		ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
199 			      "Device %x does not support flow control autoneg",
205 * ixgbe_setup_fc_generic - Set up flow control
206 * @hw: pointer to hardware structure
208 * Called at init time to set up flow control.
210 s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
212 	s32 ret_val = IXGBE_SUCCESS;
213 	u32 reg = 0, reg_bp = 0;
/* NOTE(review): declarations of reg_cu and locked (both used below) are
 * missing from this truncated listing. */
217 	DEBUGFUNC("ixgbe_setup_fc_generic");
219 	/* Validate the requested mode */
220 	if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
221 		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
222 			   "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
223 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
228 	 * 10gig parts do not have a word in the EEPROM to determine the
229 	 * default flow control setting, so we explicitly set it to full.
231 	if (hw->fc.requested_mode == ixgbe_fc_default)
232 		hw->fc.requested_mode = ixgbe_fc_full;
235 	 * Set up the 1G and 10G flow control advertisement registers so the
236 	 * HW will be able to do fc autoneg once the cable is plugged in. If
237 	 * we link at 10G, the 1G advertisement is harmless and vice versa.
/* Fetch the current advertisement word for this media: AUTOC (reg_bp) for
 * backplane, PCS1GANA (reg) for fiber, PHY AN advert (reg_cu) for copper. */
239 	switch (hw->phy.media_type) {
240 	case ixgbe_media_type_backplane:
241 		/* some MAC's need RMW protection on AUTOC */
242 		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
243 		if (ret_val != IXGBE_SUCCESS)
246 		/* only backplane uses autoc so fall though */
247 	case ixgbe_media_type_fiber_qsfp:
248 	case ixgbe_media_type_fiber:
249 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
252 	case ixgbe_media_type_copper:
253 		hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
254 				     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
261 	 * The possible values of fc.requested_mode are:
262 	 * 0: Flow control is completely disabled
263 	 * 1: Rx flow control is enabled (we can receive pause frames,
264 	 *    but not send pause frames).
265 	 * 2: Tx flow control is enabled (we can send pause frames but
266 	 *    we do not support receiving pause frames).
267 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
/* Translate requested_mode into SYM/ASM pause advertisement bits in the
 * media-appropriate register selected above. */
270 	switch (hw->fc.requested_mode) {
272 		/* Flow control completely disabled by software override. */
273 		reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
274 		if (hw->phy.media_type == ixgbe_media_type_backplane)
275 			reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
276 				    IXGBE_AUTOC_ASM_PAUSE);
277 		else if (hw->phy.media_type == ixgbe_media_type_copper)
278 			reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
280 	case ixgbe_fc_tx_pause:
282 		 * Tx Flow control is enabled, and Rx Flow control is
283 		 * disabled by software override.
285 		reg |= IXGBE_PCS1GANA_ASM_PAUSE;
286 		reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
287 		if (hw->phy.media_type == ixgbe_media_type_backplane) {
288 			reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
289 			reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
290 		} else if (hw->phy.media_type == ixgbe_media_type_copper) {
291 			reg_cu |= IXGBE_TAF_ASM_PAUSE;
292 			reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
295 	case ixgbe_fc_rx_pause:
297 		 * Rx Flow control is enabled and Tx Flow control is
298 		 * disabled by software override. Since there really
299 		 * isn't a way to advertise that we are capable of RX
300 		 * Pause ONLY, we will advertise that we support both
301 		 * symmetric and asymmetric Rx PAUSE, as such we fall
302 		 * through to the fc_full statement. Later, we will
303 		 * disable the adapter's ability to send PAUSE frames.
306 		/* Flow control (both Rx and Tx) is enabled by SW override. */
307 		reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
308 		if (hw->phy.media_type == ixgbe_media_type_backplane)
309 			reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
310 				  IXGBE_AUTOC_ASM_PAUSE;
311 		else if (hw->phy.media_type == ixgbe_media_type_copper)
312 			reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
315 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
316 			     "Flow control param set incorrectly\n");
317 		ret_val = IXGBE_ERR_CONFIG;
/* Pre-X540 MACs negotiate clause 37 FC through the PCS1G registers. */
322 	if (hw->mac.type < ixgbe_mac_X540) {
324 		 * Enable auto-negotiation between the MAC & PHY;
325 		 * the MAC will advertise clause 37 flow control.
327 		IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
328 		reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
330 		/* Disable AN timeout */
331 		if (hw->fc.strict_ieee)
332 			reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
334 		IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
335 		DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
339 	 * AUTOC restart handles negotiation of 1G and 10G on backplane
340 	 * and copper. There is no need to set the PCS1GCTL register.
343 	if (hw->phy.media_type == ixgbe_media_type_backplane) {
344 		reg_bp |= IXGBE_AUTOC_AN_RESTART;
345 		ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
/* Copper writes the advert word back only when the device supports FC
 * autoneg at all (see ixgbe_device_supports_autoneg_fc). */
348 	} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
349 		    (ixgbe_device_supports_autoneg_fc(hw))) {
350 		hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
351 				      IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
354 	DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
360 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
361 * @hw: pointer to hardware structure
363 * Starts the hardware by filling the bus info structure and media type, clears
364 * all on chip counters, initializes receive address registers, multicast
365 * table, VLAN filter table, calls routine to set up link and flow control
366 * settings, and leaves transmit and receive units disabled and uninitialized
368 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
/* NOTE(review): declarations of ctrl_ext and ret_val (used below) are
 * missing from this truncated listing. */
373 	DEBUGFUNC("ixgbe_start_hw_generic");
375 	/* Set the media type */
376 	hw->phy.media_type = hw->mac.ops.get_media_type(hw);
378 	/* PHY ops initialization must be done in reset_hw() */
380 	/* Clear the VLAN filter table */
381 	hw->mac.ops.clear_vfta(hw);
383 	/* Clear statistics registers */
384 	hw->mac.ops.clear_hw_cntrs(hw);
386 	/* Set No Snoop Disable */
387 	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
388 	ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
389 	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
390 	IXGBE_WRITE_FLUSH(hw);
392 	/* Setup flow control */
393 	ret_val = ixgbe_setup_fc(hw);
394 	if (ret_val != IXGBE_SUCCESS)
/* NOTE(review): the error-return body and the final return statement are
 * missing lines in this listing. */
397 	/* Clear adapter stopped flag */
398 	hw->adapter_stopped = false;
405 * ixgbe_start_hw_gen2 - Init sequence for common device family
406 * @hw: pointer to hw structure
408 * Performs the init sequence common to the second generation
410 * Devices in the second generation:
414 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
/* NOTE(review): declarations of i and regval (used below) are missing from
 * this truncated listing. */
419 	/* Clear the rate limiters */
420 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
421 		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
422 		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
424 	IXGBE_WRITE_FLUSH(hw);
/* Relaxed ordering is cleared per Tx queue (descriptor write-back) and per
 * Rx queue (data and header write-back). */
426 	/* Disable relaxed ordering */
427 	for (i = 0; i < hw->mac.max_tx_queues; i++) {
428 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
429 		regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
430 		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
433 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
434 		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
435 		regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
436 			    IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
437 		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
440 	return IXGBE_SUCCESS;
444 * ixgbe_init_hw_generic - Generic hardware initialization
445 * @hw: pointer to hardware structure
447 * Initialize the hardware by resetting the hardware, filling the bus info
448 * structure and media type, clears all on chip counters, initializes receive
449 * address registers, multicast table, VLAN filter table, calls routine to set
450 * up link and flow control settings, and leaves transmit and receive units
451 * disabled and uninitialized
453 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
/* NOTE(review): the declaration of status and the final return are missing
 * lines in this truncated listing. */
457 	DEBUGFUNC("ixgbe_init_hw_generic");
459 	/* Reset the hardware */
460 	status = hw->mac.ops.reset_hw(hw);
/* Only start the hardware if the reset succeeded. */
462 	if (status == IXGBE_SUCCESS) {
464 		status = hw->mac.ops.start_hw(hw);
471 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
472 * @hw: pointer to hardware structure
474 * Clears all hardware statistics counters by reading them from the hardware
475 * Statistics counters are clear on read.
477 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
/* NOTE(review): the declaration of i is a missing line here; it is also
 * passed as &i to phy read_reg below, which expects a u16 * — confirm the
 * original declares i as u16. */
481 	DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
/* Every IXGBE_READ_REG below is issued purely for its clear-on-read side
 * effect; the returned values are intentionally discarded. */
483 	IXGBE_READ_REG(hw, IXGBE_CRCERRS);
484 	IXGBE_READ_REG(hw, IXGBE_ILLERRC);
485 	IXGBE_READ_REG(hw, IXGBE_ERRBC);
486 	IXGBE_READ_REG(hw, IXGBE_MSPDC);
487 	for (i = 0; i < 8; i++)
488 		IXGBE_READ_REG(hw, IXGBE_MPC(i));
490 	IXGBE_READ_REG(hw, IXGBE_MLFC);
491 	IXGBE_READ_REG(hw, IXGBE_MRFC);
492 	IXGBE_READ_REG(hw, IXGBE_RLEC);
493 	IXGBE_READ_REG(hw, IXGBE_LXONTXC);
494 	IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
/* Link XON/XOFF Rx counters moved registers on 82599 and later. */
495 	if (hw->mac.type >= ixgbe_mac_82599EB) {
496 		IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
497 		IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
499 		IXGBE_READ_REG(hw, IXGBE_LXONRXC);
500 		IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
/* Per-priority (8 TC) pause counters; Rx register set again depends on MAC
 * generation. */
503 	for (i = 0; i < 8; i++) {
504 		IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
505 		IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
506 		if (hw->mac.type >= ixgbe_mac_82599EB) {
507 			IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
508 			IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
510 			IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
511 			IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
514 	if (hw->mac.type >= ixgbe_mac_82599EB)
515 		for (i = 0; i < 8; i++)
516 			IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
/* Packet-size-bucketed Rx counters. */
517 	IXGBE_READ_REG(hw, IXGBE_PRC64);
518 	IXGBE_READ_REG(hw, IXGBE_PRC127);
519 	IXGBE_READ_REG(hw, IXGBE_PRC255);
520 	IXGBE_READ_REG(hw, IXGBE_PRC511);
521 	IXGBE_READ_REG(hw, IXGBE_PRC1023);
522 	IXGBE_READ_REG(hw, IXGBE_PRC1522);
523 	IXGBE_READ_REG(hw, IXGBE_GPRC);
524 	IXGBE_READ_REG(hw, IXGBE_BPRC);
525 	IXGBE_READ_REG(hw, IXGBE_MPRC);
526 	IXGBE_READ_REG(hw, IXGBE_GPTC);
527 	IXGBE_READ_REG(hw, IXGBE_GORCL);
528 	IXGBE_READ_REG(hw, IXGBE_GORCH);
529 	IXGBE_READ_REG(hw, IXGBE_GOTCL);
530 	IXGBE_READ_REG(hw, IXGBE_GOTCH);
531 	if (hw->mac.type == ixgbe_mac_82598EB)
532 		for (i = 0; i < 8; i++)
533 			IXGBE_READ_REG(hw, IXGBE_RNBC(i));
534 	IXGBE_READ_REG(hw, IXGBE_RUC);
535 	IXGBE_READ_REG(hw, IXGBE_RFC);
536 	IXGBE_READ_REG(hw, IXGBE_ROC);
537 	IXGBE_READ_REG(hw, IXGBE_RJC);
538 	IXGBE_READ_REG(hw, IXGBE_MNGPRC);
539 	IXGBE_READ_REG(hw, IXGBE_MNGPDC);
540 	IXGBE_READ_REG(hw, IXGBE_MNGPTC);
541 	IXGBE_READ_REG(hw, IXGBE_TORL);
542 	IXGBE_READ_REG(hw, IXGBE_TORH);
543 	IXGBE_READ_REG(hw, IXGBE_TPR);
544 	IXGBE_READ_REG(hw, IXGBE_TPT);
/* Packet-size-bucketed Tx counters. */
545 	IXGBE_READ_REG(hw, IXGBE_PTC64);
546 	IXGBE_READ_REG(hw, IXGBE_PTC127);
547 	IXGBE_READ_REG(hw, IXGBE_PTC255);
548 	IXGBE_READ_REG(hw, IXGBE_PTC511);
549 	IXGBE_READ_REG(hw, IXGBE_PTC1023);
550 	IXGBE_READ_REG(hw, IXGBE_PTC1522);
551 	IXGBE_READ_REG(hw, IXGBE_MPTC);
552 	IXGBE_READ_REG(hw, IXGBE_BPTC);
/* Per-queue counters (16 queues); QBRC/QBTC split into L/H halves on 82599
 * and later. */
553 	for (i = 0; i < 16; i++) {
554 		IXGBE_READ_REG(hw, IXGBE_QPRC(i));
555 		IXGBE_READ_REG(hw, IXGBE_QPTC(i));
556 		if (hw->mac.type >= ixgbe_mac_82599EB) {
557 			IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
558 			IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
559 			IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
560 			IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
561 			IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
563 			IXGBE_READ_REG(hw, IXGBE_QBRC(i));
564 			IXGBE_READ_REG(hw, IXGBE_QBTC(i));
/* X540/X550 keep some clear-on-read error counters in the PHY's PCS device;
 * identify the PHY first so read_reg targets the right device. */
568 	if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
570 			ixgbe_identify_phy(hw);
571 		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
572 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
573 		hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
574 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
575 		hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
576 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
577 		hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
578 				     IXGBE_MDIO_PCS_DEV_TYPE, &i);
581 	return IXGBE_SUCCESS;
585 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
586 * @hw: pointer to hardware structure
587 * @pba_num: stores the part number string from the EEPROM
588 * @pba_num_size: part number string buffer length
590 * Reads the part number string from the EEPROM.
592 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
601 	DEBUGFUNC("ixgbe_read_pba_string_generic");
603 	if (pba_num == NULL) {
604 		DEBUGOUT("PBA string buffer was null\n");
605 		return IXGBE_ERR_INVALID_ARGUMENT;
/* Read the two PBA pointer words; data acts as a guard word that selects
 * between the legacy packed-hex format and the string-block format. */
608 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
610 		DEBUGOUT("NVM Read Error\n");
614 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
616 		DEBUGOUT("NVM Read Error\n");
621 	 * if data is not ptr guard the PBA must be in legacy format which
622 	 * means pba_ptr is actually our second data word for the PBA number
623 	 * and we can decode it into an ascii string
625 	if (data != IXGBE_PBANUM_PTR_GUARD) {
626 		DEBUGOUT("NVM PBA number is not stored as string\n");
628 		/* we will need 11 characters to store the PBA */
629 		if (pba_num_size < 11) {
630 			DEBUGOUT("PBA string buffer too small\n");
631 			return IXGBE_ERR_NO_SPACE;
634 		/* extract hex string from data and pba_ptr */
635 		pba_num[0] = (data >> 12) & 0xF;
636 		pba_num[1] = (data >> 8) & 0xF;
637 		pba_num[2] = (data >> 4) & 0xF;
638 		pba_num[3] = data & 0xF;
639 		pba_num[4] = (pba_ptr >> 12) & 0xF;
640 		pba_num[5] = (pba_ptr >> 8) & 0xF;
/* NOTE(review): the lines filling pba_num[6]/pba_num[7] (the '-' separator
 * and following nibble in the canonical layout) are missing from this
 * truncated listing. */
643 		pba_num[8] = (pba_ptr >> 4) & 0xF;
644 		pba_num[9] = pba_ptr & 0xF;
646 		/* put a null character on the end of our string */
649 		/* switch all the data but the '-' to hex char */
650 		for (offset = 0; offset < 10; offset++) {
651 			if (pba_num[offset] < 0xA)
652 				pba_num[offset] += '0';
653 			else if (pba_num[offset] < 0x10)
654 				pba_num[offset] += 'A' - 0xA;
657 		return IXGBE_SUCCESS;
/* String-block format: first word at pba_ptr is the block length in words
 * (including the length word itself). */
660 	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
662 		DEBUGOUT("NVM Read Error\n");
666 	if (length == 0xFFFF || length == 0) {
667 		DEBUGOUT("NVM PBA number section invalid length\n");
668 		return IXGBE_ERR_PBA_SECTION;
671 	/* check if pba_num buffer is big enough */
672 	if (pba_num_size  < (((u32)length * 2) - 1)) {
673 		DEBUGOUT("PBA string buffer too small\n");
674 		return IXGBE_ERR_NO_SPACE;
677 	/* trim pba length from start of string */
/* Each EEPROM word expands to two ASCII bytes, high byte first. */
681 	for (offset = 0; offset < length; offset++) {
682 		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
684 			DEBUGOUT("NVM Read Error\n");
687 		pba_num[offset * 2] = (u8)(data >> 8);
688 		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
690 	pba_num[offset * 2] = '\0';
692 	return IXGBE_SUCCESS;
696 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
697 * @hw: pointer to hardware structure
698 * @pba_num: stores the part number from the EEPROM
700 * Reads the part number from the EEPROM.
702 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
707 	DEBUGFUNC("ixgbe_read_pba_num_generic");
/* First word becomes the high 16 bits of *pba_num; a guard value here means
 * the PBA is stored as a string and this legacy API cannot return it. */
709 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
711 		DEBUGOUT("NVM Read Error\n");
713 	} else if (data == IXGBE_PBANUM_PTR_GUARD) {
714 		DEBUGOUT("NVM Not supported\n");
715 		return IXGBE_NOT_IMPLEMENTED;
717 	*pba_num = (u32)(data << 16);
/* Second word supplies the low 16 bits.
 * NOTE(review): the OR of data into *pba_num after this read is a missing
 * line in this truncated listing. */
719 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
721 		DEBUGOUT("NVM Read Error\n");
726 	return IXGBE_SUCCESS;
/* ixgbe_read_pba_raw - read raw PBA words (and PBA block when present) */
731 * @hw: pointer to the HW structure
732 * @eeprom_buf: optional pointer to EEPROM image
733 * @eeprom_buf_size: size of EEPROM image in words
734 * @max_pba_block_size: PBA block size limit
735 * @pba: pointer to output PBA structure
737 * Reads PBA from EEPROM image when eeprom_buf is not NULL.
738 * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
741 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
742 		       u32 eeprom_buf_size, u16 max_pba_block_size,
743 		       struct ixgbe_pba *pba)
749 		return IXGBE_ERR_PARAM;
/* Fetch the two PBA pointer words, either from the device or from the
 * caller-supplied image (bounds-checked against eeprom_buf_size). */
751 	if (eeprom_buf == NULL) {
752 		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
757 		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
758 			pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
759 			pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
761 			return IXGBE_ERR_PARAM;
/* Guard word means word[1] points at a PBA block; copy it into
 * pba->pba_block after validating its size. */
765 	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
766 		if (pba->pba_block == NULL)
767 			return IXGBE_ERR_PARAM;
769 		ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
775 		if (pba_block_size > max_pba_block_size)
776 			return IXGBE_ERR_PARAM;
778 		if (eeprom_buf == NULL) {
779 			ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
785 			if (eeprom_buf_size > (u32)(pba->word[1] +
/* NOTE(review): the pba_block_size term completing this bounds check is a
 * missing line in this truncated listing. */
787 				memcpy(pba->pba_block,
788 				       &eeprom_buf[pba->word[1]],
789 				       pba_block_size * sizeof(u16));
791 				return IXGBE_ERR_PARAM;
796 	return IXGBE_SUCCESS;
800 * ixgbe_write_pba_raw
801 * @hw: pointer to the HW structure
802 * @eeprom_buf: optional pointer to EEPROM image
803 * @eeprom_buf_size: size of EEPROM image in words
804 * @pba: pointer to PBA structure
806 * Writes PBA to EEPROM image when eeprom_buf is not NULL.
807 * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
810 s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
811 			u32 eeprom_buf_size, struct ixgbe_pba *pba)
816 		return IXGBE_ERR_PARAM;
/* Mirror of ixgbe_read_pba_raw: write the two pointer words to the device
 * or into the caller-supplied image. */
818 	if (eeprom_buf == NULL) {
819 		ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
824 		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
825 			eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
826 			eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
828 			return IXGBE_ERR_PARAM;
/* Guard word means a PBA block must also be written; pba_block[0] is the
 * block length in words. */
832 	if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
833 		if (pba->pba_block == NULL)
834 			return IXGBE_ERR_PARAM;
836 		if (eeprom_buf == NULL) {
837 			ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
843 			if (eeprom_buf_size > (u32)(pba->word[1] +
844 						    pba->pba_block[0])) {
845 				memcpy(&eeprom_buf[pba->word[1]],
847 				       pba->pba_block[0] * sizeof(u16));
849 				return IXGBE_ERR_PARAM;
854 	return IXGBE_SUCCESS;
858 * ixgbe_get_pba_block_size
859 * @hw: pointer to the HW structure
860 * @eeprom_buf: optional pointer to EEPROM image
861 * @eeprom_buf_size: size of EEPROM image in words
862 * @pba_data_size: pointer to output variable
864 * Returns the size of the PBA block in words. Function operates on EEPROM
865 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
869 s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
870 			     u32 eeprom_buf_size, u16 *pba_block_size)
876 	DEBUGFUNC("ixgbe_get_pba_block_size");
/* Read the two PBA pointer words from device or image. */
878 	if (eeprom_buf == NULL) {
879 		ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
884 		if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
885 			pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
886 			pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
888 			return IXGBE_ERR_PARAM;
/* Guard word present: the first word of the pointed-to block is its length
 * in words; 0 or 0xFFFF marks an invalid/erased section. */
892 	if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
893 		if (eeprom_buf == NULL) {
894 			ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
899 			if (eeprom_buf_size > pba_word[1])
900 				length = eeprom_buf[pba_word[1] + 0];
902 				return IXGBE_ERR_PARAM;
905 		if (length == 0xFFFF || length == 0)
906 			return IXGBE_ERR_PBA_SECTION;
908 		/* PBA number in legacy format, there is no PBA Block. */
/* NOTE(review): the legacy-format assignment of length (0 in the canonical
 * source) is a missing line in this truncated listing. */
912 	if (pba_block_size != NULL)
913 		*pba_block_size = length;
915 	return IXGBE_SUCCESS;
919 * ixgbe_get_mac_addr_generic - Generic get MAC address
920 * @hw: pointer to hardware structure
921 * @mac_addr: Adapter MAC address
923 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
924 * A reset of the adapter must be performed prior to calling this function
925 * in order for the MAC address to have been loaded from the EEPROM into RAR0
927 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
933 DEBUGFUNC("ixgbe_get_mac_addr_generic");
935 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
936 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
938 for (i = 0; i < 4; i++)
939 mac_addr[i] = (u8)(rar_low >> (i*8));
941 for (i = 0; i < 2; i++)
942 mac_addr[i+4] = (u8)(rar_high >> (i*8));
944 return IXGBE_SUCCESS;
948 * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
949 * @hw: pointer to hardware structure
950 * @link_status: the link status returned by the PCI config space
952 * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
954 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
956 	struct ixgbe_mac_info *mac = &hw->mac;
958 	if (hw->bus.type == ixgbe_bus_type_unknown)
959 		hw->bus.type = ixgbe_bus_type_pci_express;
/* Decode the negotiated PCIe link width field from the Link Status word.
 * NOTE(review): the per-case break; lines are missing from this truncated
 * listing. */
961 	switch (link_status & IXGBE_PCI_LINK_WIDTH) {
962 	case IXGBE_PCI_LINK_WIDTH_1:
963 		hw->bus.width = ixgbe_bus_width_pcie_x1;
965 	case IXGBE_PCI_LINK_WIDTH_2:
966 		hw->bus.width = ixgbe_bus_width_pcie_x2;
968 	case IXGBE_PCI_LINK_WIDTH_4:
969 		hw->bus.width = ixgbe_bus_width_pcie_x4;
971 	case IXGBE_PCI_LINK_WIDTH_8:
972 		hw->bus.width = ixgbe_bus_width_pcie_x8;
975 		hw->bus.width = ixgbe_bus_width_unknown;
/* Decode the negotiated PCIe link speed field (2.5/5/8 GT/s). */
979 	switch (link_status & IXGBE_PCI_LINK_SPEED) {
980 	case IXGBE_PCI_LINK_SPEED_2500:
981 		hw->bus.speed = ixgbe_bus_speed_2500;
983 	case IXGBE_PCI_LINK_SPEED_5000:
984 		hw->bus.speed = ixgbe_bus_speed_5000;
986 	case IXGBE_PCI_LINK_SPEED_8000:
987 		hw->bus.speed = ixgbe_bus_speed_8000;
990 		hw->bus.speed = ixgbe_bus_speed_unknown;
/* Derive the LAN function id for this port. */
994 	mac->ops.set_lan_id(hw);
998 * ixgbe_get_bus_info_generic - Generic set PCI bus info
999 * @hw: pointer to hardware structure
1001 * Gets the PCI bus info (speed, width, type) then calls helper function to
1002 * store this data within the ixgbe_hw structure.
1004 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1008 DEBUGFUNC("ixgbe_get_bus_info_generic");
1010 /* Get the negotiated link width and speed from PCI config space */
1011 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1013 ixgbe_set_pci_config_data_generic(hw, link_status);
1015 return IXGBE_SUCCESS;
1019 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1020 * @hw: pointer to the HW structure
1022 * Determines the LAN function id by reading memory-mapped registers
1023 * and swaps the port value if requested.
1025 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
1027 	struct ixgbe_bus_info *bus = &hw->bus;
/* NOTE(review): the declaration of reg (used below) is a missing line in
 * this truncated listing. */
1030 	DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
/* The STATUS register's LAN ID field identifies which port/function this
 * MAC instance is. */
1032 	reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
1033 	bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
1034 	bus->lan_id = bus->func;
1036 	/* check for a port swap */
1037 	reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
1038 	if (reg & IXGBE_FACTPS_LFS)
/* NOTE(review): the body of the swap (toggling bus->func) is truncated off
 * the end of this listing — confirm against the full source. */
1043 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1044 * @hw: pointer to hardware structure
1046 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1047 * disables transmit and receive units. The adapter_stopped flag is used by
1048 * the shared code and drivers to determine if the adapter is in a stopped
1049 * state and should not touch the hardware.
1051 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
/* NOTE(review): declarations of reg_val and i (used below) are missing from
 * this truncated listing. */
1056 	DEBUGFUNC("ixgbe_stop_adapter_generic");
1059 	 * Set the adapter_stopped flag so other driver functions stop touching
/* Flag first, so concurrent shared-code paths back off before we start
 * tearing the units down. */
1062 	hw->adapter_stopped = true;
1064 	/* Disable the receive unit */
1065 	ixgbe_disable_rx(hw);
1067 	/* Clear interrupt mask to stop interrupts from being generated */
1068 	IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1070 	/* Clear any pending interrupts, flush previous writes */
1071 	IXGBE_READ_REG(hw, IXGBE_EICR);
1073 	/* Disable the transmit unit.  Each queue must be disabled. */
1074 	for (i = 0; i < hw->mac.max_tx_queues; i++)
1075 		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
1077 	/* Disable the receive unit by stopping each queue */
1078 	for (i = 0; i < hw->mac.max_rx_queues; i++) {
1079 		reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1080 		reg_val &= ~IXGBE_RXDCTL_ENABLE;
1081 		reg_val |= IXGBE_RXDCTL_SWFLSH;
1082 		IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
1085 	/* flush all queues disables */
1086 	IXGBE_WRITE_FLUSH(hw);
1090 	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
1091 	 * access and verify no pending requests
1093 	return ixgbe_disable_pcie_master(hw);
1097 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1098 * @hw: pointer to hardware structure
1099 * @index: led number to turn on
1101 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1103 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1105 DEBUGFUNC("ixgbe_led_on_generic");
1107 /* To turn on the LED, set mode to ON. */
1108 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1109 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1110 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1111 IXGBE_WRITE_FLUSH(hw);
1113 return IXGBE_SUCCESS;
1117 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
1118 * @hw: pointer to hardware structure
1119 * @index: led number to turn off
1121 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1123 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1125 DEBUGFUNC("ixgbe_led_off_generic");
1127 /* To turn off the LED, set mode to OFF. */
1128 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1129 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1130 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1131 IXGBE_WRITE_FLUSH(hw);
1133 return IXGBE_SUCCESS;
1137 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1138 * @hw: pointer to hardware structure
1140 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1141 * ixgbe_hw struct in order to set up EEPROM access.
1143 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
1145 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
/* NOTE(review): declarations of eec and eeprom_size (used below) are
 * missing from this truncated listing. */
1149 	DEBUGFUNC("ixgbe_init_eeprom_params_generic");
/* Parameters are computed once; later calls are no-ops because type is no
 * longer ixgbe_eeprom_uninitialized. */
1151 	if (eeprom->type == ixgbe_eeprom_uninitialized) {
1152 		eeprom->type = ixgbe_eeprom_none;
1153 		/* Set default semaphore delay to 10ms which is a well
1155 		eeprom->semaphore_delay = 10;
1156 		/* Clear EEPROM page size, it will be initialized as needed */
1157 		eeprom->word_page_size = 0;
1160 		 * Check for EEPROM present first.
1161 		 * If not present leave as none
1163 		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1164 		if (eec & IXGBE_EEC_PRES) {
1165 			eeprom->type = ixgbe_eeprom_spi;
1168 			 * SPI EEPROM is assumed here.  This code would need to
1169 			 * change if a future EEPROM is not SPI.
/* Word size is encoded as a power-of-two exponent in EEC's SIZE field. */
1171 			eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1172 					    IXGBE_EEC_SIZE_SHIFT);
1173 			eeprom->word_size = 1 << (eeprom_size +
1174 					     IXGBE_EEPROM_WORD_SIZE_SHIFT);
/* Address width (8 vs 16 bit) is reported by the EEC ADDR_SIZE bit. */
1177 		if (eec & IXGBE_EEC_ADDR_SIZE)
1178 			eeprom->address_bits = 16;
1180 			eeprom->address_bits = 8;
1181 		DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
1182 			  "%d\n", eeprom->type, eeprom->word_size,
1183 			  eeprom->address_bits);
1186 	return IXGBE_SUCCESS;
1190 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1191 * @hw: pointer to hardware structure
1192 * @offset: offset within the EEPROM to write
1193 * @words: number of word(s)
1194 * @data: 16 bit word(s) to write to EEPROM
1196 * Reads 16 bit word(s) from EEPROM through bit-bang method
1198 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1199 u16 words, u16 *data)
1201 s32 status = IXGBE_SUCCESS;
1204 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1206 hw->eeprom.ops.init_params(hw);
1209 status = IXGBE_ERR_INVALID_ARGUMENT;
1213 if (offset + words > hw->eeprom.word_size) {
1214 status = IXGBE_ERR_EEPROM;
1219 * The EEPROM page size cannot be queried from the chip. We do lazy
1220 * initialization. It is worth to do that when we write large buffer.
1222 if ((hw->eeprom.word_page_size == 0) &&
1223 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1224 ixgbe_detect_eeprom_page_size_generic(hw, offset);
1227 * We cannot hold synchronization semaphores for too long
1228 * to avoid other entity starvation. However it is more efficient
1229 * to read in bursts than synchronizing access for each word.
1231 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1232 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1233 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1234 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1237 if (status != IXGBE_SUCCESS)
1246 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1247 * @hw: pointer to hardware structure
1248 * @offset: offset within the EEPROM to be written to
1249 * @words: number of word(s)
1250 * @data: 16 bit word(s) to be written to the EEPROM
1252 * If ixgbe_eeprom_update_checksum is not called after this function, the
1253 * EEPROM will most likely contain an invalid checksum.
1255 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1256 u16 words, u16 *data)
1262 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
1264 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
1266 /* Prepare the EEPROM for writing */
1267 status = ixgbe_acquire_eeprom(hw);
1269 if (status == IXGBE_SUCCESS) {
1270 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1271 ixgbe_release_eeprom(hw);
1272 status = IXGBE_ERR_EEPROM;
1276 if (status == IXGBE_SUCCESS) {
1277 for (i = 0; i < words; i++) {
1278 ixgbe_standby_eeprom(hw);
1280 /* Send the WRITE ENABLE command (8 bit opcode ) */
1281 ixgbe_shift_out_eeprom_bits(hw,
1282 IXGBE_EEPROM_WREN_OPCODE_SPI,
1283 IXGBE_EEPROM_OPCODE_BITS);
1285 ixgbe_standby_eeprom(hw);
1288 * Some SPI eeproms use the 8th address bit embedded
1291 if ((hw->eeprom.address_bits == 8) &&
1292 ((offset + i) >= 128))
1293 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1295 /* Send the Write command (8-bit opcode + addr) */
1296 ixgbe_shift_out_eeprom_bits(hw, write_opcode,
1297 IXGBE_EEPROM_OPCODE_BITS);
1298 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1299 hw->eeprom.address_bits);
1301 page_size = hw->eeprom.word_page_size;
1303 /* Send the data in burst via SPI*/
1306 word = (word >> 8) | (word << 8);
1307 ixgbe_shift_out_eeprom_bits(hw, word, 16);
1312 /* do not wrap around page */
1313 if (((offset + i) & (page_size - 1)) ==
1316 } while (++i < words);
1318 ixgbe_standby_eeprom(hw);
1321 /* Done with writing - release the EEPROM */
1322 ixgbe_release_eeprom(hw);
1329 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1330 * @hw: pointer to hardware structure
1331 * @offset: offset within the EEPROM to be written to
1332 * @data: 16 bit word to be written to the EEPROM
1334 * If ixgbe_eeprom_update_checksum is not called after this function, the
1335 * EEPROM will most likely contain an invalid checksum.
1337 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1341 DEBUGFUNC("ixgbe_write_eeprom_generic");
1343 hw->eeprom.ops.init_params(hw);
1345 if (offset >= hw->eeprom.word_size) {
1346 status = IXGBE_ERR_EEPROM;
1350 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1357 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1358 * @hw: pointer to hardware structure
1359 * @offset: offset within the EEPROM to be read
1360 * @data: read 16 bit words(s) from EEPROM
1361 * @words: number of word(s)
1363 * Reads 16 bit word(s) from EEPROM through bit-bang method
1365 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1366 u16 words, u16 *data)
1368 s32 status = IXGBE_SUCCESS;
1371 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1373 hw->eeprom.ops.init_params(hw);
1376 status = IXGBE_ERR_INVALID_ARGUMENT;
1380 if (offset + words > hw->eeprom.word_size) {
1381 status = IXGBE_ERR_EEPROM;
1386 * We cannot hold synchronization semaphores for too long
1387 * to avoid other entity starvation. However it is more efficient
1388 * to read in bursts than synchronizing access for each word.
1390 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1391 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1392 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1394 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1397 if (status != IXGBE_SUCCESS)
1406 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1407 * @hw: pointer to hardware structure
1408 * @offset: offset within the EEPROM to be read
1409 * @words: number of word(s)
1410 * @data: read 16 bit word(s) from EEPROM
1412 * Reads 16 bit word(s) from EEPROM through bit-bang method
1414 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1415 u16 words, u16 *data)
1419 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1422 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1424 /* Prepare the EEPROM for reading */
1425 status = ixgbe_acquire_eeprom(hw);
1427 if (status == IXGBE_SUCCESS) {
1428 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1429 ixgbe_release_eeprom(hw);
1430 status = IXGBE_ERR_EEPROM;
1434 if (status == IXGBE_SUCCESS) {
1435 for (i = 0; i < words; i++) {
1436 ixgbe_standby_eeprom(hw);
1438 * Some SPI eeproms use the 8th address bit embedded
1441 if ((hw->eeprom.address_bits == 8) &&
1442 ((offset + i) >= 128))
1443 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1445 /* Send the READ command (opcode + addr) */
1446 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1447 IXGBE_EEPROM_OPCODE_BITS);
1448 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1449 hw->eeprom.address_bits);
1451 /* Read the data. */
1452 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1453 data[i] = (word_in >> 8) | (word_in << 8);
1456 /* End this read operation */
1457 ixgbe_release_eeprom(hw);
1464 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1465 * @hw: pointer to hardware structure
1466 * @offset: offset within the EEPROM to be read
1467 * @data: read 16 bit value from EEPROM
1469 * Reads 16 bit value from EEPROM through bit-bang method
1471 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1476 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1478 hw->eeprom.ops.init_params(hw);
1480 if (offset >= hw->eeprom.word_size) {
1481 status = IXGBE_ERR_EEPROM;
1485 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1492 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1493 * @hw: pointer to hardware structure
1494 * @offset: offset of word in the EEPROM to read
1495 * @words: number of word(s)
1496 * @data: 16 bit word(s) from the EEPROM
1498 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1500 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1501 u16 words, u16 *data)
1504 s32 status = IXGBE_SUCCESS;
1507 DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1509 hw->eeprom.ops.init_params(hw);
1512 status = IXGBE_ERR_INVALID_ARGUMENT;
1513 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1517 if (offset >= hw->eeprom.word_size) {
1518 status = IXGBE_ERR_EEPROM;
1519 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1523 for (i = 0; i < words; i++) {
1524 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1525 IXGBE_EEPROM_RW_REG_START;
1527 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1528 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1530 if (status == IXGBE_SUCCESS) {
1531 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1532 IXGBE_EEPROM_RW_REG_DATA);
1534 DEBUGOUT("Eeprom read timed out\n");
1543 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1544 * @hw: pointer to hardware structure
1545 * @offset: offset within the EEPROM to be used as a scratch pad
1547 * Discover EEPROM page size by writing marching data at given offset.
1548 * This function is called only when we are writing a new large buffer
1549 * at given offset so the data would be overwritten anyway.
1551 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1554 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1555 s32 status = IXGBE_SUCCESS;
1558 DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1560 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1563 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1564 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1565 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1566 hw->eeprom.word_page_size = 0;
1567 if (status != IXGBE_SUCCESS)
1570 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1571 if (status != IXGBE_SUCCESS)
1575 * When writing in burst more than the actual page size
1576 * EEPROM address wraps around current page.
1578 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1580 DEBUGOUT1("Detected EEPROM page size = %d words.",
1581 hw->eeprom.word_page_size);
1587 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1588 * @hw: pointer to hardware structure
1589 * @offset: offset of word in the EEPROM to read
1590 * @data: word read from the EEPROM
1592 * Reads a 16 bit word from the EEPROM using the EERD register.
1594 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1596 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1600 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1601 * @hw: pointer to hardware structure
1602 * @offset: offset of word in the EEPROM to write
1603 * @words: number of word(s)
1604 * @data: word(s) write to the EEPROM
1606 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1608 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1609 u16 words, u16 *data)
1612 s32 status = IXGBE_SUCCESS;
1615 DEBUGFUNC("ixgbe_write_eewr_generic");
1617 hw->eeprom.ops.init_params(hw);
1620 status = IXGBE_ERR_INVALID_ARGUMENT;
1621 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1625 if (offset >= hw->eeprom.word_size) {
1626 status = IXGBE_ERR_EEPROM;
1627 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1631 for (i = 0; i < words; i++) {
1632 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1633 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1634 IXGBE_EEPROM_RW_REG_START;
1636 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1637 if (status != IXGBE_SUCCESS) {
1638 DEBUGOUT("Eeprom write EEWR timed out\n");
1642 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1644 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1645 if (status != IXGBE_SUCCESS) {
1646 DEBUGOUT("Eeprom write EEWR timed out\n");
1656 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1657 * @hw: pointer to hardware structure
1658 * @offset: offset of word in the EEPROM to write
1659 * @data: word write to the EEPROM
1661 * Write a 16 bit word to the EEPROM using the EEWR register.
1663 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1665 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1669 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1670 * @hw: pointer to hardware structure
1671 * @ee_reg: EEPROM flag for polling
1673 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1674 * read or write is done respectively.
1676 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1680 s32 status = IXGBE_ERR_EEPROM;
1682 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1684 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1685 if (ee_reg == IXGBE_NVM_POLL_READ)
1686 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1688 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1690 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1691 status = IXGBE_SUCCESS;
1697 if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1698 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1699 "EEPROM read/write done polling timed out");
1705 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1706 * @hw: pointer to hardware structure
1708 * Prepares EEPROM for access using bit-bang method. This function should
1709 * be called before issuing a command to the EEPROM.
1711 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1713 s32 status = IXGBE_SUCCESS;
1717 DEBUGFUNC("ixgbe_acquire_eeprom");
1719 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1721 status = IXGBE_ERR_SWFW_SYNC;
1723 if (status == IXGBE_SUCCESS) {
1724 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1726 /* Request EEPROM Access */
1727 eec |= IXGBE_EEC_REQ;
1728 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1730 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1731 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1732 if (eec & IXGBE_EEC_GNT)
1737 /* Release if grant not acquired */
1738 if (!(eec & IXGBE_EEC_GNT)) {
1739 eec &= ~IXGBE_EEC_REQ;
1740 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1741 DEBUGOUT("Could not acquire EEPROM grant\n");
1743 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1744 status = IXGBE_ERR_EEPROM;
1747 /* Setup EEPROM for Read/Write */
1748 if (status == IXGBE_SUCCESS) {
1749 /* Clear CS and SK */
1750 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1751 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1752 IXGBE_WRITE_FLUSH(hw);
1760 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1761 * @hw: pointer to hardware structure
1763 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1765 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1767 s32 status = IXGBE_ERR_EEPROM;
1772 DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1775 /* Get SMBI software semaphore between device drivers first */
1776 for (i = 0; i < timeout; i++) {
1778 * If the SMBI bit is 0 when we read it, then the bit will be
1779 * set and we have the semaphore
1781 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1782 if (!(swsm & IXGBE_SWSM_SMBI)) {
1783 status = IXGBE_SUCCESS;
1790 DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1793 * this release is particularly important because our attempts
1794 * above to get the semaphore may have succeeded, and if there
1795 * was a timeout, we should unconditionally clear the semaphore
1796 * bits to free the driver to make progress
1798 ixgbe_release_eeprom_semaphore(hw);
1803 * If the SMBI bit is 0 when we read it, then the bit will be
1804 * set and we have the semaphore
1806 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1807 if (!(swsm & IXGBE_SWSM_SMBI))
1808 status = IXGBE_SUCCESS;
1811 /* Now get the semaphore between SW/FW through the SWESMBI bit */
1812 if (status == IXGBE_SUCCESS) {
1813 for (i = 0; i < timeout; i++) {
1814 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1816 /* Set the SW EEPROM semaphore bit to request access */
1817 swsm |= IXGBE_SWSM_SWESMBI;
1818 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1821 * If we set the bit successfully then we got the
1824 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1825 if (swsm & IXGBE_SWSM_SWESMBI)
1832 * Release semaphores and return error if SW EEPROM semaphore
1833 * was not granted because we don't have access to the EEPROM
1836 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1837 "SWESMBI Software EEPROM semaphore not granted.\n");
1838 ixgbe_release_eeprom_semaphore(hw);
1839 status = IXGBE_ERR_EEPROM;
1842 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1843 "Software semaphore SMBI between device drivers "
1851 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1852 * @hw: pointer to hardware structure
1854 * This function clears hardware semaphore bits.
1856 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1860 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1862 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1864 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1865 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1866 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1867 IXGBE_WRITE_FLUSH(hw);
1871 * ixgbe_ready_eeprom - Polls for EEPROM ready
1872 * @hw: pointer to hardware structure
1874 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1876 s32 status = IXGBE_SUCCESS;
1880 DEBUGFUNC("ixgbe_ready_eeprom");
1883 * Read "Status Register" repeatedly until the LSB is cleared. The
1884 * EEPROM will signal that the command has been completed by clearing
1885 * bit 0 of the internal status register. If it's not cleared within
1886 * 5 milliseconds, then error out.
1888 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1889 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1890 IXGBE_EEPROM_OPCODE_BITS);
1891 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1892 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1896 ixgbe_standby_eeprom(hw);
1900 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1901 * devices (and only 0-5mSec on 5V devices)
1903 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1904 DEBUGOUT("SPI EEPROM Status error\n");
1905 status = IXGBE_ERR_EEPROM;
1912 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1913 * @hw: pointer to hardware structure
1915 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1919 DEBUGFUNC("ixgbe_standby_eeprom");
1921 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1923 /* Toggle CS to flush commands */
1924 eec |= IXGBE_EEC_CS;
1925 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1926 IXGBE_WRITE_FLUSH(hw);
1928 eec &= ~IXGBE_EEC_CS;
1929 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1930 IXGBE_WRITE_FLUSH(hw);
1935 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1936 * @hw: pointer to hardware structure
1937 * @data: data to send to the EEPROM
1938 * @count: number of bits to shift out
1940 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1947 DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1949 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1952 * Mask is used to shift "count" bits of "data" out to the EEPROM
1953 * one bit at a time. Determine the starting bit based on count
1955 mask = 0x01 << (count - 1);
1957 for (i = 0; i < count; i++) {
1959 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1960 * "1", and then raising and then lowering the clock (the SK
1961 * bit controls the clock input to the EEPROM). A "0" is
1962 * shifted out to the EEPROM by setting "DI" to "0" and then
1963 * raising and then lowering the clock.
1966 eec |= IXGBE_EEC_DI;
1968 eec &= ~IXGBE_EEC_DI;
1970 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1971 IXGBE_WRITE_FLUSH(hw);
1975 ixgbe_raise_eeprom_clk(hw, &eec);
1976 ixgbe_lower_eeprom_clk(hw, &eec);
1979 * Shift mask to signify next bit of data to shift in to the
1985 /* We leave the "DI" bit set to "0" when we leave this routine. */
1986 eec &= ~IXGBE_EEC_DI;
1987 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1988 IXGBE_WRITE_FLUSH(hw);
1992 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1993 * @hw: pointer to hardware structure
1995 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
2001 DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
2004 * In order to read a register from the EEPROM, we need to shift
2005 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
2006 * the clock input to the EEPROM (setting the SK bit), and then reading
2007 * the value of the "DO" bit. During this "shifting in" process the
2008 * "DI" bit should always be clear.
2010 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
2012 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
2014 for (i = 0; i < count; i++) {
2016 ixgbe_raise_eeprom_clk(hw, &eec);
2018 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
2020 eec &= ~(IXGBE_EEC_DI);
2021 if (eec & IXGBE_EEC_DO)
2024 ixgbe_lower_eeprom_clk(hw, &eec);
2031 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2032 * @hw: pointer to hardware structure
2033 * @eec: EEC register's current value
2035 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2037 DEBUGFUNC("ixgbe_raise_eeprom_clk");
2040 * Raise the clock input to the EEPROM
2041 * (setting the SK bit), then delay
2043 *eec = *eec | IXGBE_EEC_SK;
2044 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
2045 IXGBE_WRITE_FLUSH(hw);
2050 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2051 * @hw: pointer to hardware structure
2052 * @eecd: EECD's current value
2054 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2056 DEBUGFUNC("ixgbe_lower_eeprom_clk");
2059 * Lower the clock input to the EEPROM (clearing the SK bit), then
2062 *eec = *eec & ~IXGBE_EEC_SK;
2063 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
2064 IXGBE_WRITE_FLUSH(hw);
2069 * ixgbe_release_eeprom - Release EEPROM, release semaphores
2070 * @hw: pointer to hardware structure
2072 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2076 DEBUGFUNC("ixgbe_release_eeprom");
2078 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
2080 eec |= IXGBE_EEC_CS; /* Pull CS high */
2081 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2083 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
2084 IXGBE_WRITE_FLUSH(hw);
2088 /* Stop requesting EEPROM access */
2089 eec &= ~IXGBE_EEC_REQ;
2090 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
2092 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2094 /* Delay before attempt to obtain semaphore again to allow FW access */
2095 msec_delay(hw->eeprom.semaphore_delay);
2099 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2100 * @hw: pointer to hardware structure
2102 * Returns a negative error code on error, or the 16-bit checksum
2104 s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
2113 DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
2115 /* Include 0x0-0x3F in the checksum */
2116 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
2117 if (hw->eeprom.ops.read(hw, i, &word)) {
2118 DEBUGOUT("EEPROM read failed\n");
2119 return IXGBE_ERR_EEPROM;
2124 /* Include all data from pointers except for the fw pointer */
2125 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
2126 if (hw->eeprom.ops.read(hw, i, &pointer)) {
2127 DEBUGOUT("EEPROM read failed\n");
2128 return IXGBE_ERR_EEPROM;
2131 /* If the pointer seems invalid */
2132 if (pointer == 0xFFFF || pointer == 0)
2135 if (hw->eeprom.ops.read(hw, pointer, &length)) {
2136 DEBUGOUT("EEPROM read failed\n");
2137 return IXGBE_ERR_EEPROM;
2140 if (length == 0xFFFF || length == 0)
2143 for (j = pointer + 1; j <= pointer + length; j++) {
2144 if (hw->eeprom.ops.read(hw, j, &word)) {
2145 DEBUGOUT("EEPROM read failed\n");
2146 return IXGBE_ERR_EEPROM;
2152 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
2154 return (s32)checksum;
2158 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2159 * @hw: pointer to hardware structure
2160 * @checksum_val: calculated checksum
2162 * Performs checksum calculation and validates the EEPROM checksum. If the
2163 * caller does not need checksum_val, the value can be NULL.
2165 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2170 u16 read_checksum = 0;
2172 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2174 /* Read the first word from the EEPROM. If this times out or fails, do
2175 * not continue or we could be in for a very long wait while every
2178 status = hw->eeprom.ops.read(hw, 0, &checksum);
2180 DEBUGOUT("EEPROM read failed\n");
2184 status = hw->eeprom.ops.calc_checksum(hw);
2188 checksum = (u16)(status & 0xffff);
2190 status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2192 DEBUGOUT("EEPROM read failed\n");
2196 /* Verify read checksum from EEPROM is the same as
2197 * calculated checksum
2199 if (read_checksum != checksum)
2200 status = IXGBE_ERR_EEPROM_CHECKSUM;
2202 /* If the user cares, return the calculated checksum */
2204 *checksum_val = checksum;
2210 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2211 * @hw: pointer to hardware structure
2213 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2218 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2220 /* Read the first word from the EEPROM. If this times out or fails, do
2221 * not continue or we could be in for a very long wait while every
2224 status = hw->eeprom.ops.read(hw, 0, &checksum);
2226 DEBUGOUT("EEPROM read failed\n");
2230 status = hw->eeprom.ops.calc_checksum(hw);
2234 checksum = (u16)(status & 0xffff);
2236 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
2242 * ixgbe_validate_mac_addr - Validate MAC address
2243 * @mac_addr: pointer to MAC address.
2245 * Tests a MAC address to ensure it is a valid Individual Address
2247 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2249 s32 status = IXGBE_SUCCESS;
2251 DEBUGFUNC("ixgbe_validate_mac_addr");
2253 /* Make sure it is not a multicast address */
2254 if (IXGBE_IS_MULTICAST(mac_addr)) {
2255 DEBUGOUT("MAC address is multicast\n");
2256 status = IXGBE_ERR_INVALID_MAC_ADDR;
2257 /* Not a broadcast address */
2258 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
2259 DEBUGOUT("MAC address is broadcast\n");
2260 status = IXGBE_ERR_INVALID_MAC_ADDR;
2261 /* Reject the zero address */
2262 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2263 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2264 DEBUGOUT("MAC address is all zeros\n");
2265 status = IXGBE_ERR_INVALID_MAC_ADDR;
2271 * ixgbe_set_rar_generic - Set Rx address register
2272 * @hw: pointer to hardware structure
2273 * @index: Receive address register to write
2274 * @addr: Address to put into receive address register
2275 * @vmdq: VMDq "set" or "pool" index
2276 * @enable_addr: set flag that address is active
2278 * Puts an ethernet address into a receive address register.
2280 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2283 u32 rar_low, rar_high;
2284 u32 rar_entries = hw->mac.num_rar_entries;
2286 DEBUGFUNC("ixgbe_set_rar_generic");
2288 /* Make sure we are using a valid rar index range */
2289 if (index >= rar_entries) {
2290 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2291 "RAR index %d is out of range.\n", index);
2292 return IXGBE_ERR_INVALID_ARGUMENT;
2295 /* setup VMDq pool selection before this RAR gets enabled */
2296 hw->mac.ops.set_vmdq(hw, index, vmdq);
2299 * HW expects these in little endian so we reverse the byte
2300 * order from network order (big endian) to little endian
2302 rar_low = ((u32)addr[0] |
2303 ((u32)addr[1] << 8) |
2304 ((u32)addr[2] << 16) |
2305 ((u32)addr[3] << 24));
2307 * Some parts put the VMDq setting in the extra RAH bits,
2308 * so save everything except the lower 16 bits that hold part
2309 * of the address and the address valid bit.
2311 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2312 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2313 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2315 if (enable_addr != 0)
2316 rar_high |= IXGBE_RAH_AV;
2318 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2319 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2321 return IXGBE_SUCCESS;
2325 * ixgbe_clear_rar_generic - Remove Rx address register
2326 * @hw: pointer to hardware structure
2327 * @index: Receive address register to write
2329 * Clears an ethernet address from a receive address register.
2331 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2334 u32 rar_entries = hw->mac.num_rar_entries;
2336 DEBUGFUNC("ixgbe_clear_rar_generic");
2338 /* Make sure we are using a valid rar index range */
2339 if (index >= rar_entries) {
2340 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2341 "RAR index %d is out of range.\n", index);
2342 return IXGBE_ERR_INVALID_ARGUMENT;
2346 * Some parts put the VMDq setting in the extra RAH bits,
2347 * so save everything except the lower 16 bits that hold part
2348 * of the address and the address valid bit.
2350 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2351 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2353 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2354 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2356 /* clear VMDq pool/queue selection for this RAR */
2357 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2359 return IXGBE_SUCCESS;
2363 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2364 * @hw: pointer to hardware structure
2366 * Places the MAC address in receive address register 0 and clears the rest
2367 * of the receive address registers. Clears the multicast table. Assumes
2368 * the receiver is in reset when the routine is called.
2370 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
2373 u32 rar_entries = hw->mac.num_rar_entries;
2375 DEBUGFUNC("ixgbe_init_rx_addrs_generic");
2378 * If the current mac address is valid, assume it is a software override
2379 * to the permanent address.
2380 * Otherwise, use the permanent address from the eeprom.
2382 if (ixgbe_validate_mac_addr(hw->mac.addr) ==
2383 IXGBE_ERR_INVALID_MAC_ADDR) {
2384 /* Get the MAC address from the RAR0 for later reference */
2385 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2387 DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
2388 hw->mac.addr[0], hw->mac.addr[1],
2390 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2391 hw->mac.addr[4], hw->mac.addr[5]);
2393 /* Setup the receive address. */
2394 DEBUGOUT("Overriding MAC Address in RAR[0]\n");
2395 DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
2396 hw->mac.addr[0], hw->mac.addr[1],
2398 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2399 hw->mac.addr[4], hw->mac.addr[5]);
2401 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2403 /* clear VMDq pool/queue selection for RAR 0 */
2404 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2406 hw->addr_ctrl.overflow_promisc = 0;
2408 hw->addr_ctrl.rar_used_count = 1;
2410 /* Zero out the other receive addresses. */
2411 DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2412 for (i = 1; i < rar_entries; i++) {
2413 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2414 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2418 hw->addr_ctrl.mta_in_use = 0;
2419 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2421 DEBUGOUT(" Clearing MTA\n");
2422 for (i = 0; i < hw->mac.mcft_size; i++)
2423 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2425 ixgbe_init_uta_tables(hw);
2427 return IXGBE_SUCCESS;
2431 * ixgbe_add_uc_addr - Adds a secondary unicast address.
2432 * @hw: pointer to hardware structure
2433 * @addr: new address
2435 * Adds it to unused receive address register or goes into promiscuous mode.
2437 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2439 u32 rar_entries = hw->mac.num_rar_entries;
2442 DEBUGFUNC("ixgbe_add_uc_addr");
2444 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2445 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2448 * Place this address in the RAR if there is room,
2449 * else put the controller into promiscuous mode
2451 if (hw->addr_ctrl.rar_used_count < rar_entries) {
2452 rar = hw->addr_ctrl.rar_used_count;
2453 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2454 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2455 hw->addr_ctrl.rar_used_count++;
2457 hw->addr_ctrl.overflow_promisc++;
2460 DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2464 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2465 * @hw: pointer to hardware structure
2466 * @addr_list: the list of new addresses
2467 * @addr_count: number of addresses
2468 * @next: iterator function to walk the address list
2470 * The given list replaces any existing list. Clears the secondary addrs from
2471 * receive address registers. Uses unused receive address registers for the
2472 * first secondary addresses, and falls back to promiscuous mode as needed.
2474 * Drivers using secondary unicast addresses must set user_set_promisc when
2475 * manually putting the device into promiscuous mode.
2477 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2478 u32 addr_count, ixgbe_mc_addr_itr next)
2482 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2487 DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2490 * Clear accounting of old secondary address list,
2491 * don't count RAR[0]
2493 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2494 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2495 hw->addr_ctrl.overflow_promisc = 0;
2497 /* Zero out the other receive addresses */
2498 DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2499 for (i = 0; i < uc_addr_in_use; i++) {
2500 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2501 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2504 /* Add the new addresses */
2505 for (i = 0; i < addr_count; i++) {
2506 DEBUGOUT(" Adding the secondary addresses:\n");
2507 addr = next(hw, &addr_list, &vmdq);
2508 ixgbe_add_uc_addr(hw, addr, vmdq);
2511 if (hw->addr_ctrl.overflow_promisc) {
2512 /* enable promisc if not already in overflow or set by user */
2513 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2514 DEBUGOUT(" Entering address overflow promisc mode\n");
2515 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2516 fctrl |= IXGBE_FCTRL_UPE;
2517 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2520 /* only disable if set by overflow, not by user */
2521 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2522 DEBUGOUT(" Leaving address overflow promisc mode\n");
2523 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2524 fctrl &= ~IXGBE_FCTRL_UPE;
2525 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2529 DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2530 return IXGBE_SUCCESS;
2534 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2535 * @hw: pointer to hardware structure
2536 * @mc_addr: the multicast address
2538 * Extracts the 12 bits, from a multicast address, to determine which
2539 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
2540 * incoming rx multicast addresses, to determine the bit-vector to check in
2541 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2542 * by the MO field of the MCSTCTRL. The MO field is set during initialization
2543 * to mc_filter_type.
2545 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2549 DEBUGFUNC("ixgbe_mta_vector");
2551 switch (hw->mac.mc_filter_type) {
2552 case 0: /* use bits [47:36] of the address */
2553 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2555 case 1: /* use bits [46:35] of the address */
2556 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2558 case 2: /* use bits [45:34] of the address */
2559 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2561 case 3: /* use bits [43:32] of the address */
2562 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2564 default: /* Invalid mc_filter_type */
2565 DEBUGOUT("MC filter type param set incorrectly\n");
2570 /* vector can only be 12-bits or boundary will be exceeded */
2576 * ixgbe_set_mta - Set bit-vector in multicast table
2577 * @hw: pointer to hardware structure
2578 * @hash_value: Multicast address hash value
2580 * Sets the bit-vector in the multicast table.
2582 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2588 DEBUGFUNC("ixgbe_set_mta");
2590 hw->addr_ctrl.mta_in_use++;
2592 vector = ixgbe_mta_vector(hw, mc_addr);
2593 DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2596 * The MTA is a register array of 128 32-bit registers. It is treated
2597 * like an array of 4096 bits. We want to set bit
2598 * BitArray[vector_value]. So we figure out what register the bit is
2599 * in, read it, OR in the new bit, then write back the new value. The
2600 * register is determined by the upper 7 bits of the vector value and
2601 * the bit within that register are determined by the lower 5 bits of
2604 vector_reg = (vector >> 5) & 0x7F;
2605 vector_bit = vector & 0x1F;
2606 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2610 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2611 * @hw: pointer to hardware structure
2612 * @mc_addr_list: the list of new multicast addresses
2613 * @mc_addr_count: number of addresses
2614 * @next: iterator function to walk the multicast address list
2615 * @clear: flag, when set clears the table beforehand
2617 * When the clear flag is set, the given list replaces any existing list.
2618 * Hashes the given addresses into the multicast table.
2620 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2621 u32 mc_addr_count, ixgbe_mc_addr_itr next,
2627 DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2630 * Set the new number of MC addresses that we are being requested to
2633 hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2634 hw->addr_ctrl.mta_in_use = 0;
2636 /* Clear mta_shadow */
2638 DEBUGOUT(" Clearing MTA\n");
2639 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2642 /* Update mta_shadow */
2643 for (i = 0; i < mc_addr_count; i++) {
2644 DEBUGOUT(" Adding the multicast addresses:\n");
2645 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2649 for (i = 0; i < hw->mac.mcft_size; i++)
2650 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2651 hw->mac.mta_shadow[i]);
2653 if (hw->addr_ctrl.mta_in_use > 0)
2654 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2655 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2657 DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2658 return IXGBE_SUCCESS;
2662 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2663 * @hw: pointer to hardware structure
2665 * Enables multicast address in RAR and the use of the multicast hash table.
2667 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2669 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2671 DEBUGFUNC("ixgbe_enable_mc_generic");
2673 if (a->mta_in_use > 0)
2674 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2675 hw->mac.mc_filter_type);
2677 return IXGBE_SUCCESS;
2681 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2682 * @hw: pointer to hardware structure
2684 * Disables multicast address in RAR and the use of the multicast hash table.
2686 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2688 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2690 DEBUGFUNC("ixgbe_disable_mc_generic");
2692 if (a->mta_in_use > 0)
2693 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2695 return IXGBE_SUCCESS;
2699 * ixgbe_fc_enable_generic - Enable flow control
2700 * @hw: pointer to hardware structure
2702 * Enable flow control according to the current settings.
2704 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2706 s32 ret_val = IXGBE_SUCCESS;
2707 u32 mflcn_reg, fccfg_reg;
2712 DEBUGFUNC("ixgbe_fc_enable_generic");
2714 /* Validate the water mark configuration */
2715 if (!hw->fc.pause_time) {
2716 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2720 /* Low water mark of zero causes XOFF floods */
2721 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2722 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2723 hw->fc.high_water[i]) {
2724 if (!hw->fc.low_water[i] ||
2725 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2726 DEBUGOUT("Invalid water mark configuration\n");
2727 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2733 /* Negotiate the fc mode to use */
2734 ixgbe_fc_autoneg(hw);
2736 /* Disable any previous flow control settings */
2737 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2738 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2740 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2741 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2744 * The possible values of fc.current_mode are:
2745 * 0: Flow control is completely disabled
2746 * 1: Rx flow control is enabled (we can receive pause frames,
2747 * but not send pause frames).
2748 * 2: Tx flow control is enabled (we can send pause frames but
2749 * we do not support receiving pause frames).
2750 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2753 switch (hw->fc.current_mode) {
2756 * Flow control is disabled by software override or autoneg.
2757 * The code below will actually disable it in the HW.
2760 case ixgbe_fc_rx_pause:
2762 * Rx Flow control is enabled and Tx Flow control is
2763 * disabled by software override. Since there really
2764 * isn't a way to advertise that we are capable of RX
2765 * Pause ONLY, we will advertise that we support both
2766 * symmetric and asymmetric Rx PAUSE. Later, we will
2767 * disable the adapter's ability to send PAUSE frames.
2769 mflcn_reg |= IXGBE_MFLCN_RFCE;
2771 case ixgbe_fc_tx_pause:
2773 * Tx Flow control is enabled, and Rx Flow control is
2774 * disabled by software override.
2776 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2779 /* Flow control (both Rx and Tx) is enabled by SW override. */
2780 mflcn_reg |= IXGBE_MFLCN_RFCE;
2781 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2784 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
2785 "Flow control param set incorrectly\n");
2786 ret_val = IXGBE_ERR_CONFIG;
2791 /* Set 802.3x based flow control settings. */
2792 mflcn_reg |= IXGBE_MFLCN_DPF;
2793 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2794 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2797 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2798 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2799 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2800 hw->fc.high_water[i]) {
2801 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2802 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2803 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2805 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2807 * In order to prevent Tx hangs when the internal Tx
2808 * switch is enabled we must set the high water mark
2809 * to the Rx packet buffer size - 24KB. This allows
2810 * the Tx switch to function even under heavy Rx
2813 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
2816 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2819 /* Configure pause time (2 TCs per register) */
2820 reg = hw->fc.pause_time * 0x00010001;
2821 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2822 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2824 /* Configure flow control refresh threshold value */
2825 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2832 * ixgbe_negotiate_fc - Negotiate flow control
2833 * @hw: pointer to hardware structure
2834 * @adv_reg: flow control advertised settings
2835 * @lp_reg: link partner's flow control settings
2836 * @adv_sym: symmetric pause bit in advertisement
2837 * @adv_asm: asymmetric pause bit in advertisement
2838 * @lp_sym: symmetric pause bit in link partner advertisement
2839 * @lp_asm: asymmetric pause bit in link partner advertisement
2841 * Find the intersection between advertised settings and link partner's
2842 * advertised settings
2844 STATIC s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2845 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2847 if ((!(adv_reg)) || (!(lp_reg))) {
2848 ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2849 "Local or link partner's advertised flow control "
2850 "settings are NULL. Local: %x, link partner: %x\n",
2852 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2855 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2857 * Now we need to check if the user selected Rx ONLY
2858 * of pause frames. In this case, we had to advertise
2859 * FULL flow control because we could not advertise RX
2860 * ONLY. Hence, we must now check to see if we need to
2861 * turn OFF the TRANSMISSION of PAUSE frames.
2863 if (hw->fc.requested_mode == ixgbe_fc_full) {
2864 hw->fc.current_mode = ixgbe_fc_full;
2865 DEBUGOUT("Flow Control = FULL.\n");
2867 hw->fc.current_mode = ixgbe_fc_rx_pause;
2868 DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2870 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2871 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2872 hw->fc.current_mode = ixgbe_fc_tx_pause;
2873 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2874 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2875 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2876 hw->fc.current_mode = ixgbe_fc_rx_pause;
2877 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2879 hw->fc.current_mode = ixgbe_fc_none;
2880 DEBUGOUT("Flow Control = NONE.\n");
2882 return IXGBE_SUCCESS;
2886 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2887 * @hw: pointer to hardware structure
2889 * Enable flow control according on 1 gig fiber.
2891 STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2893 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2894 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2897 * On multispeed fiber at 1g, bail out if
2898 * - link is up but AN did not complete, or if
2899 * - link is up and AN completed but timed out
2902 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2903 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2904 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
2905 DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
2909 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2910 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2912 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2913 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2914 IXGBE_PCS1GANA_ASM_PAUSE,
2915 IXGBE_PCS1GANA_SYM_PAUSE,
2916 IXGBE_PCS1GANA_ASM_PAUSE);
2923 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2924 * @hw: pointer to hardware structure
2926 * Enable flow control according to IEEE clause 37.
2928 STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2930 u32 links2, anlp1_reg, autoc_reg, links;
2931 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2934 * On backplane, bail out if
2935 * - backplane autoneg was not completed, or if
2936 * - we are 82599 and link partner is not AN enabled
2938 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2939 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
2940 DEBUGOUT("Auto-Negotiation did not complete\n");
2944 if (hw->mac.type == ixgbe_mac_82599EB) {
2945 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2946 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2947 DEBUGOUT("Link partner is not AN enabled\n");
2952 * Read the 10g AN autoc and LP ability registers and resolve
2953 * local flow control settings accordingly
2955 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2956 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2958 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2959 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2960 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2967 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2968 * @hw: pointer to hardware structure
2970 * Enable flow control according to IEEE clause 37.
2972 STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2974 u16 technology_ability_reg = 0;
2975 u16 lp_technology_ability_reg = 0;
2977 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2978 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2979 &technology_ability_reg);
2980 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2981 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2982 &lp_technology_ability_reg);
2984 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2985 (u32)lp_technology_ability_reg,
2986 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2987 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2991 * ixgbe_fc_autoneg - Configure flow control
2992 * @hw: pointer to hardware structure
2994 * Compares our advertised flow control capabilities to those advertised by
2995 * our link partner, and determines the proper flow control mode to use.
2997 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2999 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3000 ixgbe_link_speed speed;
3003 DEBUGFUNC("ixgbe_fc_autoneg");
3006 * AN should have completed when the cable was plugged in.
3007 * Look for reasons to bail out. Bail out if:
3008 * - FC autoneg is disabled, or if
3011 if (hw->fc.disable_fc_autoneg) {
3012 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3013 "Flow control autoneg is disabled");
3017 hw->mac.ops.check_link(hw, &speed, &link_up, false);
3019 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
3023 switch (hw->phy.media_type) {
3024 /* Autoneg flow control on fiber adapters */
3025 case ixgbe_media_type_fiber_qsfp:
3026 case ixgbe_media_type_fiber:
3027 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
3028 ret_val = ixgbe_fc_autoneg_fiber(hw);
3031 /* Autoneg flow control on backplane adapters */
3032 case ixgbe_media_type_backplane:
3033 ret_val = ixgbe_fc_autoneg_backplane(hw);
3036 /* Autoneg flow control on copper adapters */
3037 case ixgbe_media_type_copper:
3038 if (ixgbe_device_supports_autoneg_fc(hw))
3039 ret_val = ixgbe_fc_autoneg_copper(hw);
3047 if (ret_val == IXGBE_SUCCESS) {
3048 hw->fc.fc_was_autonegged = true;
3050 hw->fc.fc_was_autonegged = false;
3051 hw->fc.current_mode = hw->fc.requested_mode;
3056 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3057 * @hw: pointer to hardware structure
3059 * System-wide timeout range is encoded in PCIe Device Control2 register.
3061 * Add 10% to specified maximum and return the number of times to poll for
3062 * completion timeout, in units of 100 microsec. Never return less than
3063 * 800 = 80 millisec.
3065 STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3070 devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3071 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3074 case IXGBE_PCIDEVCTRL2_65_130ms:
3075 pollcnt = 1300; /* 130 millisec */
3077 case IXGBE_PCIDEVCTRL2_260_520ms:
3078 pollcnt = 5200; /* 520 millisec */
3080 case IXGBE_PCIDEVCTRL2_1_2s:
3081 pollcnt = 20000; /* 2 sec */
3083 case IXGBE_PCIDEVCTRL2_4_8s:
3084 pollcnt = 80000; /* 8 sec */
3086 case IXGBE_PCIDEVCTRL2_17_34s:
3087 pollcnt = 34000; /* 34 sec */
3089 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
3090 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
3091 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
3092 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
3094 pollcnt = 800; /* 80 millisec minimum */
3098 /* add 10% to spec maximum */
3099 return (pollcnt * 11) / 10;
3103 * ixgbe_disable_pcie_master - Disable PCI-express master access
3104 * @hw: pointer to hardware structure
3106 * Disables PCI-Express master access and verifies there are no pending
3107 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
3108 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
3109 * is returned signifying master requests disabled.
3111 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
3113 s32 status = IXGBE_SUCCESS;
3117 DEBUGFUNC("ixgbe_disable_pcie_master");
3119 /* Always set this bit to ensure any future transactions are blocked */
3120 IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
3122 /* Exit if master requests are blocked */
3123 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
3124 IXGBE_REMOVED(hw->hw_addr))
3127 /* Poll for master request bit to clear */
3128 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
3130 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
3135 * Two consecutive resets are required via CTRL.RST per datasheet
3136 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
3137 * of this need. The first reset prevents new master requests from
3138 * being issued by our device. We then must wait 1usec or more for any
3139 * remaining completions from the PCIe bus to trickle in, and then reset
3140 * again to clear out any effects they may have had on our device.
3142 DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
3143 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
3145 if (hw->mac.type >= ixgbe_mac_X550)
3149 * Before proceeding, make sure that the PCIe block does not have
3150 * transactions pending.
3152 poll = ixgbe_pcie_timeout_poll(hw);
3153 for (i = 0; i < poll; i++) {
3155 value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
3156 if (IXGBE_REMOVED(hw->hw_addr))
3158 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
3162 ERROR_REPORT1(IXGBE_ERROR_POLLING,
3163 "PCIe transaction pending bit also did not clear.\n");
3164 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
3171 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
3172 * @hw: pointer to hardware structure
3173 * @mask: Mask to specify which semaphore to acquire
3175 * Acquires the SWFW semaphore through the GSSR register for the specified
3176 * function (CSR, PHY0, PHY1, EEPROM, Flash)
3178 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3182 u32 fwmask = mask << 5;
3186 DEBUGFUNC("ixgbe_acquire_swfw_sync");
3188 for (i = 0; i < timeout; i++) {
3190 * SW NVM semaphore bit is used for access to all
3191 * SW_FW_SYNC bits (not just NVM)
3193 if (ixgbe_get_eeprom_semaphore(hw))
3194 return IXGBE_ERR_SWFW_SYNC;
3196 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3197 if (!(gssr & (fwmask | swmask))) {
3199 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3200 ixgbe_release_eeprom_semaphore(hw);
3201 return IXGBE_SUCCESS;
3203 /* Resource is currently in use by FW or SW */
3204 ixgbe_release_eeprom_semaphore(hw);
3209 /* If time expired clear the bits holding the lock and retry */
3210 if (gssr & (fwmask | swmask))
3211 ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
3214 return IXGBE_ERR_SWFW_SYNC;
3218 * ixgbe_release_swfw_sync - Release SWFW semaphore
3219 * @hw: pointer to hardware structure
3220 * @mask: Mask to specify which semaphore to release
3222 * Releases the SWFW semaphore through the GSSR register for the specified
3223 * function (CSR, PHY0, PHY1, EEPROM, Flash)
3225 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3230 DEBUGFUNC("ixgbe_release_swfw_sync");
3232 ixgbe_get_eeprom_semaphore(hw);
3234 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3236 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3238 ixgbe_release_eeprom_semaphore(hw);
3242 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3243 * @hw: pointer to hardware structure
3245 * Stops the receive data path and waits for the HW to internally empty
3246 * the Rx security block
3248 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3250 #define IXGBE_MAX_SECRX_POLL 40
3255 DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3258 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3259 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3260 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3261 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3262 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3263 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3266 /* Use interrupt-safe sleep just in case */
3270 /* For informational purposes only */
3271 if (i >= IXGBE_MAX_SECRX_POLL)
3272 DEBUGOUT("Rx unit being enabled before security "
3273 "path fully disabled. Continuing with init.\n");
3275 return IXGBE_SUCCESS;
3279 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
3280 * @hw: pointer to hardware structure
3281 * @reg_val: Value we read from AUTOC
3283 * The default case requires no protection so just to the register read.
3285 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
3288 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3289 return IXGBE_SUCCESS;
3293 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
3294 * @hw: pointer to hardware structure
3295 * @reg_val: value to write to AUTOC
3296 * @locked: bool to indicate whether the SW/FW lock was already taken by
3299 * The default case requires no protection so just to the register write.
3301 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
3303 UNREFERENCED_1PARAMETER(locked);
3305 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
3306 return IXGBE_SUCCESS;
3310 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3311 * @hw: pointer to hardware structure
3313 * Enables the receive data path.
3315 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3319 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3321 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3322 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3323 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3324 IXGBE_WRITE_FLUSH(hw);
3326 return IXGBE_SUCCESS;
3330 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3331 * @hw: pointer to hardware structure
3332 * @regval: register value to write to RXCTRL
3334 * Enables the Rx DMA unit
3336 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3338 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3340 if (regval & IXGBE_RXCTRL_RXEN)
3341 ixgbe_enable_rx(hw);
3343 ixgbe_disable_rx(hw);
3345 return IXGBE_SUCCESS;
3349 * ixgbe_blink_led_start_generic - Blink LED based on index.
3350 * @hw: pointer to hardware structure
3351 * @index: led number to blink
3353 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
3355 ixgbe_link_speed speed = 0;
3358 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3359 s32 ret_val = IXGBE_SUCCESS;
3360 bool locked = false;
3362 DEBUGFUNC("ixgbe_blink_led_start_generic");
3365 * Link must be up to auto-blink the LEDs;
3366 * Force it if link is down.
3368 hw->mac.ops.check_link(hw, &speed, &link_up, false);
3371 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3372 if (ret_val != IXGBE_SUCCESS)
3375 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3376 autoc_reg |= IXGBE_AUTOC_FLU;
3378 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3379 if (ret_val != IXGBE_SUCCESS)
3382 IXGBE_WRITE_FLUSH(hw);
3386 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3387 led_reg |= IXGBE_LED_BLINK(index);
3388 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3389 IXGBE_WRITE_FLUSH(hw);
3396 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3397 * @hw: pointer to hardware structure
3398 * @index: led number to stop blinking
3400 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
3403 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3404 s32 ret_val = IXGBE_SUCCESS;
3405 bool locked = false;
3407 DEBUGFUNC("ixgbe_blink_led_stop_generic");
3409 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3410 if (ret_val != IXGBE_SUCCESS)
3413 autoc_reg &= ~IXGBE_AUTOC_FLU;
3414 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3416 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3417 if (ret_val != IXGBE_SUCCESS)
3420 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3421 led_reg &= ~IXGBE_LED_BLINK(index);
3422 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3423 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3424 IXGBE_WRITE_FLUSH(hw);
3431 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3432 * @hw: pointer to hardware structure
3433 * @san_mac_offset: SAN MAC address offset
3435 * This function will read the EEPROM location for the SAN MAC address
3436 * pointer, and returns the value at that location. This is used in both
3437 * get and set mac_addr routines.
3439 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3440 u16 *san_mac_offset)
3444 DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3447 * First read the EEPROM pointer to see if the MAC addresses are
3450 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3453 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3454 "eeprom at offset %d failed",
3455 IXGBE_SAN_MAC_ADDR_PTR);
3462 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3463 * @hw: pointer to hardware structure
3464 * @san_mac_addr: SAN MAC address
3466 * Reads the SAN MAC address from the EEPROM, if it's available. This is
3467 * per-port, so set_lan_id() must be called before reading the addresses.
3468 * set_lan_id() is called by identify_sfp(), but this cannot be relied
3469 * upon for non-SFP connections, so we must call it here.
3471 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3473 u16 san_mac_data, san_mac_offset;
3477 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3480 * First read the EEPROM pointer to see if the MAC addresses are
3481 * available. If they're not, no point in calling set_lan_id() here.
3483 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3484 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3485 goto san_mac_addr_out;
3487 /* make sure we know which port we need to program */
3488 hw->mac.ops.set_lan_id(hw);
3489 /* apply the port offset to the address offset */
3490 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3491 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3492 for (i = 0; i < 3; i++) {
3493 ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3496 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3497 "eeprom read at offset %d failed",
3499 goto san_mac_addr_out;
3501 san_mac_addr[i * 2] = (u8)(san_mac_data);
3502 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3505 return IXGBE_SUCCESS;
3509 * No addresses available in this EEPROM. It's not an
3510 * error though, so just wipe the local address and return.
3512 for (i = 0; i < 6; i++)
3513 san_mac_addr[i] = 0xFF;
3514 return IXGBE_SUCCESS;
3518 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3519 * @hw: pointer to hardware structure
3520 * @san_mac_addr: SAN MAC address
3522 * Write a SAN MAC address to the EEPROM.
3524 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3527 u16 san_mac_data, san_mac_offset;
3530 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3532 /* Look for SAN mac address pointer. If not defined, return */
3533 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3534 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3535 return IXGBE_ERR_NO_SAN_ADDR_PTR;
3537 /* Make sure we know which port we need to write */
3538 hw->mac.ops.set_lan_id(hw);
3539 /* Apply the port offset to the address offset */
3540 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3541 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3543 for (i = 0; i < 3; i++) {
3544 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3545 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3546 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3550 return IXGBE_SUCCESS;
3554 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3555 * @hw: pointer to hardware structure
3557 * Read PCIe configuration space, and get the MSI-X vector count from
3558 * the capabilities table.
3560 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3566 switch (hw->mac.type) {
3567 case ixgbe_mac_82598EB:
3568 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3569 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3571 case ixgbe_mac_82599EB:
3572 case ixgbe_mac_X540:
3573 case ixgbe_mac_X550:
3574 case ixgbe_mac_X550EM_x:
3575 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3576 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3582 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3583 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3584 if (IXGBE_REMOVED(hw->hw_addr))
3586 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3588 /* MSI-X count is zero-based in HW */
3591 if (msix_count > max_msix_count)
3592 msix_count = max_msix_count;
3598 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3599 * @hw: pointer to hardware structure
3600 * @addr: Address to put into receive address register
3601 * @vmdq: VMDq pool to assign
3603 * Puts an ethernet address into a receive address register, or
3604 * finds the rar that it is aleady in; adds to the pool list
3606 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3608 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3609 u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3611 u32 rar_low, rar_high;
3612 u32 addr_low, addr_high;
3614 DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3616 /* swap bytes for HW little endian */
3617 addr_low = addr[0] | (addr[1] << 8)
3620 addr_high = addr[4] | (addr[5] << 8);
3623 * Either find the mac_id in rar or find the first empty space.
3624 * rar_highwater points to just after the highest currently used
3625 * rar in order to shorten the search. It grows when we add a new
3628 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3629 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3631 if (((IXGBE_RAH_AV & rar_high) == 0)
3632 && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3633 first_empty_rar = rar;
3634 } else if ((rar_high & 0xFFFF) == addr_high) {
3635 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3636 if (rar_low == addr_low)
3637 break; /* found it already in the rars */
3641 if (rar < hw->mac.rar_highwater) {
3642 /* already there so just add to the pool bits */
3643 ixgbe_set_vmdq(hw, rar, vmdq);
3644 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3645 /* stick it into first empty RAR slot we found */
3646 rar = first_empty_rar;
3647 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3648 } else if (rar == hw->mac.rar_highwater) {
3649 /* add it to the top of the list and inc the highwater mark */
3650 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3651 hw->mac.rar_highwater++;
3652 } else if (rar >= hw->mac.num_rar_entries) {
3653 return IXGBE_ERR_INVALID_MAC_ADDR;
3657 * If we found rar[0], make sure the default pool bit (we use pool 0)
3658 * remains cleared to be sure default pool packets will get delivered
3661 ixgbe_clear_vmdq(hw, rar, 0);
3667 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3668 * @hw: pointer to hardware struct
3669 * @rar: receive address register index to disassociate
3670 * @vmdq: VMDq pool index to remove from the rar
3672 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3674 u32 mpsar_lo, mpsar_hi;
3675 u32 rar_entries = hw->mac.num_rar_entries;
3677 DEBUGFUNC("ixgbe_clear_vmdq_generic");
3679 /* Make sure we are using a valid rar index range */
3680 if (rar >= rar_entries) {
3681 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3682 "RAR index %d is out of range.\n", rar);
3683 return IXGBE_ERR_INVALID_ARGUMENT;
3686 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3687 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3689 if (IXGBE_REMOVED(hw->hw_addr))
3692 if (!mpsar_lo && !mpsar_hi)
3695 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3697 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3701 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3704 } else if (vmdq < 32) {
3705 mpsar_lo &= ~(1 << vmdq);
3706 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3708 mpsar_hi &= ~(1 << (vmdq - 32));
3709 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3712 /* was that the last pool using this rar? */
3713 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
3714 hw->mac.ops.clear_rar(hw, rar);
3716 return IXGBE_SUCCESS;
3720 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3721 * @hw: pointer to hardware struct
3722 * @rar: receive address register index to associate with a VMDq index
3723 * @vmdq: VMDq pool index
3725 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3728 u32 rar_entries = hw->mac.num_rar_entries;
3730 DEBUGFUNC("ixgbe_set_vmdq_generic");
3732 /* Make sure we are using a valid rar index range */
3733 if (rar >= rar_entries) {
3734 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3735 "RAR index %d is out of range.\n", rar);
3736 return IXGBE_ERR_INVALID_ARGUMENT;
3740 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3742 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3744 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3745 mpsar |= 1 << (vmdq - 32);
3746 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3748 return IXGBE_SUCCESS;
3752 * This function should only be involved in the IOV mode.
3753 * In IOV mode, Default pool is next pool after the number of
3754 * VFs advertized and not 0.
3755 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3757 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3758 * @hw: pointer to hardware struct
3759 * @vmdq: VMDq pool index
3761 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3763 u32 rar = hw->mac.san_mac_rar_index;
3765 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3768 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3769 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3771 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3772 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3775 return IXGBE_SUCCESS;
3779 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3780 * @hw: pointer to hardware structure
3782 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3786 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3787 DEBUGOUT(" Clearing UTA\n");
3789 for (i = 0; i < 128; i++)
3790 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3792 return IXGBE_SUCCESS;
3796 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3797 * @hw: pointer to hardware structure
3798 * @vlan: VLAN id to write to VLAN filter
3800 * return the VLVF index where this VLAN id should be placed
3803 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
3806 u32 first_empty_slot = 0;
3809 /* short cut the special case */
3814 * Search for the vlan id in the VLVF entries. Save off the first empty
3815 * slot found along the way
3817 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
3818 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3819 if (!bits && !(first_empty_slot))
3820 first_empty_slot = regindex;
3821 else if ((bits & 0x0FFF) == vlan)
3826 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
3827 * in the VLVF. Else use the first empty VLVF register for this
3830 if (regindex >= IXGBE_VLVF_ENTRIES) {
3831 if (first_empty_slot)
3832 regindex = first_empty_slot;
3834 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
3835 "No space in VLVF.\n");
3836 regindex = IXGBE_ERR_NO_SPACE;
3844 * ixgbe_set_vfta_generic - Set VLAN filter table
3845 * @hw: pointer to hardware structure
3846 * @vlan: VLAN id to write to VLAN filter
3847 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
3848 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
3850 * Turn on/off specified VLAN in the VLAN filter table.
3852 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3859 s32 ret_val = IXGBE_SUCCESS;
3860 bool vfta_changed = false;
3862 DEBUGFUNC("ixgbe_set_vfta_generic");
3865 return IXGBE_ERR_PARAM;
3868 * this is a 2 part operation - first the VFTA, then the
3869 * VLVF and VLVFB if VT Mode is set
3870 * We don't write the VFTA until we know the VLVF part succeeded.
3874 * The VFTA is a bitstring made up of 128 32-bit registers
3875 * that enable the particular VLAN id, much like the MTA:
3876 * bits[11-5]: which register
3877 * bits[4-0]: which bit in the register
3879 regindex = (vlan >> 5) & 0x7F;
3880 bitindex = vlan & 0x1F;
3881 targetbit = (1 << bitindex);
3882 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
3885 if (!(vfta & targetbit)) {
3887 vfta_changed = true;
3890 if ((vfta & targetbit)) {
3892 vfta_changed = true;
3897 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3899 ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
3901 if (ret_val != IXGBE_SUCCESS)
3905 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
3907 return IXGBE_SUCCESS;
3911 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3912 * @hw: pointer to hardware structure
3913 * @vlan: VLAN id to write to VLAN filter
3914 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
3915 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
3916 * @vfta_changed: pointer to boolean flag which indicates whether VFTA
3919 * Turn on/off specified bit in VLVF table.
3921 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3922 bool vlan_on, bool *vfta_changed)
3926 DEBUGFUNC("ixgbe_set_vlvf_generic");
3929 return IXGBE_ERR_PARAM;
3931 /* If VT Mode is set
3933 * make sure the vlan is in VLVF
3934 * set the vind bit in the matching VLVFB
3936 * clear the pool bit and possibly the vind
3938 vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3939 if (vt & IXGBE_VT_CTL_VT_ENABLE) {
3943 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
3948 /* set the pool bit */
3950 bits = IXGBE_READ_REG(hw,
3951 IXGBE_VLVFB(vlvf_index * 2));
3952 bits |= (1 << vind);
3954 IXGBE_VLVFB(vlvf_index * 2),
3957 bits = IXGBE_READ_REG(hw,
3958 IXGBE_VLVFB((vlvf_index * 2) + 1));
3959 bits |= (1 << (vind - 32));
3961 IXGBE_VLVFB((vlvf_index * 2) + 1),
3965 /* clear the pool bit */
3967 bits = IXGBE_READ_REG(hw,
3968 IXGBE_VLVFB(vlvf_index * 2));
3969 bits &= ~(1 << vind);
3971 IXGBE_VLVFB(vlvf_index * 2),
3973 bits |= IXGBE_READ_REG(hw,
3974 IXGBE_VLVFB((vlvf_index * 2) + 1));
3976 bits = IXGBE_READ_REG(hw,
3977 IXGBE_VLVFB((vlvf_index * 2) + 1));
3978 bits &= ~(1 << (vind - 32));
3980 IXGBE_VLVFB((vlvf_index * 2) + 1),
3982 bits |= IXGBE_READ_REG(hw,
3983 IXGBE_VLVFB(vlvf_index * 2));
3988 * If there are still bits set in the VLVFB registers
3989 * for the VLAN ID indicated we need to see if the
3990 * caller is requesting that we clear the VFTA entry bit.
3991 * If the caller has requested that we clear the VFTA
3992 * entry bit but there are still pools/VFs using this VLAN
3993 * ID entry then ignore the request. We're not worried
3994 * about the case where we're turning the VFTA VLAN ID
3995 * entry bit on, only when requested to turn it off as
3996 * there may be multiple pools and/or VFs using the
3997 * VLAN ID entry. In that case we cannot clear the
3998 * VFTA bit until all pools/VFs using that VLAN ID have also
3999 * been cleared. This will be indicated by "bits" being
4003 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
4004 (IXGBE_VLVF_VIEN | vlan));
4005 if ((!vlan_on) && (vfta_changed != NULL)) {
4006 /* someone wants to clear the vfta entry
4007 * but some pools/VFs are still using it.
4009 *vfta_changed = false;
4012 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
4015 return IXGBE_SUCCESS;
4019 * ixgbe_clear_vfta_generic - Clear VLAN filter table
4020 * @hw: pointer to hardware structure
4022 * Clears the VLAN filer table, and the VMDq index associated with the filter
4024 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
4028 DEBUGFUNC("ixgbe_clear_vfta_generic");
4030 for (offset = 0; offset < hw->mac.vft_size; offset++)
4031 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
4033 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
4034 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
4035 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4036 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
4039 return IXGBE_SUCCESS;
4043 * ixgbe_check_mac_link_generic - Determine link and speed status
4044 * @hw: pointer to hardware structure
4045 * @speed: pointer to link speed
4046 * @link_up: true when link is up
4047 * @link_up_wait_to_complete: bool used to wait for link up or not
4049 * Reads the links register to determine if link is up and the current speed
4051 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4052 bool *link_up, bool link_up_wait_to_complete)
4054 u32 links_reg, links_orig;
4057 DEBUGFUNC("ixgbe_check_mac_link_generic");
4059 /* clear the old state */
4060 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
4062 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4064 if (links_orig != links_reg) {
4065 DEBUGOUT2("LINKS changed from %08X to %08X\n",
4066 links_orig, links_reg);
4069 if (link_up_wait_to_complete) {
4070 for (i = 0; i < hw->mac.max_link_up_time; i++) {
4071 if (links_reg & IXGBE_LINKS_UP) {
4078 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4081 if (links_reg & IXGBE_LINKS_UP)
4087 switch (links_reg & IXGBE_LINKS_SPEED_82599) {
4088 case IXGBE_LINKS_SPEED_10G_82599:
4089 *speed = IXGBE_LINK_SPEED_10GB_FULL;
4090 if (hw->mac.type >= ixgbe_mac_X550) {
4091 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4092 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
4095 case IXGBE_LINKS_SPEED_1G_82599:
4096 *speed = IXGBE_LINK_SPEED_1GB_FULL;
4098 case IXGBE_LINKS_SPEED_100_82599:
4099 *speed = IXGBE_LINK_SPEED_100_FULL;
4100 if (hw->mac.type >= ixgbe_mac_X550) {
4101 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4102 *speed = IXGBE_LINK_SPEED_5GB_FULL;
4106 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4109 return IXGBE_SUCCESS;
4113 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
4115 * @hw: pointer to hardware structure
4116 * @wwnn_prefix: the alternative WWNN prefix
4117 * @wwpn_prefix: the alternative WWPN prefix
4119 * This function will read the EEPROM from the alternative SAN MAC address
4120 * block to check the support for the alternative WWNN/WWPN prefix support.
4122 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
4126 u16 alt_san_mac_blk_offset;
4128 DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
4130 /* clear output first */
4131 *wwnn_prefix = 0xFFFF;
4132 *wwpn_prefix = 0xFFFF;
4134 /* check if alternative SAN MAC is supported */
4135 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
4136 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
4137 goto wwn_prefix_err;
4139 if ((alt_san_mac_blk_offset == 0) ||
4140 (alt_san_mac_blk_offset == 0xFFFF))
4141 goto wwn_prefix_out;
4143 /* check capability in alternative san mac address block */
4144 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
4145 if (hw->eeprom.ops.read(hw, offset, &caps))
4146 goto wwn_prefix_err;
4147 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
4148 goto wwn_prefix_out;
4150 /* get the corresponding prefix for WWNN/WWPN */
4151 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
4152 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
4153 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4154 "eeprom read at offset %d failed", offset);
4157 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
4158 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
4159 goto wwn_prefix_err;
4162 return IXGBE_SUCCESS;
4165 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4166 "eeprom read at offset %d failed", offset);
4167 return IXGBE_SUCCESS;
4171 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4172 * @hw: pointer to hardware structure
4173 * @bs: the fcoe boot status
4175 * This function will read the FCOE boot status from the iSCSI FCOE block
4177 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4179 u16 offset, caps, flags;
4182 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4184 /* clear output first */
4185 *bs = ixgbe_fcoe_bootstatus_unavailable;
4187 /* check if FCOE IBA block is present */
4188 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4189 status = hw->eeprom.ops.read(hw, offset, &caps);
4190 if (status != IXGBE_SUCCESS)
4193 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4196 /* check if iSCSI FCOE block is populated */
4197 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4198 if (status != IXGBE_SUCCESS)
4201 if ((offset == 0) || (offset == 0xFFFF))
4204 /* read fcoe flags in iSCSI FCOE block */
4205 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4206 status = hw->eeprom.ops.read(hw, offset, &flags);
4207 if (status != IXGBE_SUCCESS)
4210 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4211 *bs = ixgbe_fcoe_bootstatus_enabled;
4213 *bs = ixgbe_fcoe_bootstatus_disabled;
4220 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4221 * @hw: pointer to hardware structure
4222 * @enable: enable or disable switch for anti-spoofing
4223 * @pf: Physical Function pool - do not enable anti-spoofing for the PF
4226 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
4229 int pf_target_reg = pf >> 3;
4230 int pf_target_shift = pf % 8;
4233 if (hw->mac.type == ixgbe_mac_82598EB)
4237 pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
4240 * PFVFSPOOF register array is size 8 with 8 bits assigned to
4241 * MAC anti-spoof enables in each register array element.
4243 for (j = 0; j < pf_target_reg; j++)
4244 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
4247 * The PF should be allowed to spoof so that it can support
4248 * emulation mode NICs. Do not set the bits assigned to the PF
4250 pfvfspoof &= (1 << pf_target_shift) - 1;
4251 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
4254 * Remaining pools belong to the PF so they do not need to have
4255 * anti-spoofing enabled.
4257 for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
4258 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
4262 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4263 * @hw: pointer to hardware structure
4264 * @enable: enable or disable switch for VLAN anti-spoofing
4265 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4268 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4270 int vf_target_reg = vf >> 3;
4271 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4274 if (hw->mac.type == ixgbe_mac_82598EB)
4277 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4279 pfvfspoof |= (1 << vf_target_shift);
4281 pfvfspoof &= ~(1 << vf_target_shift);
4282 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4286 * ixgbe_get_device_caps_generic - Get additional device capabilities
4287 * @hw: pointer to hardware structure
4288 * @device_caps: the EEPROM word with the extra device capabilities
4290 * This function will read the EEPROM location for the device capabilities,
4291 * and return the word through device_caps.
4293 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4295 DEBUGFUNC("ixgbe_get_device_caps_generic");
4297 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4299 return IXGBE_SUCCESS;
4303 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4304 * @hw: pointer to hardware structure
4307 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4312 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4314 /* Enable relaxed ordering */
4315 for (i = 0; i < hw->mac.max_tx_queues; i++) {
4316 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4317 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4318 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4321 for (i = 0; i < hw->mac.max_rx_queues; i++) {
4322 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4323 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4324 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4325 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4331 * ixgbe_calculate_checksum - Calculate checksum for buffer
4332 * @buffer: pointer to EEPROM
4333 * @length: size of EEPROM to calculate a checksum for
4334 * Calculates the checksum for some buffer on a specified length. The
4335 * checksum calculated is returned.
4337 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4342 DEBUGFUNC("ixgbe_calculate_checksum");
4347 for (i = 0; i < length; i++)
4350 return (u8) (0 - sum);
4354 * ixgbe_host_interface_command - Issue command to manageability block
4355 * @hw: pointer to the HW structure
4356 * @buffer: contains the command to write and where the return status will
4358 * @length: length of buffer, must be multiple of 4 bytes
4359 * @timeout: time in ms to wait for command completion
4360 * @return_data: read and return data from the buffer (true) or not (false)
4361 * Needed because FW structures are big endian and decoding of
4362 * these fields can be 8 bit or 16 bit based on command. Decoding
4363 * is not easily understood without making a table of commands.
4364 * So we will leave this up to the caller to read back the data
4367 * Communicates with the manageability block. On success return IXGBE_SUCCESS
4368 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
4370 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
4371 u32 length, u32 timeout, bool return_data)
4373 u32 hicr, i, bi, fwsts;
4374 u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
4378 DEBUGFUNC("ixgbe_host_interface_command");
4380 if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4381 DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4382 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4384 /* Set bit 9 of FWSTS clearing FW reset indication */
4385 fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
4386 IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
4388 /* Check that the host interface is enabled. */
4389 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4390 if ((hicr & IXGBE_HICR_EN) == 0) {
4391 DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
4392 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4395 /* Calculate length in DWORDs. We must be DWORD aligned */
4396 if ((length % (sizeof(u32))) != 0) {
4397 DEBUGOUT("Buffer length failure, not aligned to dword");
4398 return IXGBE_ERR_INVALID_ARGUMENT;
4401 dword_len = length >> 2;
4403 /* The device driver writes the relevant command block
4404 * into the ram area.
4406 for (i = 0; i < dword_len; i++)
4407 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4408 i, IXGBE_CPU_TO_LE32(buffer[i]));
4410 /* Setting this bit tells the ARC that a new command is pending. */
4411 IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
4413 for (i = 0; i < timeout; i++) {
4414 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4415 if (!(hicr & IXGBE_HICR_C))
4420 /* Check command completion */
4421 if ((timeout != 0 && i == timeout) ||
4422 !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
4423 ERROR_REPORT1(IXGBE_ERROR_CAUTION,
4424 "Command has failed with no status valid.\n");
4425 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4431 /* Calculate length in DWORDs */
4432 dword_len = hdr_size >> 2;
4434 /* first pull in the header so we know the buffer length */
4435 for (bi = 0; bi < dword_len; bi++) {
4436 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4437 IXGBE_LE32_TO_CPUS(&buffer[bi]);
4440 /* If there is any thing in data position pull it in */
4441 buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
4445 if (length < buf_len + hdr_size) {
4446 DEBUGOUT("Buffer not large enough for reply message.\n");
4447 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4450 /* Calculate length in DWORDs, add 3 for odd lengths */
4451 dword_len = (buf_len + 3) >> 2;
4453 /* Pull in the rest of the buffer (bi is where we left off) */
4454 for (; bi <= dword_len; bi++) {
4455 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4456 IXGBE_LE32_TO_CPUS(&buffer[bi]);
4463 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4464 * @hw: pointer to the HW structure
4465 * @maj: driver version major number
4466 * @min: driver version minor number
4467 * @build: driver version build number
4468 * @sub: driver version sub build number
4470 * Sends driver version number to firmware through the manageability
4471 * block. On success return IXGBE_SUCCESS
4472 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4473 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4475 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4478 struct ixgbe_hic_drv_info fw_cmd;
4480 s32 ret_val = IXGBE_SUCCESS;
4482 DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4484 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
4486 ret_val = IXGBE_ERR_SWFW_SYNC;
4490 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4491 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4492 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4493 fw_cmd.port_num = (u8)hw->bus.func;
4494 fw_cmd.ver_maj = maj;
4495 fw_cmd.ver_min = min;
4496 fw_cmd.ver_build = build;
4497 fw_cmd.ver_sub = sub;
4498 fw_cmd.hdr.checksum = 0;
4499 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4500 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4504 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4505 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4507 IXGBE_HI_COMMAND_TIMEOUT,
4509 if (ret_val != IXGBE_SUCCESS)
4512 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4513 FW_CEM_RESP_STATUS_SUCCESS)
4514 ret_val = IXGBE_SUCCESS;
4516 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4521 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4527 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4528 * @hw: pointer to hardware structure
4529 * @num_pb: number of packet buffers to allocate
4530 * @headroom: reserve n KB of headroom
4531 * @strategy: packet buffer allocation strategy
4533 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4536 u32 pbsize = hw->mac.rx_pb_size;
4538 u32 rxpktsize, txpktsize, txpbthresh;
4540 /* Reserve headroom */
4546 /* Divide remaining packet buffer space amongst the number of packet
4547 * buffers requested using supplied strategy.
4550 case PBA_STRATEGY_WEIGHTED:
4551 /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4552 * buffer with 5/8 of the packet buffer space.
4554 rxpktsize = (pbsize * 5) / (num_pb * 4);
4555 pbsize -= rxpktsize * (num_pb / 2);
4556 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4557 for (; i < (num_pb / 2); i++)
4558 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4559 /* Fall through to configure remaining packet buffers */
4560 case PBA_STRATEGY_EQUAL:
4561 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4562 for (; i < num_pb; i++)
4563 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4569 /* Only support an equally distributed Tx packet buffer strategy. */
4570 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4571 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4572 for (i = 0; i < num_pb; i++) {
4573 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4574 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4577 /* Clear unused TCs, if any, to zero buffer size*/
4578 for (; i < IXGBE_MAX_PB; i++) {
4579 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4580 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4581 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4586 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4587 * @hw: pointer to the hardware structure
4589 * The 82599 and x540 MACs can experience issues if TX work is still pending
4590 * when a reset occurs. This function prevents this by flushing the PCIe
4591 * buffers on the system.
4593 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4595 u32 gcr_ext, hlreg0, i, poll;
4599 * If double reset is not requested then all transactions should
4600 * already be clear and as such there is no work to do
4602 if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4606 * Set loopback enable to prevent any transmits from being sent
4607 * should the link come up. This assumes that the RXCTRL.RXEN bit
4608 * has already been cleared.
4610 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4611 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4613 /* Wait for a last completion before clearing buffers */
4614 IXGBE_WRITE_FLUSH(hw);
4618 * Before proceeding, make sure that the PCIe block does not have
4619 * transactions pending.
4621 poll = ixgbe_pcie_timeout_poll(hw);
4622 for (i = 0; i < poll; i++) {
4624 value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
4625 if (IXGBE_REMOVED(hw->hw_addr))
4627 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
4632 /* initiate cleaning flow for buffers in the PCIe transaction layer */
4633 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4634 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4635 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4637 /* Flush all writes and allow 20usec for all transactions to clear */
4638 IXGBE_WRITE_FLUSH(hw);
4641 /* restore previous register values */
4642 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4643 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4646 STATIC const u8 ixgbe_emc_temp_data[4] = {
4647 IXGBE_EMC_INTERNAL_DATA,
4648 IXGBE_EMC_DIODE1_DATA,
4649 IXGBE_EMC_DIODE2_DATA,
4650 IXGBE_EMC_DIODE3_DATA
4652 STATIC const u8 ixgbe_emc_therm_limit[4] = {
4653 IXGBE_EMC_INTERNAL_THERM_LIMIT,
4654 IXGBE_EMC_DIODE1_THERM_LIMIT,
4655 IXGBE_EMC_DIODE2_THERM_LIMIT,
4656 IXGBE_EMC_DIODE3_THERM_LIMIT
4660 * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
4661 * @hw: pointer to hardware structure
4662 * @data: pointer to the thermal sensor data structure
4664 * Returns the thermal sensor data structure
4666 s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
4668 s32 status = IXGBE_SUCCESS;
4676 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
4678 DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic");
4680 /* Only support thermal sensors attached to 82599 physical port 0 */
4681 if ((hw->mac.type != ixgbe_mac_82599EB) ||
4682 (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
4683 status = IXGBE_NOT_IMPLEMENTED;
4687 status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
4691 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) {
4692 status = IXGBE_NOT_IMPLEMENTED;
4696 status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
4700 if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
4701 != IXGBE_ETS_TYPE_EMC) {
4702 status = IXGBE_NOT_IMPLEMENTED;
4706 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
4707 if (num_sensors > IXGBE_MAX_SENSORS)
4708 num_sensors = IXGBE_MAX_SENSORS;
4710 for (i = 0; i < num_sensors; i++) {
4711 status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
4716 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
4717 IXGBE_ETS_DATA_INDEX_SHIFT);
4718 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
4719 IXGBE_ETS_DATA_LOC_SHIFT);
4721 if (sensor_location != 0) {
4722 status = hw->phy.ops.read_i2c_byte(hw,
4723 ixgbe_emc_temp_data[sensor_index],
4724 IXGBE_I2C_THERMAL_SENSOR_ADDR,
4725 &data->sensor[i].temp);
4735 * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
4736 * @hw: pointer to hardware structure
4738 * Inits the thermal sensor thresholds according to the NVM map
4739 * and save off the threshold and location values into mac.thermal_sensor_data
4741 s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
4743 s32 status = IXGBE_SUCCESS;
4748 u8 low_thresh_delta;
4754 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
4756 DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic");
4758 memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
4760 /* Only support thermal sensors attached to 82599 physical port 0 */
4761 if ((hw->mac.type != ixgbe_mac_82599EB) ||
4762 (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
4763 return IXGBE_NOT_IMPLEMENTED;
4765 offset = IXGBE_ETS_CFG;
4766 if (hw->eeprom.ops.read(hw, offset, &ets_offset))
4768 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
4769 return IXGBE_NOT_IMPLEMENTED;
4771 offset = ets_offset;
4772 if (hw->eeprom.ops.read(hw, offset, &ets_cfg))
4774 if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
4775 != IXGBE_ETS_TYPE_EMC)
4776 return IXGBE_NOT_IMPLEMENTED;
4778 low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
4779 IXGBE_ETS_LTHRES_DELTA_SHIFT);
4780 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
4782 for (i = 0; i < num_sensors; i++) {
4783 offset = ets_offset + 1 + i;
4784 if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) {
4785 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4786 "eeprom read at offset %d failed",
4790 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
4791 IXGBE_ETS_DATA_INDEX_SHIFT);
4792 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
4793 IXGBE_ETS_DATA_LOC_SHIFT);
4794 therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
4796 hw->phy.ops.write_i2c_byte(hw,
4797 ixgbe_emc_therm_limit[sensor_index],
4798 IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
4800 if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) {
4801 data->sensor[i].location = sensor_location;
4802 data->sensor[i].caution_thresh = therm_limit;
4803 data->sensor[i].max_op_thresh = therm_limit -
4810 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4811 "eeprom read at offset %d failed", offset);
4812 return IXGBE_NOT_IMPLEMENTED;
/**
 * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
 * @hw: pointer to hardware structure
 * @map: pointer to u8 arr for returning map
 *
 * Read the rtrup2tc HW register and resolve its content into map
 **/
4823 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
4827 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
4828 for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
4829 map[i] = IXGBE_RTRUP2TC_UP_MASK &
4830 (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
4834 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
4839 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4840 if (rxctrl & IXGBE_RXCTRL_RXEN) {
4841 if (hw->mac.type != ixgbe_mac_82598EB) {
4842 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
4843 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
4844 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
4845 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
4846 hw->mac.set_lben = true;
4848 hw->mac.set_lben = false;
4851 rxctrl &= ~IXGBE_RXCTRL_RXEN;
4852 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
4856 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
4861 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4862 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
4864 if (hw->mac.type != ixgbe_mac_82598EB) {
4865 if (hw->mac.set_lben) {
4866 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
4867 pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
4868 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
4869 hw->mac.set_lben = false;
/**
 * ixgbe_mng_present - returns true when management capability is present
 * @hw: pointer to hardware structure
 **/
4878 bool ixgbe_mng_present(struct ixgbe_hw *hw)
4882 if (hw->mac.type < ixgbe_mac_82599EB)
4885 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
4886 fwsm &= IXGBE_FWSM_MODE_MASK;
4887 return fwsm == IXGBE_FWSM_FW_MODE_PT;
/**
 * ixgbe_mng_enabled - Is the manageability engine enabled?
 * @hw: pointer to hardware structure
 *
 * Returns true if the manageability engine is enabled.
 **/
4896 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
4898 u32 fwsm, manc, factps;
4900 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
4901 if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
4904 manc = IXGBE_READ_REG(hw, IXGBE_MANC);
4905 if (!(manc & IXGBE_MANC_RCV_TCO_EN))
4908 if (hw->mac.type <= ixgbe_mac_X540) {
4909 factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
4910 if (factps & IXGBE_FACTPS_MNGCG)
/**
 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Set the link speed in the MAC and/or PHY register and restarts link.
 **/
4925 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
4926 ixgbe_link_speed speed,
4927 bool autoneg_wait_to_complete)
4929 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
4930 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
4931 s32 status = IXGBE_SUCCESS;
4934 bool autoneg, link_up = false;
4936 DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
4938 /* Mask off requested but non-supported speeds */
4939 status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
4940 if (status != IXGBE_SUCCESS)
4943 speed &= link_speed;
4945 /* Try each speed one by one, highest priority first. We do this in
4946 * software because 10Gb fiber doesn't support speed autonegotiation.
4948 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
4950 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
4952 /* If we already have link at this speed, just jump out */
4953 status = ixgbe_check_link(hw, &link_speed, &link_up, false);
4954 if (status != IXGBE_SUCCESS)
4957 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
4960 /* Set the module link speed */
4961 switch (hw->phy.media_type) {
4962 case ixgbe_media_type_fiber:
4963 ixgbe_set_rate_select_speed(hw,
4964 IXGBE_LINK_SPEED_10GB_FULL);
4966 case ixgbe_media_type_fiber_qsfp:
4967 /* QSFP module automatically detects MAC link speed */
4970 DEBUGOUT("Unexpected media type.\n");
4974 /* Allow module to change analog characteristics (1G->10G) */
4977 status = ixgbe_setup_mac_link(hw,
4978 IXGBE_LINK_SPEED_10GB_FULL,
4979 autoneg_wait_to_complete);
4980 if (status != IXGBE_SUCCESS)
4983 /* Flap the Tx laser if it has not already been done */
4984 ixgbe_flap_tx_laser(hw);
4986 /* Wait for the controller to acquire link. Per IEEE 802.3ap,
4987 * Section 73.10.2, we may have to wait up to 500ms if KR is
4988 * attempted. 82599 uses the same timing for 10g SFI.
4990 for (i = 0; i < 5; i++) {
4991 /* Wait for the link partner to also set speed */
4994 /* If we have link, just jump out */
4995 status = ixgbe_check_link(hw, &link_speed,
4997 if (status != IXGBE_SUCCESS)
5005 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
5007 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
5008 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
5010 /* If we already have link at this speed, just jump out */
5011 status = ixgbe_check_link(hw, &link_speed, &link_up, false);
5012 if (status != IXGBE_SUCCESS)
5015 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
5018 /* Set the module link speed */
5019 switch (hw->phy.media_type) {
5020 case ixgbe_media_type_fiber:
5021 ixgbe_set_rate_select_speed(hw,
5022 IXGBE_LINK_SPEED_1GB_FULL);
5024 case ixgbe_media_type_fiber_qsfp:
5025 /* QSFP module automatically detects link speed */
5028 DEBUGOUT("Unexpected media type.\n");
5032 /* Allow module to change analog characteristics (10G->1G) */
5035 status = ixgbe_setup_mac_link(hw,
5036 IXGBE_LINK_SPEED_1GB_FULL,
5037 autoneg_wait_to_complete);
5038 if (status != IXGBE_SUCCESS)
5041 /* Flap the Tx laser if it has not already been done */
5042 ixgbe_flap_tx_laser(hw);
5044 /* Wait for the link partner to also set speed */
5047 /* If we have link, just jump out */
5048 status = ixgbe_check_link(hw, &link_speed, &link_up, false);
5049 if (status != IXGBE_SUCCESS)
5056 /* We didn't get link. Configure back to the highest speed we tried,
5057 * (if there was more than one). We call ourselves back with just the
5058 * single highest speed that the user requested.
5061 status = ixgbe_setup_mac_link_multispeed_fiber(hw,
5063 autoneg_wait_to_complete);
5066 /* Set autoneg_advertised value based on input link speed */
5067 hw->phy.autoneg_advertised = 0;
5069 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
5070 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
5072 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
5073 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
/**
 * ixgbe_set_soft_rate_select_speed - Set module link speed
 * @hw: pointer to hardware structure
 * @speed: link speed to set
 *
 * Set module link speed via the soft rate select.
 **/
5085 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
5086 ixgbe_link_speed speed)
5092 case IXGBE_LINK_SPEED_10GB_FULL:
5093 /* one bit mask same as setting on */
5094 rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
5096 case IXGBE_LINK_SPEED_1GB_FULL:
5097 rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
5100 DEBUGOUT("Invalid fixed module speed\n");
5105 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5106 IXGBE_I2C_EEPROM_DEV_ADDR2,
5109 DEBUGOUT("Failed to read Rx Rate Select RS0\n");
5113 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5115 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5116 IXGBE_I2C_EEPROM_DEV_ADDR2,
5119 DEBUGOUT("Failed to write Rx Rate Select RS0\n");
5124 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5125 IXGBE_I2C_EEPROM_DEV_ADDR2,
5128 DEBUGOUT("Failed to read Rx Rate Select RS1\n");
5132 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5134 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5135 IXGBE_I2C_EEPROM_DEV_ADDR2,
5138 DEBUGOUT("Failed to write Rx Rate Select RS1\n");