1 /*******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include "ixgbe_common.h"
35 #include "ixgbe_phy.h"
36 #include "ixgbe_dcb.h"
37 #include "ixgbe_dcb_82599.h"
38 #include "ixgbe_api.h"
40 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
41 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
42 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
43 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
44 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
45 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
47 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
48 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
49 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
50 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw);
52 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
53 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
55 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
56 u16 words, u16 *data);
57 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
58 u16 words, u16 *data);
59 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
63 * ixgbe_init_ops_generic - Inits function ptrs
64 * @hw: pointer to the hardware structure
66 * Initialize the function pointers.
68 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
70 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
71 struct ixgbe_mac_info *mac = &hw->mac;
72 u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
74 DEBUGFUNC("ixgbe_init_ops_generic");
77 eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
78 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
79 if (eec & IXGBE_EEC_PRES) {
80 eeprom->ops.read = ixgbe_read_eerd_generic;
81 eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
83 eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
84 eeprom->ops.read_buffer =
85 ixgbe_read_eeprom_buffer_bit_bang_generic;
87 eeprom->ops.write = ixgbe_write_eeprom_generic;
88 eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
89 eeprom->ops.validate_checksum =
90 ixgbe_validate_eeprom_checksum_generic;
91 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
92 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
95 mac->ops.init_hw = ixgbe_init_hw_generic;
96 mac->ops.reset_hw = NULL;
97 mac->ops.start_hw = ixgbe_start_hw_generic;
98 mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
99 mac->ops.get_media_type = NULL;
100 mac->ops.get_supported_physical_layer = NULL;
101 mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
102 mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
103 mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
104 mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
105 mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
106 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
107 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
108 mac->ops.prot_autoc_read = prot_autoc_read_generic;
109 mac->ops.prot_autoc_write = prot_autoc_write_generic;
112 mac->ops.led_on = ixgbe_led_on_generic;
113 mac->ops.led_off = ixgbe_led_off_generic;
114 mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
115 mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
116 mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;
118 /* RAR, Multicast, VLAN */
119 mac->ops.set_rar = ixgbe_set_rar_generic;
120 mac->ops.clear_rar = ixgbe_clear_rar_generic;
121 mac->ops.insert_mac_addr = NULL;
122 mac->ops.set_vmdq = NULL;
123 mac->ops.clear_vmdq = NULL;
124 mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
125 mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
126 mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
127 mac->ops.enable_mc = ixgbe_enable_mc_generic;
128 mac->ops.disable_mc = ixgbe_disable_mc_generic;
129 mac->ops.clear_vfta = NULL;
130 mac->ops.set_vfta = NULL;
131 mac->ops.set_vlvf = NULL;
132 mac->ops.init_uta_tables = NULL;
133 mac->ops.enable_rx = ixgbe_enable_rx_generic;
134 mac->ops.disable_rx = ixgbe_disable_rx_generic;
137 mac->ops.fc_enable = ixgbe_fc_enable_generic;
138 mac->ops.setup_fc = ixgbe_setup_fc_generic;
139 mac->ops.fc_autoneg = ixgbe_fc_autoneg;
142 mac->ops.get_link_capabilities = NULL;
143 mac->ops.setup_link = NULL;
144 mac->ops.check_link = NULL;
145 mac->ops.dmac_config = NULL;
146 mac->ops.dmac_update_tcs = NULL;
147 mac->ops.dmac_config_tcs = NULL;
149 return IXGBE_SUCCESS;
153 * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
155 * @hw: pointer to hardware structure
157 * This function returns true if the device supports flow control
158 * autonegotiation, and false if it does not.
161 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
163 bool supported = false;
164 ixgbe_link_speed speed;
167 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
169 switch (hw->phy.media_type) {
170 case ixgbe_media_type_fiber_fixed:
171 case ixgbe_media_type_fiber_qsfp:
172 case ixgbe_media_type_fiber:
173 /* flow control autoneg black list */
174 switch (hw->device_id) {
175 case IXGBE_DEV_ID_X550EM_A_SFP:
176 case IXGBE_DEV_ID_X550EM_A_SFP_N:
177 case IXGBE_DEV_ID_X550EM_A_QSFP:
178 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
182 hw->mac.ops.check_link(hw, &speed, &link_up, false);
183 /* if link is down, assume supported */
185 supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
192 case ixgbe_media_type_backplane:
193 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
198 case ixgbe_media_type_copper:
199 /* only some copper devices support flow control autoneg */
200 switch (hw->device_id) {
201 case IXGBE_DEV_ID_82599_T3_LOM:
202 case IXGBE_DEV_ID_X540T:
203 case IXGBE_DEV_ID_X540T1:
204 case IXGBE_DEV_ID_X540_BYPASS:
205 case IXGBE_DEV_ID_X550T:
206 case IXGBE_DEV_ID_X550T1:
207 case IXGBE_DEV_ID_X550EM_X_10G_T:
208 case IXGBE_DEV_ID_X550EM_A_10G_T:
209 case IXGBE_DEV_ID_X550EM_A_1G_T:
210 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
221 ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
222 "Device %x does not support flow control autoneg",
228 * ixgbe_setup_fc_generic - Set up flow control
229 * @hw: pointer to hardware structure
231 * Called at init time to set up flow control.
233 s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
235 s32 ret_val = IXGBE_SUCCESS;
236 u32 reg = 0, reg_bp = 0;
240 DEBUGFUNC("ixgbe_setup_fc_generic");
242 /* Validate the requested mode */
243 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
244 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
245 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
246 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
251 * 10gig parts do not have a word in the EEPROM to determine the
252 * default flow control setting, so we explicitly set it to full.
254 if (hw->fc.requested_mode == ixgbe_fc_default)
255 hw->fc.requested_mode = ixgbe_fc_full;
258 * Set up the 1G and 10G flow control advertisement registers so the
259 * HW will be able to do fc autoneg once the cable is plugged in. If
260 * we link at 10G, the 1G advertisement is harmless and vice versa.
262 switch (hw->phy.media_type) {
263 case ixgbe_media_type_backplane:
264 /* some MAC's need RMW protection on AUTOC */
265 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp);
266 if (ret_val != IXGBE_SUCCESS)
269 /* fall through - only backplane uses autoc */
270 case ixgbe_media_type_fiber_fixed:
271 case ixgbe_media_type_fiber_qsfp:
272 case ixgbe_media_type_fiber:
273 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
276 case ixgbe_media_type_copper:
277 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
278 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu);
285 * The possible values of fc.requested_mode are:
286 * 0: Flow control is completely disabled
287 * 1: Rx flow control is enabled (we can receive pause frames,
288 * but not send pause frames).
289 * 2: Tx flow control is enabled (we can send pause frames but
290 * we do not support receiving pause frames).
291 * 3: Both Rx and Tx flow control (symmetric) are enabled.
294 switch (hw->fc.requested_mode) {
296 /* Flow control completely disabled by software override. */
297 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
298 if (hw->phy.media_type == ixgbe_media_type_backplane)
299 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
300 IXGBE_AUTOC_ASM_PAUSE);
301 else if (hw->phy.media_type == ixgbe_media_type_copper)
302 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
304 case ixgbe_fc_tx_pause:
306 * Tx Flow control is enabled, and Rx Flow control is
307 * disabled by software override.
309 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
310 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
311 if (hw->phy.media_type == ixgbe_media_type_backplane) {
312 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
313 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
314 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
315 reg_cu |= IXGBE_TAF_ASM_PAUSE;
316 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
319 case ixgbe_fc_rx_pause:
321 * Rx Flow control is enabled and Tx Flow control is
322 * disabled by software override. Since there really
323 * isn't a way to advertise that we are capable of RX
324 * Pause ONLY, we will advertise that we support both
325 * symmetric and asymmetric Rx PAUSE, as such we fall
326 * through to the fc_full statement. Later, we will
327 * disable the adapter's ability to send PAUSE frames.
330 /* Flow control (both Rx and Tx) is enabled by SW override. */
331 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
332 if (hw->phy.media_type == ixgbe_media_type_backplane)
333 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
334 IXGBE_AUTOC_ASM_PAUSE;
335 else if (hw->phy.media_type == ixgbe_media_type_copper)
336 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
339 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
340 "Flow control param set incorrectly\n");
341 ret_val = IXGBE_ERR_CONFIG;
346 if (hw->mac.type < ixgbe_mac_X540) {
348 * Enable auto-negotiation between the MAC & PHY;
349 * the MAC will advertise clause 37 flow control.
351 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
352 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
354 /* Disable AN timeout */
355 if (hw->fc.strict_ieee)
356 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
358 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
359 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
363 * AUTOC restart handles negotiation of 1G and 10G on backplane
364 * and copper. There is no need to set the PCS1GCTL register.
367 if (hw->phy.media_type == ixgbe_media_type_backplane) {
368 reg_bp |= IXGBE_AUTOC_AN_RESTART;
369 ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
372 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
373 (ixgbe_device_supports_autoneg_fc(hw))) {
374 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
375 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
378 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
384 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
385 * @hw: pointer to hardware structure
387 * Starts the hardware by filling the bus info structure and media type, clears
388 * all on chip counters, initializes receive address registers, multicast
389 * table, VLAN filter table, calls routine to set up link and flow control
390 * settings, and leaves transmit and receive units disabled and uninitialized
392 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
398 DEBUGFUNC("ixgbe_start_hw_generic");
400 /* Set the media type */
401 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
403 /* PHY ops initialization must be done in reset_hw() */
405 /* Clear the VLAN filter table */
406 hw->mac.ops.clear_vfta(hw);
408 /* Clear statistics registers */
409 hw->mac.ops.clear_hw_cntrs(hw);
411 /* Set No Snoop Disable */
412 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
413 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
414 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
415 IXGBE_WRITE_FLUSH(hw);
417 /* Setup flow control */
418 ret_val = ixgbe_setup_fc(hw);
419 if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
420 DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
424 /* Cache bit indicating need for crosstalk fix */
425 switch (hw->mac.type) {
426 case ixgbe_mac_82599EB:
427 case ixgbe_mac_X550EM_x:
428 case ixgbe_mac_X550EM_a:
429 hw->mac.ops.get_device_caps(hw, &device_caps);
430 if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
431 hw->need_crosstalk_fix = false;
433 hw->need_crosstalk_fix = true;
436 hw->need_crosstalk_fix = false;
440 /* Clear adapter stopped flag */
441 hw->adapter_stopped = false;
443 return IXGBE_SUCCESS;
447 * ixgbe_start_hw_gen2 - Init sequence for common device family
448 * @hw: pointer to hw structure
450 * Performs the init sequence common to the second generation
452 * Devices in the second generation:
456 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
461 /* Clear the rate limiters */
462 for (i = 0; i < hw->mac.max_tx_queues; i++) {
463 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
464 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
466 IXGBE_WRITE_FLUSH(hw);
468 /* Disable relaxed ordering */
469 for (i = 0; i < hw->mac.max_tx_queues; i++) {
470 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
471 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
472 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
475 for (i = 0; i < hw->mac.max_rx_queues; i++) {
476 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
477 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
478 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
479 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
482 return IXGBE_SUCCESS;
486 * ixgbe_init_hw_generic - Generic hardware initialization
487 * @hw: pointer to hardware structure
489 * Initialize the hardware by resetting the hardware, filling the bus info
490 * structure and media type, clears all on chip counters, initializes receive
491 * address registers, multicast table, VLAN filter table, calls routine to set
492 * up link and flow control settings, and leaves transmit and receive units
493 * disabled and uninitialized
495 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
499 DEBUGFUNC("ixgbe_init_hw_generic");
501 /* Reset the hardware */
502 status = hw->mac.ops.reset_hw(hw);
504 if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
506 status = hw->mac.ops.start_hw(hw);
509 /* Initialize the LED link active for LED blink support */
510 if (hw->mac.ops.init_led_link_act)
511 hw->mac.ops.init_led_link_act(hw);
513 if (status != IXGBE_SUCCESS)
514 DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
520 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
521 * @hw: pointer to hardware structure
523 * Clears all hardware statistics counters by reading them from the hardware
524 * Statistics counters are clear on read.
526 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
530 DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
532 IXGBE_READ_REG(hw, IXGBE_CRCERRS);
533 IXGBE_READ_REG(hw, IXGBE_ILLERRC);
534 IXGBE_READ_REG(hw, IXGBE_ERRBC);
535 IXGBE_READ_REG(hw, IXGBE_MSPDC);
536 for (i = 0; i < 8; i++)
537 IXGBE_READ_REG(hw, IXGBE_MPC(i));
539 IXGBE_READ_REG(hw, IXGBE_MLFC);
540 IXGBE_READ_REG(hw, IXGBE_MRFC);
541 IXGBE_READ_REG(hw, IXGBE_RLEC);
542 IXGBE_READ_REG(hw, IXGBE_LXONTXC);
543 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
544 if (hw->mac.type >= ixgbe_mac_82599EB) {
545 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
546 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
548 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
549 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
552 for (i = 0; i < 8; i++) {
553 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
554 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
555 if (hw->mac.type >= ixgbe_mac_82599EB) {
556 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
557 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
559 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
560 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
563 if (hw->mac.type >= ixgbe_mac_82599EB)
564 for (i = 0; i < 8; i++)
565 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
566 IXGBE_READ_REG(hw, IXGBE_PRC64);
567 IXGBE_READ_REG(hw, IXGBE_PRC127);
568 IXGBE_READ_REG(hw, IXGBE_PRC255);
569 IXGBE_READ_REG(hw, IXGBE_PRC511);
570 IXGBE_READ_REG(hw, IXGBE_PRC1023);
571 IXGBE_READ_REG(hw, IXGBE_PRC1522);
572 IXGBE_READ_REG(hw, IXGBE_GPRC);
573 IXGBE_READ_REG(hw, IXGBE_BPRC);
574 IXGBE_READ_REG(hw, IXGBE_MPRC);
575 IXGBE_READ_REG(hw, IXGBE_GPTC);
576 IXGBE_READ_REG(hw, IXGBE_GORCL);
577 IXGBE_READ_REG(hw, IXGBE_GORCH);
578 IXGBE_READ_REG(hw, IXGBE_GOTCL);
579 IXGBE_READ_REG(hw, IXGBE_GOTCH);
580 if (hw->mac.type == ixgbe_mac_82598EB)
581 for (i = 0; i < 8; i++)
582 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
583 IXGBE_READ_REG(hw, IXGBE_RUC);
584 IXGBE_READ_REG(hw, IXGBE_RFC);
585 IXGBE_READ_REG(hw, IXGBE_ROC);
586 IXGBE_READ_REG(hw, IXGBE_RJC);
587 IXGBE_READ_REG(hw, IXGBE_MNGPRC);
588 IXGBE_READ_REG(hw, IXGBE_MNGPDC);
589 IXGBE_READ_REG(hw, IXGBE_MNGPTC);
590 IXGBE_READ_REG(hw, IXGBE_TORL);
591 IXGBE_READ_REG(hw, IXGBE_TORH);
592 IXGBE_READ_REG(hw, IXGBE_TPR);
593 IXGBE_READ_REG(hw, IXGBE_TPT);
594 IXGBE_READ_REG(hw, IXGBE_PTC64);
595 IXGBE_READ_REG(hw, IXGBE_PTC127);
596 IXGBE_READ_REG(hw, IXGBE_PTC255);
597 IXGBE_READ_REG(hw, IXGBE_PTC511);
598 IXGBE_READ_REG(hw, IXGBE_PTC1023);
599 IXGBE_READ_REG(hw, IXGBE_PTC1522);
600 IXGBE_READ_REG(hw, IXGBE_MPTC);
601 IXGBE_READ_REG(hw, IXGBE_BPTC);
602 for (i = 0; i < 16; i++) {
603 IXGBE_READ_REG(hw, IXGBE_QPRC(i));
604 IXGBE_READ_REG(hw, IXGBE_QPTC(i));
605 if (hw->mac.type >= ixgbe_mac_82599EB) {
606 IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
607 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
608 IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
609 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
610 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
612 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
613 IXGBE_READ_REG(hw, IXGBE_QBTC(i));
617 if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
619 ixgbe_identify_phy(hw);
620 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
621 IXGBE_MDIO_PCS_DEV_TYPE, &i);
622 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
623 IXGBE_MDIO_PCS_DEV_TYPE, &i);
624 hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
625 IXGBE_MDIO_PCS_DEV_TYPE, &i);
626 hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
627 IXGBE_MDIO_PCS_DEV_TYPE, &i);
630 return IXGBE_SUCCESS;
634 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
635 * @hw: pointer to hardware structure
636 * @pba_num: stores the part number string from the EEPROM
637 * @pba_num_size: part number string buffer length
639 * Reads the part number string from the EEPROM.
641 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
650 DEBUGFUNC("ixgbe_read_pba_string_generic");
652 if (pba_num == NULL) {
653 DEBUGOUT("PBA string buffer was null\n");
654 return IXGBE_ERR_INVALID_ARGUMENT;
657 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
659 DEBUGOUT("NVM Read Error\n");
663 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
665 DEBUGOUT("NVM Read Error\n");
670 * if data is not ptr guard the PBA must be in legacy format which
671 * means pba_ptr is actually our second data word for the PBA number
672 * and we can decode it into an ascii string
674 if (data != IXGBE_PBANUM_PTR_GUARD) {
675 DEBUGOUT("NVM PBA number is not stored as string\n");
677 /* we will need 11 characters to store the PBA */
678 if (pba_num_size < 11) {
679 DEBUGOUT("PBA string buffer too small\n");
680 return IXGBE_ERR_NO_SPACE;
683 /* extract hex string from data and pba_ptr */
684 pba_num[0] = (data >> 12) & 0xF;
685 pba_num[1] = (data >> 8) & 0xF;
686 pba_num[2] = (data >> 4) & 0xF;
687 pba_num[3] = data & 0xF;
688 pba_num[4] = (pba_ptr >> 12) & 0xF;
689 pba_num[5] = (pba_ptr >> 8) & 0xF;
692 pba_num[8] = (pba_ptr >> 4) & 0xF;
693 pba_num[9] = pba_ptr & 0xF;
695 /* put a null character on the end of our string */
698 /* switch all the data but the '-' to hex char */
699 for (offset = 0; offset < 10; offset++) {
700 if (pba_num[offset] < 0xA)
701 pba_num[offset] += '0';
702 else if (pba_num[offset] < 0x10)
703 pba_num[offset] += 'A' - 0xA;
706 return IXGBE_SUCCESS;
709 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
711 DEBUGOUT("NVM Read Error\n");
715 if (length == 0xFFFF || length == 0) {
716 DEBUGOUT("NVM PBA number section invalid length\n");
717 return IXGBE_ERR_PBA_SECTION;
720 /* check if pba_num buffer is big enough */
721 if (pba_num_size < (((u32)length * 2) - 1)) {
722 DEBUGOUT("PBA string buffer too small\n");
723 return IXGBE_ERR_NO_SPACE;
726 /* trim pba length from start of string */
730 for (offset = 0; offset < length; offset++) {
731 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
733 DEBUGOUT("NVM Read Error\n");
736 pba_num[offset * 2] = (u8)(data >> 8);
737 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
739 pba_num[offset * 2] = '\0';
741 return IXGBE_SUCCESS;
745 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
746 * @hw: pointer to hardware structure
747 * @pba_num: stores the part number from the EEPROM
749 * Reads the part number from the EEPROM.
751 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
756 DEBUGFUNC("ixgbe_read_pba_num_generic");
758 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
760 DEBUGOUT("NVM Read Error\n");
762 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
763 DEBUGOUT("NVM Not supported\n");
764 return IXGBE_NOT_IMPLEMENTED;
766 *pba_num = (u32)(data << 16);
768 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
770 DEBUGOUT("NVM Read Error\n");
775 return IXGBE_SUCCESS;
780 * @hw: pointer to the HW structure
781 * @eeprom_buf: optional pointer to EEPROM image
782 * @eeprom_buf_size: size of EEPROM image in words
783 * @max_pba_block_size: PBA block size limit
784 * @pba: pointer to output PBA structure
786 * Reads PBA from EEPROM image when eeprom_buf is not NULL.
787 * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
790 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
791 u32 eeprom_buf_size, u16 max_pba_block_size,
792 struct ixgbe_pba *pba)
798 return IXGBE_ERR_PARAM;
800 if (eeprom_buf == NULL) {
801 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
806 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
807 pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
808 pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
810 return IXGBE_ERR_PARAM;
814 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
815 if (pba->pba_block == NULL)
816 return IXGBE_ERR_PARAM;
818 ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
824 if (pba_block_size > max_pba_block_size)
825 return IXGBE_ERR_PARAM;
827 if (eeprom_buf == NULL) {
828 ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
834 if (eeprom_buf_size > (u32)(pba->word[1] +
836 memcpy(pba->pba_block,
837 &eeprom_buf[pba->word[1]],
838 pba_block_size * sizeof(u16));
840 return IXGBE_ERR_PARAM;
845 return IXGBE_SUCCESS;
849 * ixgbe_write_pba_raw
850 * @hw: pointer to the HW structure
851 * @eeprom_buf: optional pointer to EEPROM image
852 * @eeprom_buf_size: size of EEPROM image in words
853 * @pba: pointer to PBA structure
855 * Writes PBA to EEPROM image when eeprom_buf is not NULL.
856 * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
859 s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
860 u32 eeprom_buf_size, struct ixgbe_pba *pba)
865 return IXGBE_ERR_PARAM;
867 if (eeprom_buf == NULL) {
868 ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
873 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
874 eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
875 eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
877 return IXGBE_ERR_PARAM;
881 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
882 if (pba->pba_block == NULL)
883 return IXGBE_ERR_PARAM;
885 if (eeprom_buf == NULL) {
886 ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
892 if (eeprom_buf_size > (u32)(pba->word[1] +
893 pba->pba_block[0])) {
894 memcpy(&eeprom_buf[pba->word[1]],
896 pba->pba_block[0] * sizeof(u16));
898 return IXGBE_ERR_PARAM;
903 return IXGBE_SUCCESS;
907 * ixgbe_get_pba_block_size
908 * @hw: pointer to the HW structure
909 * @eeprom_buf: optional pointer to EEPROM image
910 * @eeprom_buf_size: size of EEPROM image in words
911 * @pba_data_size: pointer to output variable
913 * Returns the size of the PBA block in words. Function operates on EEPROM
914 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
918 s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
919 u32 eeprom_buf_size, u16 *pba_block_size)
925 DEBUGFUNC("ixgbe_get_pba_block_size");
927 if (eeprom_buf == NULL) {
928 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
933 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
934 pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
935 pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
937 return IXGBE_ERR_PARAM;
941 if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
942 if (eeprom_buf == NULL) {
943 ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
948 if (eeprom_buf_size > pba_word[1])
949 length = eeprom_buf[pba_word[1] + 0];
951 return IXGBE_ERR_PARAM;
954 if (length == 0xFFFF || length == 0)
955 return IXGBE_ERR_PBA_SECTION;
957 /* PBA number in legacy format, there is no PBA Block. */
961 if (pba_block_size != NULL)
962 *pba_block_size = length;
964 return IXGBE_SUCCESS;
968 * ixgbe_get_mac_addr_generic - Generic get MAC address
969 * @hw: pointer to hardware structure
970 * @mac_addr: Adapter MAC address
972 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
973 * A reset of the adapter must be performed prior to calling this function
974 * in order for the MAC address to have been loaded from the EEPROM into RAR0
976 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
982 DEBUGFUNC("ixgbe_get_mac_addr_generic");
984 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
985 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
987 for (i = 0; i < 4; i++)
988 mac_addr[i] = (u8)(rar_low >> (i*8));
990 for (i = 0; i < 2; i++)
991 mac_addr[i+4] = (u8)(rar_high >> (i*8));
993 return IXGBE_SUCCESS;
997 * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
998 * @hw: pointer to hardware structure
999 * @link_status: the link status returned by the PCI config space
1001 * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
1003 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
1005 struct ixgbe_mac_info *mac = &hw->mac;
1007 if (hw->bus.type == ixgbe_bus_type_unknown)
1008 hw->bus.type = ixgbe_bus_type_pci_express;
1010 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
1011 case IXGBE_PCI_LINK_WIDTH_1:
1012 hw->bus.width = ixgbe_bus_width_pcie_x1;
1014 case IXGBE_PCI_LINK_WIDTH_2:
1015 hw->bus.width = ixgbe_bus_width_pcie_x2;
1017 case IXGBE_PCI_LINK_WIDTH_4:
1018 hw->bus.width = ixgbe_bus_width_pcie_x4;
1020 case IXGBE_PCI_LINK_WIDTH_8:
1021 hw->bus.width = ixgbe_bus_width_pcie_x8;
1024 hw->bus.width = ixgbe_bus_width_unknown;
1028 switch (link_status & IXGBE_PCI_LINK_SPEED) {
1029 case IXGBE_PCI_LINK_SPEED_2500:
1030 hw->bus.speed = ixgbe_bus_speed_2500;
1032 case IXGBE_PCI_LINK_SPEED_5000:
1033 hw->bus.speed = ixgbe_bus_speed_5000;
1035 case IXGBE_PCI_LINK_SPEED_8000:
1036 hw->bus.speed = ixgbe_bus_speed_8000;
1039 hw->bus.speed = ixgbe_bus_speed_unknown;
1043 mac->ops.set_lan_id(hw);
1047 * ixgbe_get_bus_info_generic - Generic set PCI bus info
1048 * @hw: pointer to hardware structure
1050 * Gets the PCI bus info (speed, width, type) then calls helper function to
1051 * store this data within the ixgbe_hw structure.
1053 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1057 DEBUGFUNC("ixgbe_get_bus_info_generic");
1059 /* Get the negotiated link width and speed from PCI config space */
1060 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1062 ixgbe_set_pci_config_data_generic(hw, link_status);
1064 return IXGBE_SUCCESS;
1068 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1069 * @hw: pointer to the HW structure
1071 * Determines the LAN function id by reading memory-mapped registers and swaps
1072 * the port value if requested, and set MAC instance for devices that share
1075 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
1077 struct ixgbe_bus_info *bus = &hw->bus;
1081 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
1083 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
1084 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
1085 bus->lan_id = (u8)bus->func;
1087 /* check for a port swap */
1088 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
1089 if (reg & IXGBE_FACTPS_LFS)
1092 /* Get MAC instance from EEPROM for configuring CS4227 */
1093 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
1094 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
1095 bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
1096 IXGBE_EE_CTRL_4_INST_ID_SHIFT;
1101 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1102 * @hw: pointer to hardware structure
1104 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1105 * disables transmit and receive units. The adapter_stopped flag is used by
1106 * the shared code and drivers to determine if the adapter is in a stopped
1107 * state and should not touch the hardware.
1109 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
1114 DEBUGFUNC("ixgbe_stop_adapter_generic");
1117 * Set the adapter_stopped flag so other driver functions stop touching
1120 hw->adapter_stopped = true;
1122 /* Disable the receive unit */
1123 ixgbe_disable_rx(hw);
1125 /* Clear interrupt mask to stop interrupts from being generated */
1126 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1128 /* Clear any pending interrupts, flush previous writes */
1129 IXGBE_READ_REG(hw, IXGBE_EICR);
1131 /* Disable the transmit unit. Each queue must be disabled. */
1132 for (i = 0; i < hw->mac.max_tx_queues; i++)
1133 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
1135 /* Disable the receive unit by stopping each queue */
1136 for (i = 0; i < hw->mac.max_rx_queues; i++) {
1137 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1138 reg_val &= ~IXGBE_RXDCTL_ENABLE;
1139 reg_val |= IXGBE_RXDCTL_SWFLSH;
1140 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
1143 /* flush all queues disables */
1144 IXGBE_WRITE_FLUSH(hw);
1148 * Prevent the PCI-E bus from hanging by disabling PCI-E master
1149 * access and verify no pending requests
1151 return ixgbe_disable_pcie_master(hw);
1155 * ixgbe_init_led_link_act_generic - Store the LED index link/activity.
1156 * @hw: pointer to hardware structure
1158 * Store the index for the link active LED. This will be used to support
1161 s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
1163 struct ixgbe_mac_info *mac = &hw->mac;
1164 u32 led_reg, led_mode;
1167 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
/* Scan the four LEDCTL mode fields for one configured as link-active. */
1169 /* Get LED link active from the LEDCTL register */
1170 for (i = 0; i < 4; i++) {
1171 led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);
1173 if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
1174 IXGBE_LED_LINK_ACTIVE) {
1175 mac->led_link_act = i;
1176 return IXGBE_SUCCESS;
1181 * If LEDCTL register does not have the LED link active set, then use
1182 * known MAC defaults.
1184 switch (hw->mac.type) {
/* X550EM parts default to LED 1; all other MACs fall through to LED 2. */
1185 case ixgbe_mac_X550EM_a:
1186 case ixgbe_mac_X550EM_x:
1187 mac->led_link_act = 1;
1190 mac->led_link_act = 2;
1192 return IXGBE_SUCCESS;
1196 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1197 * @hw: pointer to hardware structure
1198 * @index: led number to turn on
1200 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1202 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1204 DEBUGFUNC("ixgbe_led_on_generic");
/* NOTE(review): the index-range check guarding this return appears elided
 * from this extraction — invalid indexes return IXGBE_ERR_PARAM. */
1207 return IXGBE_ERR_PARAM;
1209 /* To turn on the LED, set mode to ON. */
1210 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1211 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1212 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1213 IXGBE_WRITE_FLUSH(hw);
1215 return IXGBE_SUCCESS;
1219 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
1220 * @hw: pointer to hardware structure
1221 * @index: led number to turn off
1223 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1225 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1227 DEBUGFUNC("ixgbe_led_off_generic");
/* NOTE(review): the index-range check guarding this return appears elided
 * from this extraction — invalid indexes return IXGBE_ERR_PARAM. */
1230 return IXGBE_ERR_PARAM;
/* Clear the 4-bit mode field for this LED, then program the OFF mode. */
1232 /* To turn off the LED, set mode to OFF. */
1233 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1234 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1235 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1236 IXGBE_WRITE_FLUSH(hw);
1238 return IXGBE_SUCCESS;
1242 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1243 * @hw: pointer to hardware structure
1245 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1246 * ixgbe_hw struct in order to set up EEPROM access.
1248 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
1250 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1254 DEBUGFUNC("ixgbe_init_eeprom_params_generic");
/* Only probe once: the uninitialized sentinel guards repeated init calls. */
1256 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1257 eeprom->type = ixgbe_eeprom_none;
1258 /* Set default semaphore delay to 10ms which is a well
1260 eeprom->semaphore_delay = 10;
1261 /* Clear EEPROM page size, it will be initialized as needed */
1262 eeprom->word_page_size = 0;
1265 * Check for EEPROM present first.
1266 * If not present leave as none
1268 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1269 if (eec & IXGBE_EEC_PRES) {
1270 eeprom->type = ixgbe_eeprom_spi;
1273 * SPI EEPROM is assumed here. This code would need to
1274 * change if a future EEPROM is not SPI.
/* Word size is a power of two encoded in the EEC.SIZE field. */
1276 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1277 IXGBE_EEC_SIZE_SHIFT);
1278 eeprom->word_size = 1 << (eeprom_size +
1279 IXGBE_EEPROM_WORD_SIZE_SHIFT);
/* EEC.ADDR_SIZE selects 16-bit vs 8-bit SPI addressing. */
1282 if (eec & IXGBE_EEC_ADDR_SIZE)
1283 eeprom->address_bits = 16;
1285 eeprom->address_bits = 8;
1286 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
1287 "%d\n", eeprom->type, eeprom->word_size,
1288 eeprom->address_bits);
1291 return IXGBE_SUCCESS;
1295 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1296 * @hw: pointer to hardware structure
1297 * @offset: offset within the EEPROM to write
1298 * @words: number of word(s)
1299 * @data: 16 bit word(s) to write to EEPROM
1301 * Reads 16 bit word(s) from EEPROM through bit-bang method
1303 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1304 u16 words, u16 *data)
1306 s32 status = IXGBE_SUCCESS;
1309 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1311 hw->eeprom.ops.init_params(hw);
/* NOTE(review): a zero-words guard preceding this assignment appears
 * elided from this extraction. */
1314 status = IXGBE_ERR_INVALID_ARGUMENT;
/* Reject writes that would run past the end of the EEPROM. */
1318 if (offset + words > hw->eeprom.word_size) {
1319 status = IXGBE_ERR_EEPROM;
1324 * The EEPROM page size cannot be queried from the chip. We do lazy
1325 * initialization. It is worth to do that when we write large buffer.
1327 if ((hw->eeprom.word_page_size == 0) &&
1328 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1329 ixgbe_detect_eeprom_page_size_generic(hw, offset);
1332 * We cannot hold synchronization semaphores for too long
1333 * to avoid other entity starvation. However it is more efficient
1334 * to read in bursts than synchronizing access for each word.
/* Write in bursts of at most IXGBE_EEPROM_RD_BUFFER_MAX_COUNT words. */
1336 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1337 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1338 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1339 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1342 if (status != IXGBE_SUCCESS)
1351 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1352 * @hw: pointer to hardware structure
1353 * @offset: offset within the EEPROM to be written to
1354 * @words: number of word(s)
1355 * @data: 16 bit word(s) to be written to the EEPROM
1357 * If ixgbe_eeprom_update_checksum is not called after this function, the
1358 * EEPROM will most likely contain an invalid checksum.
1360 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1361 u16 words, u16 *data)
1367 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
1369 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
1371 /* Prepare the EEPROM for writing */
1372 status = ixgbe_acquire_eeprom(hw);
1374 if (status == IXGBE_SUCCESS) {
/* If the part never reports ready, release the bus and fail. */
1375 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1376 ixgbe_release_eeprom(hw);
1377 status = IXGBE_ERR_EEPROM;
1381 if (status == IXGBE_SUCCESS) {
1382 for (i = 0; i < words; i++) {
1383 ixgbe_standby_eeprom(hw);
1385 /* Send the WRITE ENABLE command (8 bit opcode ) */
1386 ixgbe_shift_out_eeprom_bits(hw,
1387 IXGBE_EEPROM_WREN_OPCODE_SPI,
1388 IXGBE_EEPROM_OPCODE_BITS);
1390 ixgbe_standby_eeprom(hw);
1393 * Some SPI eeproms use the 8th address bit embedded
/* 8-bit-address parts encode address bit 8 (byte offset >= 256,
 * i.e. word offset >= 128) inside the opcode (A8 bit). */
1396 if ((hw->eeprom.address_bits == 8) &&
1397 ((offset + i) >= 128))
1398 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1400 /* Send the Write command (8-bit opcode + addr) */
1401 ixgbe_shift_out_eeprom_bits(hw, write_opcode,
1402 IXGBE_EEPROM_OPCODE_BITS);
/* SPI addresses are in bytes, hence the word offset * 2. */
1403 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1404 hw->eeprom.address_bits);
1406 page_size = hw->eeprom.word_page_size;
1408 /* Send the data in burst via SPI*/
/* Swap to big-endian byte order expected on the SPI wire. */
1411 word = (word >> 8) | (word << 8);
1412 ixgbe_shift_out_eeprom_bits(hw, word, 16);
1417 /* do not wrap around page */
1418 if (((offset + i) & (page_size - 1)) ==
1421 } while (++i < words);
1423 ixgbe_standby_eeprom(hw);
1426 /* Done with writing - release the EEPROM */
1427 ixgbe_release_eeprom(hw);
1434 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1435 * @hw: pointer to hardware structure
1436 * @offset: offset within the EEPROM to be written to
1437 * @data: 16 bit word to be written to the EEPROM
1439 * If ixgbe_eeprom_update_checksum is not called after this function, the
1440 * EEPROM will most likely contain an invalid checksum.
1442 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1446 DEBUGFUNC("ixgbe_write_eeprom_generic");
1448 hw->eeprom.ops.init_params(hw);
1450 if (offset >= hw->eeprom.word_size) {
1451 status = IXGBE_ERR_EEPROM;
/* Single-word convenience wrapper over the buffer bit-bang writer. */
1455 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1462 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1463 * @hw: pointer to hardware structure
1464 * @offset: offset within the EEPROM to be read
1465 * @data: read 16 bit words(s) from EEPROM
1466 * @words: number of word(s)
1468 * Reads 16 bit word(s) from EEPROM through bit-bang method
1470 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1471 u16 words, u16 *data)
1473 s32 status = IXGBE_SUCCESS;
1476 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1478 hw->eeprom.ops.init_params(hw);
/* NOTE(review): a zero-words guard preceding this assignment appears
 * elided from this extraction. */
1481 status = IXGBE_ERR_INVALID_ARGUMENT;
1485 if (offset + words > hw->eeprom.word_size) {
1486 status = IXGBE_ERR_EEPROM;
1491 * We cannot hold synchronization semaphores for too long
1492 * to avoid other entity starvation. However it is more efficient
1493 * to read in bursts than synchronizing access for each word.
/* Read in bursts of at most IXGBE_EEPROM_RD_BUFFER_MAX_COUNT words. */
1495 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1496 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1497 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1499 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1502 if (status != IXGBE_SUCCESS)
1511 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1512 * @hw: pointer to hardware structure
1513 * @offset: offset within the EEPROM to be read
1514 * @words: number of word(s)
1515 * @data: read 16 bit word(s) from EEPROM
1517 * Reads 16 bit word(s) from EEPROM through bit-bang method
1519 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1520 u16 words, u16 *data)
1524 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1527 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1529 /* Prepare the EEPROM for reading */
1530 status = ixgbe_acquire_eeprom(hw);
1532 if (status == IXGBE_SUCCESS) {
/* If the part never reports ready, release the bus and fail. */
1533 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1534 ixgbe_release_eeprom(hw);
1535 status = IXGBE_ERR_EEPROM;
1539 if (status == IXGBE_SUCCESS) {
1540 for (i = 0; i < words; i++) {
1541 ixgbe_standby_eeprom(hw);
1543 * Some SPI eeproms use the 8th address bit embedded
/* 8-bit-address parts encode address bit 8 inside the opcode. */
1546 if ((hw->eeprom.address_bits == 8) &&
1547 ((offset + i) >= 128))
1548 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1550 /* Send the READ command (opcode + addr) */
1551 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1552 IXGBE_EEPROM_OPCODE_BITS);
/* SPI addresses are in bytes, hence the word offset * 2. */
1553 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1554 hw->eeprom.address_bits);
1556 /* Read the data. */
1557 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
/* Swap from the wire's big-endian byte order to host order. */
1558 data[i] = (word_in >> 8) | (word_in << 8);
1561 /* End this read operation */
1562 ixgbe_release_eeprom(hw);
1569 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1570 * @hw: pointer to hardware structure
1571 * @offset: offset within the EEPROM to be read
1572 * @data: read 16 bit value from EEPROM
1574 * Reads 16 bit value from EEPROM through bit-bang method
1576 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1581 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1583 hw->eeprom.ops.init_params(hw);
1585 if (offset >= hw->eeprom.word_size) {
1586 status = IXGBE_ERR_EEPROM;
/* Single-word convenience wrapper over the buffer bit-bang reader. */
1590 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1597 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1598 * @hw: pointer to hardware structure
1599 * @offset: offset of word in the EEPROM to read
1600 * @words: number of word(s)
1601 * @data: 16 bit word(s) from the EEPROM
1603 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1605 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1606 u16 words, u16 *data)
1609 s32 status = IXGBE_SUCCESS;
1612 DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1614 hw->eeprom.ops.init_params(hw);
/* NOTE(review): a zero-words guard preceding this assignment appears
 * elided from this extraction. */
1617 status = IXGBE_ERR_INVALID_ARGUMENT;
1618 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1622 if (offset >= hw->eeprom.word_size) {
1623 status = IXGBE_ERR_EEPROM;
1624 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
/* One EERD transaction per word: write addr+START, poll DONE, read data. */
1628 for (i = 0; i < words; i++) {
1629 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1630 IXGBE_EEPROM_RW_REG_START;
1632 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1633 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1635 if (status == IXGBE_SUCCESS) {
1636 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1637 IXGBE_EEPROM_RW_REG_DATA);
1639 DEBUGOUT("Eeprom read timed out\n");
1648 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1649 * @hw: pointer to hardware structure
1650 * @offset: offset within the EEPROM to be used as a scratch pad
1652 * Discover EEPROM page size by writing marching data at given offset.
1653 * This function is called only when we are writing a new large buffer
1654 * at given offset so the data would be overwritten anyway.
1656 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1659 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1660 s32 status = IXGBE_SUCCESS;
1663 DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
/* Fill the scratch buffer with marching values (data[i] = i). */
1665 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
/* Temporarily assume the max page size so the burst write below is
 * issued as one page-sized transaction, then restore the unknown state. */
1668 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1669 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1670 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1671 hw->eeprom.word_page_size = 0;
1672 if (status != IXGBE_SUCCESS)
1675 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1676 if (status != IXGBE_SUCCESS)
1680 * When writing in burst more than the actual page size
1681 * EEPROM address wraps around current page.
/* The value found at word 0 reveals where the write wrapped, and thus
 * the real page size. */
1683 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1685 DEBUGOUT1("Detected EEPROM page size = %d words.",
1686 hw->eeprom.word_page_size);
1692 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1693 * @hw: pointer to hardware structure
1694 * @offset: offset of word in the EEPROM to read
1695 * @data: word read from the EEPROM
1697 * Reads a 16 bit word from the EEPROM using the EERD register.
1699 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
/* Single-word convenience wrapper over the EERD buffer reader. */
1701 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1705 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1706 * @hw: pointer to hardware structure
1707 * @offset: offset of word in the EEPROM to write
1708 * @words: number of word(s)
1709 * @data: word(s) write to the EEPROM
1711 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1713 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1714 u16 words, u16 *data)
1717 s32 status = IXGBE_SUCCESS;
1720 DEBUGFUNC("ixgbe_write_eewr_generic");
1722 hw->eeprom.ops.init_params(hw);
/* NOTE(review): a zero-words guard preceding this assignment appears
 * elided from this extraction. */
1725 status = IXGBE_ERR_INVALID_ARGUMENT;
1726 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1730 if (offset >= hw->eeprom.word_size) {
1731 status = IXGBE_ERR_EEPROM;
1732 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
/* Per word: wait until EEWR is idle, issue addr+data+START, then wait
 * for that write to complete before moving on. */
1736 for (i = 0; i < words; i++) {
1737 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1738 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1739 IXGBE_EEPROM_RW_REG_START;
1741 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1742 if (status != IXGBE_SUCCESS) {
1743 DEBUGOUT("Eeprom write EEWR timed out\n");
1747 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1749 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1750 if (status != IXGBE_SUCCESS) {
1751 DEBUGOUT("Eeprom write EEWR timed out\n");
1761 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1762 * @hw: pointer to hardware structure
1763 * @offset: offset of word in the EEPROM to write
1764 * @data: word write to the EEPROM
1766 * Write a 16 bit word to the EEPROM using the EEWR register.
1768 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
/* Single-word convenience wrapper over the EEWR buffer writer. */
1770 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1774 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1775 * @hw: pointer to hardware structure
1776 * @ee_reg: EEPROM flag for polling
1778 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1779 * read or write is done respectively.
1781 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1785 s32 status = IXGBE_ERR_EEPROM;
1787 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
/* Bounded poll: status stays IXGBE_ERR_EEPROM unless DONE is observed. */
1789 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1790 if (ee_reg == IXGBE_NVM_POLL_READ)
1791 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1793 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1795 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1796 status = IXGBE_SUCCESS;
/* Loop index reaching the limit means we never saw DONE — report it. */
1802 if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1803 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1804 "EEPROM read/write done polling timed out");
1810 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1811 * @hw: pointer to hardware structure
1813 * Prepares EEPROM for access using bit-bang method. This function should
1814 * be called before issuing a command to the EEPROM.
1816 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1818 s32 status = IXGBE_SUCCESS;
1822 DEBUGFUNC("ixgbe_acquire_eeprom");
/* First take the SW/FW synchronization semaphore for the EEPROM. */
1824 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1826 status = IXGBE_ERR_SWFW_SYNC;
1828 if (status == IXGBE_SUCCESS) {
1829 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1831 /* Request EEPROM Access */
1832 eec |= IXGBE_EEC_REQ;
1833 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
/* Poll for the hardware grant bit before driving the SPI pins. */
1835 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1836 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1837 if (eec & IXGBE_EEC_GNT)
1842 /* Release if grant not acquired */
1843 if (!(eec & IXGBE_EEC_GNT)) {
1844 eec &= ~IXGBE_EEC_REQ;
1845 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1846 DEBUGOUT("Could not acquire EEPROM grant\n");
/* Drop the semaphore taken above so FW/other drivers can proceed. */
1848 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1849 status = IXGBE_ERR_EEPROM;
1852 /* Setup EEPROM for Read/Write */
1853 if (status == IXGBE_SUCCESS) {
1854 /* Clear CS and SK */
1855 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1856 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1857 IXGBE_WRITE_FLUSH(hw);
1865 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1866 * @hw: pointer to hardware structure
1868 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1870 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1872 s32 status = IXGBE_ERR_EEPROM;
1877 DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1880 /* Get SMBI software semaphore between device drivers first */
1881 for (i = 0; i < timeout; i++) {
1883 * If the SMBI bit is 0 when we read it, then the bit will be
1884 * set and we have the semaphore
/* SMBI is read-to-set: observing 0 means this read claimed it. */
1886 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1887 if (!(swsm & IXGBE_SWSM_SMBI)) {
1888 status = IXGBE_SUCCESS;
1895 DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1898 * this release is particularly important because our attempts
1899 * above to get the semaphore may have succeeded, and if there
1900 * was a timeout, we should unconditionally clear the semaphore
1901 * bits to free the driver to make progress
1903 ixgbe_release_eeprom_semaphore(hw);
/* One last attempt after forcibly clearing the semaphore bits. */
1908 * If the SMBI bit is 0 when we read it, then the bit will be
1909 * set and we have the semaphore
1911 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1912 if (!(swsm & IXGBE_SWSM_SMBI))
1913 status = IXGBE_SUCCESS;
1916 /* Now get the semaphore between SW/FW through the SWESMBI bit */
1917 if (status == IXGBE_SUCCESS) {
1918 for (i = 0; i < timeout; i++) {
1919 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1921 /* Set the SW EEPROM semaphore bit to request access */
1922 swsm |= IXGBE_SWSM_SWESMBI;
1923 IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);
1926 * If we set the bit successfully then we got the
/* Read back: SWESMBI sticking means FW granted SW access. */
1929 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1930 if (swsm & IXGBE_SWSM_SWESMBI)
1937 * Release semaphores and return error if SW EEPROM semaphore
1938 * was not granted because we don't have access to the EEPROM
1941 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1942 "SWESMBI Software EEPROM semaphore not granted.\n");
1943 ixgbe_release_eeprom_semaphore(hw);
1944 status = IXGBE_ERR_EEPROM;
1947 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1948 "Software semaphore SMBI between device drivers "
1957 * @hw: pointer to hardware structure
1959 * This function clears hardware semaphore bits.
1961 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1965 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1967 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1969 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1970 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1971 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1972 IXGBE_WRITE_FLUSH(hw);
1976 * ixgbe_ready_eeprom - Polls for EEPROM ready
1977 * @hw: pointer to hardware structure
1979 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1981 s32 status = IXGBE_SUCCESS;
1985 DEBUGFUNC("ixgbe_ready_eeprom");
1988 * Read "Status Register" repeatedly until the LSB is cleared. The
1989 * EEPROM will signal that the command has been completed by clearing
1990 * bit 0 of the internal status register. If it's not cleared within
1991 * 5 milliseconds, then error out.
/* Each iteration issues the SPI RDSR opcode and reads back 8 status
 * bits; the loop steps i by the per-iteration delay (5 us units). */
1993 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1994 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1995 IXGBE_EEPROM_OPCODE_BITS);
1996 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1997 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
/* Toggle CS between polls so each RDSR is a fresh SPI transaction. */
2001 ixgbe_standby_eeprom(hw);
2005 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
2006 * devices (and only 0-5mSec on 5V devices)
2008 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
2009 DEBUGOUT("SPI EEPROM Status error\n");
2010 status = IXGBE_ERR_EEPROM;
2017 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
2018 * @hw: pointer to hardware structure
2020 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
2024 DEBUGFUNC("ixgbe_standby_eeprom");
2026 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
/* Raise then lower chip-select; the CS pulse terminates the current SPI
 * command and leaves the part ready for the next opcode. */
2028 /* Toggle CS to flush commands */
2029 eec |= IXGBE_EEC_CS;
2030 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2031 IXGBE_WRITE_FLUSH(hw);
2033 eec &= ~IXGBE_EEC_CS;
2034 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2035 IXGBE_WRITE_FLUSH(hw);
2040 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
2041 * @hw: pointer to hardware structure
2042 * @data: data to send to the EEPROM
2043 * @count: number of bits to shift out
2045 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
2052 DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
2054 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2057 * Mask is used to shift "count" bits of "data" out to the EEPROM
2058 * one bit at a time. Determine the starting bit based on count
/* MSB-first: start the mask at bit (count - 1). */
2060 mask = 0x01 << (count - 1);
2062 for (i = 0; i < count; i++) {
2064 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
2065 * "1", and then raising and then lowering the clock (the SK
2066 * bit controls the clock input to the EEPROM). A "0" is
2067 * shifted out to the EEPROM by setting "DI" to "0" and then
2068 * raising and then lowering the clock.
2071 eec |= IXGBE_EEC_DI;
2073 eec &= ~IXGBE_EEC_DI;
2075 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2076 IXGBE_WRITE_FLUSH(hw);
/* Clock the bit in: one SK rising/falling edge per data bit. */
2080 ixgbe_raise_eeprom_clk(hw, &eec);
2081 ixgbe_lower_eeprom_clk(hw, &eec);
2084 * Shift mask to signify next bit of data to shift in to the
2090 /* We leave the "DI" bit set to "0" when we leave this routine. */
2091 eec &= ~IXGBE_EEC_DI;
2092 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2093 IXGBE_WRITE_FLUSH(hw);
2097 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
2098 * @hw: pointer to hardware structure
2099 * @count: number of bits to shift
2101 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
2107 DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
2110 * In order to read a register from the EEPROM, we need to shift
2111 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
2112 * the clock input to the EEPROM (setting the SK bit), and then reading
2113 * the value of the "DO" bit. During this "shifting in" process the
2114 * "DI" bit should always be clear.
2116 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2118 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
/* MSB-first receive: each SK edge shifts the accumulated value left and
 * ORs in the DO pin sampled from EEC. */
2120 for (i = 0; i < count; i++) {
2122 ixgbe_raise_eeprom_clk(hw, &eec);
2124 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2126 eec &= ~(IXGBE_EEC_DI);
2127 if (eec & IXGBE_EEC_DO)
2130 ixgbe_lower_eeprom_clk(hw, &eec);
2137 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2138 * @hw: pointer to hardware structure
2139 * @eec: EEC register's current value
2141 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2143 DEBUGFUNC("ixgbe_raise_eeprom_clk");
2146 * Raise the clock input to the EEPROM
2147 * (setting the SK bit), then delay
/* *eec is updated in place so the caller's shadow copy stays in sync
 * with the register. */
2149 *eec = *eec | IXGBE_EEC_SK;
2150 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2151 IXGBE_WRITE_FLUSH(hw);
2156 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2157 * @hw: pointer to hardware structure
2158 * @eec: EEC's current value
2160 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2162 DEBUGFUNC("ixgbe_lower_eeprom_clk");
2165 * Lower the clock input to the EEPROM (clearing the SK bit), then
/* *eec is updated in place so the caller's shadow copy stays in sync
 * with the register. */
2168 *eec = *eec & ~IXGBE_EEC_SK;
2169 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2170 IXGBE_WRITE_FLUSH(hw);
2175 * ixgbe_release_eeprom - Release EEPROM, release semaphores
2176 * @hw: pointer to hardware structure
2178 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2182 DEBUGFUNC("ixgbe_release_eeprom");
2184 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
/* Deselect the part and stop the clock before dropping bus ownership. */
2186 eec |= IXGBE_EEC_CS; /* Pull CS high */
2187 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2189 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2190 IXGBE_WRITE_FLUSH(hw);
2194 /* Stop requesting EEPROM access */
2195 eec &= ~IXGBE_EEC_REQ;
2196 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2198 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2200 /* Delay before attempt to obtain semaphore again to allow FW access */
2201 msec_delay(hw->eeprom.semaphore_delay);
2205 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2206 * @hw: pointer to hardware structure
2208 * Returns a negative error code on error, or the 16-bit checksum
2210 s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
2219 DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
/* Sum the fixed header words 0x0 .. IXGBE_EEPROM_CHECKSUM-1. */
2221 /* Include 0x0-0x3F in the checksum */
2222 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
2223 if (hw->eeprom.ops.read(hw, i, &word)) {
2224 DEBUGOUT("EEPROM read failed\n");
2225 return IXGBE_ERR_EEPROM;
/* Walk each section pointer (fw pointer excluded) and sum its words. */
2230 /* Include all data from pointers except for the fw pointer */
2231 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
2232 if (hw->eeprom.ops.read(hw, i, &pointer)) {
2233 DEBUGOUT("EEPROM read failed\n");
2234 return IXGBE_ERR_EEPROM;
2237 /* If the pointer seems invalid */
2238 if (pointer == 0xFFFF || pointer == 0)
/* The first word of each section holds its length in words. */
2241 if (hw->eeprom.ops.read(hw, pointer, &length)) {
2242 DEBUGOUT("EEPROM read failed\n");
2243 return IXGBE_ERR_EEPROM;
2246 if (length == 0xFFFF || length == 0)
2249 for (j = pointer + 1; j <= pointer + length; j++) {
2250 if (hw->eeprom.ops.read(hw, j, &word)) {
2251 DEBUGOUT("EEPROM read failed\n");
2252 return IXGBE_ERR_EEPROM;
/* Stored checksum is the value that makes the total sum IXGBE_EEPROM_SUM. */
2258 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
2260 return (s32)checksum;
2264 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2265 * @hw: pointer to hardware structure
2266 * @checksum_val: calculated checksum
2268 * Performs checksum calculation and validates the EEPROM checksum. If the
2269 * caller does not need checksum_val, the value can be NULL.
2271 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2276 u16 read_checksum = 0;
2278 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2280 /* Read the first word from the EEPROM. If this times out or fails, do
2281 * not continue or we could be in for a very long wait while every
2284 status = hw->eeprom.ops.read(hw, 0, &checksum);
2286 DEBUGOUT("EEPROM read failed\n");
/* calc_checksum returns the 16-bit checksum in the low half of a
 * non-negative s32, or a negative error code. */
2290 status = hw->eeprom.ops.calc_checksum(hw);
2294 checksum = (u16)(status & 0xffff);
2296 status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2298 DEBUGOUT("EEPROM read failed\n");
2302 /* Verify read checksum from EEPROM is the same as
2303 * calculated checksum
2305 if (read_checksum != checksum)
2306 status = IXGBE_ERR_EEPROM_CHECKSUM;
2308 /* If the user cares, return the calculated checksum */
2310 *checksum_val = checksum;
2316 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2317 * @hw: pointer to hardware structure
2319 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2324 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2326 /* Read the first word from the EEPROM. If this times out or fails, do
2327 * not continue or we could be in for a very long wait while every
2330 status = hw->eeprom.ops.read(hw, 0, &checksum);
2332 DEBUGOUT("EEPROM read failed\n");
/* Recompute the checksum and persist it at the checksum word offset. */
2336 status = hw->eeprom.ops.calc_checksum(hw);
2340 checksum = (u16)(status & 0xffff);
2342 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
2348 * ixgbe_validate_mac_addr - Validate MAC address
2349 * @mac_addr: pointer to MAC address.
2351 * Tests a MAC address to ensure it is a valid Individual Address.
2353 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2355 s32 status = IXGBE_SUCCESS;
2357 DEBUGFUNC("ixgbe_validate_mac_addr");
/* Reject multicast, broadcast, and all-zero addresses; anything else is
 * accepted as a valid unicast (individual) address. */
2359 /* Make sure it is not a multicast address */
2360 if (IXGBE_IS_MULTICAST(mac_addr)) {
2361 status = IXGBE_ERR_INVALID_MAC_ADDR;
2362 /* Not a broadcast address */
2363 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
2364 status = IXGBE_ERR_INVALID_MAC_ADDR;
2365 /* Reject the zero address */
2366 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2367 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2368 status = IXGBE_ERR_INVALID_MAC_ADDR;
2374 * ixgbe_set_rar_generic - Set Rx address register
2375 * @hw: pointer to hardware structure
2376 * @index: Receive address register to write
2377 * @addr: Address to put into receive address register
2378 * @vmdq: VMDq "set" or "pool" index
2379 * @enable_addr: set flag that address is active
2381 * Puts an ethernet address into a receive address register.
2383 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2386 u32 rar_low, rar_high;
2387 u32 rar_entries = hw->mac.num_rar_entries;
2389 DEBUGFUNC("ixgbe_set_rar_generic");
2391 /* Make sure we are using a valid rar index range */
2392 if (index >= rar_entries) {
2393 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2394 "RAR index %d is out of range.\n", index);
2395 return IXGBE_ERR_INVALID_ARGUMENT;
2398 /* setup VMDq pool selection before this RAR gets enabled */
2399 hw->mac.ops.set_vmdq(hw, index, vmdq);
2402 * HW expects these in little endian so we reverse the byte
2403 * order from network order (big endian) to little endian
/* RAL holds the first four address octets, lowest octet in bits 7:0. */
2405 rar_low = ((u32)addr[0] |
2406 ((u32)addr[1] << 8) |
2407 ((u32)addr[2] << 16) |
2408 ((u32)addr[3] << 24));
2410 * Some parts put the VMDq setting in the extra RAH bits,
2411 * so save everything except the lower 16 bits that hold part
2412 * of the address and the address valid bit.
2414 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2415 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2416 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
/* Only assert Address Valid when the caller requests an active entry. */
2418 if (enable_addr != 0)
2419 rar_high |= IXGBE_RAH_AV;
2421 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2422 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2424 return IXGBE_SUCCESS;
2428 * ixgbe_clear_rar_generic - Remove Rx address register
2429 * @hw: pointer to hardware structure
2430 * @index: Receive address register to write
2432 * Clears an ethernet address from a receive address register.
2434 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2437 u32 rar_entries = hw->mac.num_rar_entries;
2439 DEBUGFUNC("ixgbe_clear_rar_generic");
2441 /* Make sure we are using a valid rar index range */
2442 if (index >= rar_entries) {
2443 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2444 "RAR index %d is out of range.\n", index);
2445 return IXGBE_ERR_INVALID_ARGUMENT;
2449 * Some parts put the VMDq setting in the extra RAH bits,
2450 * so save everything except the lower 16 bits that hold part
2451 * of the address and the address valid bit.
/* Preserve RAH upper bits (VMDq pool select on some parts) while
 * clearing the address bytes and the Address Valid bit. */
2453 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2454 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2456 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2457 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2459 /* clear VMDq pool/queue selection for this RAR */
2460 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2462 return IXGBE_SUCCESS;
2466 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2467 * @hw: pointer to hardware structure
2469 * Places the MAC address in receive address register 0 and clears the rest
2470 * of the receive address registers. Clears the multicast table. Assumes
2471 * the receiver is in reset when the routine is called.
* Always returns IXGBE_SUCCESS.
2473 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
2476 	u32 rar_entries = hw->mac.num_rar_entries;
2478 	DEBUGFUNC("ixgbe_init_rx_addrs_generic");
2481 	 * If the current mac address is valid, assume it is a software override
2482 	 * to the permanent address.
2483 	 * Otherwise, use the permanent address from the eeprom.
2485 	if (ixgbe_validate_mac_addr(hw->mac.addr) ==
2486 	    IXGBE_ERR_INVALID_MAC_ADDR) {
2487 		/* Get the MAC address from the RAR0 for later reference */
2488 		hw->mac.ops.get_mac_addr(hw, hw->mac.addr)
2490 		DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
2491 			  hw->mac.addr[0], hw->mac.addr[1],
2493 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2494 			  hw->mac.addr[4], hw->mac.addr[5]);
2496 		/* Setup the receive address. */
2497 		DEBUGOUT("Overriding MAC Address in RAR[0]\n");
2498 		DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
2499 			  hw->mac.addr[0], hw->mac.addr[1],
2501 		DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2502 			  hw->mac.addr[4], hw->mac.addr[5]);
2504 		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2507 	/* clear VMDq pool/queue selection for RAR 0 */
2508 	hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2510 	hw->addr_ctrl.overflow_promisc = 0;
/* RAR0 now holds the primary MAC, so exactly one entry is in use */
2512 	hw->addr_ctrl.rar_used_count = 1;
2514 	/* Zero out the other receive addresses. */
2515 	DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2516 	for (i = 1; i < rar_entries; i++) {
2517 		IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2518 		IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
/* Program the multicast filter-type (MO field) but leave MFE disabled;
 * the hash table is emptied below and re-enabled when entries exist. */
2522 	hw->addr_ctrl.mta_in_use = 0;
2523 	IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2525 	DEBUGOUT(" Clearing MTA\n");
2526 	for (i = 0; i < hw->mac.mcft_size; i++)
2527 		IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2529 	ixgbe_init_uta_tables(hw);
2531 	return IXGBE_SUCCESS;
2535 * ixgbe_add_uc_addr - Adds a secondary unicast address.
2536 * @hw: pointer to hardware structure
2537 * @addr: new address
2538 * @vmdq: VMDq "set" or "pool" index
2540 * Adds it to unused receive address register or goes into promiscuous mode.
* When all RAR slots are consumed, overflow_promisc is incremented so the
* caller (e.g. the uc addr-list update path) can enable unicast promiscuous
* mode instead.
2542 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2544 	u32 rar_entries = hw->mac.num_rar_entries;
2547 	DEBUGFUNC("ixgbe_add_uc_addr");
2549 	DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2550 		  addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2553 	 * Place this address in the RAR if there is room,
2554 	 * else put the controller into promiscuous mode
2556 	if (hw->addr_ctrl.rar_used_count < rar_entries) {
2557 		rar = hw->addr_ctrl.rar_used_count;
2558 		hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2559 		DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2560 		hw->addr_ctrl.rar_used_count++;
2562 		hw->addr_ctrl.overflow_promisc++;
2565 	DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2569 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2570 * @hw: pointer to hardware structure
2571 * @addr_list: the list of new addresses
2572 * @addr_count: number of addresses
2573 * @next: iterator function to walk the address list
2575 * The given list replaces any existing list. Clears the secondary addrs from
2576 * receive address registers. Uses unused receive address registers for the
2577 * first secondary addresses, and falls back to promiscuous mode as needed.
2579 * Drivers using secondary unicast addresses must set user_set_promisc when
2580 * manually putting the device into promiscuous mode.
* Returns IXGBE_SUCCESS.
2582 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2583 				      u32 addr_count, ixgbe_mc_addr_itr next)
2587 	u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2592 	DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2595 	 * Clear accounting of old secondary address list,
2596 	 * don't count RAR[0]
2598 	uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2599 	hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2600 	hw->addr_ctrl.overflow_promisc = 0;
/* RAR[0] holds the primary MAC and is never touched here; only the
 * secondary entries RAR[1..uc_addr_in_use] are zeroed. */
2602 	/* Zero out the other receive addresses */
2603 	DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2604 	for (i = 0; i < uc_addr_in_use; i++) {
2605 		IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2606 		IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2609 	/* Add the new addresses */
2610 	for (i = 0; i < addr_count; i++) {
2611 		DEBUGOUT(" Adding the secondary addresses:\n");
2612 		addr = next(hw, &addr_list, &vmdq);
2613 		ixgbe_add_uc_addr(hw, addr, vmdq);
/* ixgbe_add_uc_addr bumps overflow_promisc when RARs are exhausted;
 * toggle unicast promiscuous (FCTRL.UPE) on that transition only,
 * never overriding a user-requested promiscuous setting. */
2616 	if (hw->addr_ctrl.overflow_promisc) {
2617 		/* enable promisc if not already in overflow or set by user */
2618 		if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2619 			DEBUGOUT(" Entering address overflow promisc mode\n");
2620 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2621 			fctrl |= IXGBE_FCTRL_UPE;
2622 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2625 		/* only disable if set by overflow, not by user */
2626 		if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2627 			DEBUGOUT(" Leaving address overflow promisc mode\n");
2628 			fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2629 			fctrl &= ~IXGBE_FCTRL_UPE;
2630 			IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2634 	DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2635 	return IXGBE_SUCCESS;
2639 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2640 * @hw: pointer to hardware structure
2641 * @mc_addr: the multicast address
2643 * Extracts the 12 bits, from a multicast address, to determine which
2644 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
2645 * incoming rx multicast addresses, to determine the bit-vector to check in
2646 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2647 * by the MO field of the MCSTCTRL. The MO field is set during initialization
2648 * to mc_filter_type.
* Returns the 12-bit MTA vector computed from the top bytes of @mc_addr.
2650 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2654 	DEBUGFUNC("ixgbe_mta_vector");
/* Each filter type selects a different 12-bit window of the 48-bit
 * address; the shift/merge below mirrors the hardware's MO decoding. */
2656 	switch (hw->mac.mc_filter_type) {
2657 	case 0:   /* use bits [47:36] of the address */
2658 		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2660 	case 1:   /* use bits [46:35] of the address */
2661 		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2663 	case 2:   /* use bits [45:34] of the address */
2664 		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2666 	case 3:   /* use bits [43:32] of the address */
2667 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2669 	default:  /* Invalid mc_filter_type */
2670 		DEBUGOUT("MC filter type param set incorrectly\n");
2675 	/* vector can only be 12-bits or boundary will be exceeded */
2681 * ixgbe_set_mta - Set bit-vector in multicast table
2682 * @hw: pointer to hardware structure
2683 * @mc_addr: Multicast address
2685 * Sets the bit-vector in the multicast table.
* Only the driver's mta_shadow copy is updated here; the hardware MTA
* registers are written later from the shadow (see
* ixgbe_update_mc_addr_list_generic).
2687 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2693 	DEBUGFUNC("ixgbe_set_mta");
2695 	hw->addr_ctrl.mta_in_use++;
2697 	vector = ixgbe_mta_vector(hw, mc_addr);
2698 	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2701 	 * The MTA is a register array of 128 32-bit registers. It is treated
2702 	 * like an array of 4096 bits. We want to set bit
2703 	 * BitArray[vector_value]. So we figure out what register the bit is
2704 	 * in, read it, OR in the new bit, then write back the new value. The
2705 	 * register is determined by the upper 7 bits of the vector value and
2706 	 * the bit within that register are determined by the lower 5 bits of
2709 	vector_reg = (vector >> 5) & 0x7F;
2710 	vector_bit = vector & 0x1F;
2711 	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2715 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2716 * @hw: pointer to hardware structure
2717 * @mc_addr_list: the list of new multicast addresses
2718 * @mc_addr_count: number of addresses
2719 * @next: iterator function to walk the multicast address list
2720 * @clear: flag, when set clears the table beforehand
2722 * When the clear flag is set, the given list replaces any existing list.
2723 * Hashes the given addresses into the multicast table.
* Returns IXGBE_SUCCESS.
2725 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2726 				      u32 mc_addr_count, ixgbe_mc_addr_itr next,
2732 	DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2735 	 * Set the new number of MC addresses that we are being requested to
2738 	hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2739 	hw->addr_ctrl.mta_in_use = 0;
/* NOTE(review): this excerpt elides the gating around the memset; the
 * shadow wipe is expected to honor the @clear flag — confirm against
 * the full source. */
2741 	/* Clear mta_shadow */
2743 		DEBUGOUT(" Clearing MTA\n");
2744 		memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2747 	/* Update mta_shadow */
2748 	for (i = 0; i < mc_addr_count; i++) {
2749 		DEBUGOUT(" Adding the multicast addresses:\n");
2750 		ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
/* Push the shadow table to hardware, then enable the multicast filter
 * (MFE) only if at least one hash entry is in use. */
2754 	for (i = 0; i < hw->mac.mcft_size; i++)
2755 		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2756 				      hw->mac.mta_shadow[i]);
2758 	if (hw->addr_ctrl.mta_in_use > 0)
2759 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2760 				IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2762 	DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2763 	return IXGBE_SUCCESS;
2767 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2768 * @hw: pointer to hardware structure
2770 * Enables multicast address in RAR and the use of the multicast hash table.
* Sets MCSTCTRL.MFE only when the hash table actually has entries in use.
* Returns IXGBE_SUCCESS.
2772 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2774 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2776 	DEBUGFUNC("ixgbe_enable_mc_generic");
2778 	if (a->mta_in_use > 0)
2779 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2780 				hw->mac.mc_filter_type);
2782 	return IXGBE_SUCCESS;
2786 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2787 * @hw: pointer to hardware structure
2789 * Disables multicast address in RAR and the use of the multicast hash table.
* Rewrites MCSTCTRL with the filter type alone (MFE cleared), and only
* bothers when hash entries were in use. Returns IXGBE_SUCCESS.
2791 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2793 	struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2795 	DEBUGFUNC("ixgbe_disable_mc_generic");
2797 	if (a->mta_in_use > 0)
2798 		IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2800 	return IXGBE_SUCCESS;
2804 * ixgbe_fc_enable_generic - Enable flow control
2805 * @hw: pointer to hardware structure
2807 * Enable flow control according to the current settings.
* Returns IXGBE_SUCCESS, IXGBE_ERR_INVALID_LINK_SETTINGS on a bad
* water-mark/pause-time configuration, or IXGBE_ERR_CONFIG for an
* unknown fc.current_mode.
2809 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2811 	s32 ret_val = IXGBE_SUCCESS;
2812 	u32 mflcn_reg, fccfg_reg;
2817 	DEBUGFUNC("ixgbe_fc_enable_generic");
2819 	/* Validate the water mark configuration */
2820 	if (!hw->fc.pause_time) {
2821 		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
/* Validate per-TC water marks, but only for TCs where Tx pause is
 * requested and a high-water mark is configured. */
2825 	/* Low water mark of zero causes XOFF floods */
2826 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2827 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2828 		    hw->fc.high_water[i]) {
2829 			if (!hw->fc.low_water[i] ||
2830 			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2831 				DEBUGOUT("Invalid water mark configuration\n");
2832 				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2838 	/* Negotiate the fc mode to use */
2839 	hw->mac.ops.fc_autoneg(hw);
2841 	/* Disable any previous flow control settings */
2842 	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2843 	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2845 	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2846 	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2849 	 * The possible values of fc.current_mode are:
2850 	 * 0: Flow control is completely disabled
2851 	 * 1: Rx flow control is enabled (we can receive pause frames,
2852 	 *    but not send pause frames).
2853 	 * 2: Tx flow control is enabled (we can send pause frames but
2854 	 *    we do not support receiving pause frames).
2855 	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2858 	switch (hw->fc.current_mode) {
2861 		 * Flow control is disabled by software override or autoneg.
2862 		 * The code below will actually disable it in the HW.
2865 	case ixgbe_fc_rx_pause:
2867 		 * Rx Flow control is enabled and Tx Flow control is
2868 		 * disabled by software override. Since there really
2869 		 * isn't a way to advertise that we are capable of RX
2870 		 * Pause ONLY, we will advertise that we support both
2871 		 * symmetric and asymmetric Rx PAUSE.  Later, we will
2872 		 * disable the adapter's ability to send PAUSE frames.
2874 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2876 	case ixgbe_fc_tx_pause:
2878 		 * Tx Flow control is enabled, and Rx Flow control is
2879 		 * disabled by software override.
2881 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2884 		/* Flow control (both Rx and Tx) is enabled by SW override. */
2885 		mflcn_reg |= IXGBE_MFLCN_RFCE;
2886 		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2889 		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
2890 			     "Flow control param set incorrectly\n");
2891 		ret_val = IXGBE_ERR_CONFIG;
2896 	/* Set 802.3x based flow control settings. */
2897 	mflcn_reg |= IXGBE_MFLCN_DPF;
2898 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2899 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2902 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
2903 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2904 		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2905 		    hw->fc.high_water[i]) {
/* water marks are stored in KB units; <<10 converts to bytes */
2906 			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2907 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2908 			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2910 			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2912 			 * In order to prevent Tx hangs when the internal Tx
2913 			 * switch is enabled we must set the high water mark
2914 			 * to the Rx packet buffer size - 24KB.  This allows
2915 			 * the Tx switch to function even under heavy Rx
2918 			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
2921 		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2924 	/* Configure pause time (2 TCs per register) */
2925 	reg = hw->fc.pause_time * 0x00010001;
2926 	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2927 		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2929 	/* Configure flow control refresh threshold value */
2930 	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2937 * ixgbe_negotiate_fc - Negotiate flow control
2938 * @hw: pointer to hardware structure
2939 * @adv_reg: flow control advertised settings
2940 * @lp_reg: link partner's flow control settings
2941 * @adv_sym: symmetric pause bit in advertisement
2942 * @adv_asm: asymmetric pause bit in advertisement
2943 * @lp_sym: symmetric pause bit in link partner advertisement
2944 * @lp_asm: asymmetric pause bit in link partner advertisement
2946 * Find the intersection between advertised settings and link partner's
2947 * advertised settings
* Implements the standard sym/asym PAUSE resolution table and stores the
* outcome in hw->fc.current_mode. Returns IXGBE_SUCCESS, or
* IXGBE_ERR_FC_NOT_NEGOTIATED when either advertisement register is zero.
2949 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2950 		       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2952 	if ((!(adv_reg)) ||  (!(lp_reg))) {
2953 		ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2954 			     "Local or link partner's advertised flow control "
2955 			     "settings are NULL. Local: %x, link partner: %x\n",
2957 		return IXGBE_ERR_FC_NOT_NEGOTIATED;
/* Both sides symmetric-capable: full, unless the user wanted Rx-only */
2960 	if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2962 		 * Now we need to check if the user selected Rx ONLY
2963 		 * of pause frames.  In this case, we had to advertise
2964 		 * FULL flow control because we could not advertise RX
2965 		 * ONLY. Hence, we must now check to see if we need to
2966 		 * turn OFF the TRANSMISSION of PAUSE frames.
2968 		if (hw->fc.requested_mode == ixgbe_fc_full) {
2969 			hw->fc.current_mode = ixgbe_fc_full;
2970 			DEBUGOUT("Flow Control = FULL.\n");
2972 			hw->fc.current_mode = ixgbe_fc_rx_pause;
2973 			DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2975 	} else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2976 		   (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2977 		hw->fc.current_mode = ixgbe_fc_tx_pause;
2978 		DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2979 	} else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2980 		   !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2981 		hw->fc.current_mode = ixgbe_fc_rx_pause;
2982 		DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2984 		hw->fc.current_mode = ixgbe_fc_none;
2985 		DEBUGOUT("Flow Control = NONE.\n");
2987 	return IXGBE_SUCCESS;
2991 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2992 * @hw: pointer to hardware structure
2994 * Enable flow control according on 1 gig fiber.
* Resolves flow control from the PCS 1G autoneg advertisement and link
* partner ability registers. Returns the ixgbe_negotiate_fc() result, or
* IXGBE_ERR_FC_NOT_NEGOTIATED when AN did not complete (or timed out).
2996 STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2998 	u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2999 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3002 	 * On multispeed fiber at 1g, bail out if
3003 	 * - link is up but AN did not complete, or if
3004 	 * - link is up and AN completed but timed out
3007 	linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
3008 	if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
3009 	    (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
3010 		DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
3014 	pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
3015 	pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
/* Same SYM/ASM bit positions apply to both local and LP registers */
3017 	ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
3018 				      pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
3019 				      IXGBE_PCS1GANA_ASM_PAUSE,
3020 				      IXGBE_PCS1GANA_SYM_PAUSE,
3021 				      IXGBE_PCS1GANA_ASM_PAUSE);
3028 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
3029 * @hw: pointer to hardware structure
3031 * Enable flow control according to IEEE clause 37.
* Resolves flow control from the KX/KX4 backplane AN registers (AUTOC vs
* ANLP1). Returns the ixgbe_negotiate_fc() result, or
* IXGBE_ERR_FC_NOT_NEGOTIATED when backplane AN has not completed.
3033 STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
3035 	u32 links2, anlp1_reg, autoc_reg, links;
3036 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3039 	 * On backplane, bail out if
3040 	 * - backplane autoneg was not completed, or if
3041 	 * - we are 82599 and link partner is not AN enabled
3043 	links = IXGBE_READ_REG(hw, IXGBE_LINKS);
3044 	if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
3045 		DEBUGOUT("Auto-Negotiation did not complete\n");
/* 82599 additionally reports link-partner AN support in LINKS2 */
3049 	if (hw->mac.type == ixgbe_mac_82599EB) {
3050 		links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
3051 		if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
3052 			DEBUGOUT("Link partner is not AN enabled\n");
3057 	 * Read the 10g AN autoc and LP ability registers and resolve
3058 	 * local flow control settings accordingly
3060 	autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3061 	anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
3063 	ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
3064 		anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
3065 		IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
3072 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
3073 * @hw: pointer to hardware structure
3075 * Enable flow control according to IEEE clause 37.
* Reads the local and link-partner autoneg advertisement words over MDIO
* and resolves flow control from the TAF pause bits.
* NOTE(review): the phy read_reg return codes are ignored; on a failed
* read the ability words stay 0 and ixgbe_negotiate_fc() will return
* IXGBE_ERR_FC_NOT_NEGOTIATED — confirm this is the intended fallback.
3077 STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
3079 	u16 technology_ability_reg = 0;
3080 	u16 lp_technology_ability_reg = 0;
3082 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
3083 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3084 			     &technology_ability_reg);
3085 	hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
3086 			     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3087 			     &lp_technology_ability_reg);
3089 	return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
3090 				  (u32)lp_technology_ability_reg,
3091 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
3092 				  IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
3096 * ixgbe_fc_autoneg - Configure flow control
3097 * @hw: pointer to hardware structure
3099 * Compares our advertised flow control capabilities to those advertised by
3100 * our link partner, and determines the proper flow control mode to use.
* Dispatches to the media-specific negotiation helper. On any failure
* (autoneg disabled, link down, unsupported media, negotiation error)
* it falls back to hw->fc.requested_mode with fc_was_autonegged = false.
3102 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
3104 	s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3105 	ixgbe_link_speed speed;
3108 	DEBUGFUNC("ixgbe_fc_autoneg");
3111 	 * AN should have completed when the cable was plugged in.
3112 	 * Look for reasons to bail out.  Bail out if:
3113 	 * - FC autoneg is disabled, or if
3116 	if (hw->fc.disable_fc_autoneg) {
3117 		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3118 			     "Flow control autoneg is disabled");
3122 	hw->mac.ops.check_link(hw, &speed, &link_up, false);
3124 		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
3128 	switch (hw->phy.media_type) {
3129 	/* Autoneg flow control on fiber adapters */
3130 	case ixgbe_media_type_fiber_fixed:
3131 	case ixgbe_media_type_fiber_qsfp:
3132 	case ixgbe_media_type_fiber:
/* PCS-based fiber FC autoneg applies only at 1G link speed */
3133 		if (speed == IXGBE_LINK_SPEED_1GB_FULL)
3134 			ret_val = ixgbe_fc_autoneg_fiber(hw);
3137 	/* Autoneg flow control on backplane adapters */
3138 	case ixgbe_media_type_backplane:
3139 		ret_val = ixgbe_fc_autoneg_backplane(hw);
3142 	/* Autoneg flow control on copper adapters */
3143 	case ixgbe_media_type_copper:
3144 		if (ixgbe_device_supports_autoneg_fc(hw))
3145 			ret_val = ixgbe_fc_autoneg_copper(hw);
3153 	if (ret_val == IXGBE_SUCCESS) {
3154 		hw->fc.fc_was_autonegged = true;
3156 		hw->fc.fc_was_autonegged = false;
3157 		hw->fc.current_mode = hw->fc.requested_mode;
3162 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3163 * @hw: pointer to hardware structure
3165 * System-wide timeout range is encoded in PCIe Device Control2 register.
3167 * Add 10% to specified maximum and return the number of times to poll for
3168 * completion timeout, in units of 100 microsec. Never return less than
3169 * 800 = 80 millisec.
3171 STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3176 	devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3177 	devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
/* Map the 4-bit completion-timeout range encoding to the range's upper
 * bound expressed in 100 us units. */
3180 	case IXGBE_PCIDEVCTRL2_65_130ms:
3181 		pollcnt = 1300;		/* 130 millisec */
3183 	case IXGBE_PCIDEVCTRL2_260_520ms:
3184 		pollcnt = 5200;		/* 520 millisec */
3186 	case IXGBE_PCIDEVCTRL2_1_2s:
3187 		pollcnt = 20000;	/* 2 sec */
3189 	case IXGBE_PCIDEVCTRL2_4_8s:
3190 		pollcnt = 80000;	/* 8 sec */
3192 	case IXGBE_PCIDEVCTRL2_17_34s:
3193 		pollcnt = 34000;	/* 34 sec */
/* shorter/unlisted encodings all use the 80 ms floor below */
3195 	case IXGBE_PCIDEVCTRL2_50_100us:	/* 100 microsecs */
3196 	case IXGBE_PCIDEVCTRL2_1_2ms:		/* 2 millisecs */
3197 	case IXGBE_PCIDEVCTRL2_16_32ms:		/* 32 millisec */
3198 	case IXGBE_PCIDEVCTRL2_16_32ms_def:	/* 32 millisec default */
3200 		pollcnt = 800;		/* 80 millisec minimum */
3204 	/* add 10% to spec maximum */
3205 	return (pollcnt * 11) / 10;
3209 * ixgbe_disable_pcie_master - Disable PCI-express master access
3210 * @hw: pointer to hardware structure
3212 * Disables PCI-Express master access and verifies there are no pending
3213 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
3214 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
3215 * is returned signifying master requests disabled.
3217 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
3219 	s32 status = IXGBE_SUCCESS;
3223 	DEBUGFUNC("ixgbe_disable_pcie_master");
3225 	/* Always set this bit to ensure any future transactions are blocked */
3226 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
3228 	/* Exit if master requests are blocked */
3229 	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
3230 	    IXGBE_REMOVED(hw->hw_addr))
3233 	/* Poll for master request bit to clear */
3234 	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
3236 		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
3241 	 * Two consecutive resets are required via CTRL.RST per datasheet
3242 	 * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
3243 	 * of this need.  The first reset prevents new master requests from
3244 	 * being issued by our device.  We then must wait 1usec or more for any
3245 	 * remaining completions from the PCIe bus to trickle in, and then reset
3246 	 * again to clear out any effects they may have had on our device.
3248 	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
3249 	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
/* X550 and later do not need the PCIe transaction-pending fallback poll */
3251 	if (hw->mac.type >= ixgbe_mac_X550)
3255 	 * Before proceeding, make sure that the PCIe block does not have
3256 	 * transactions pending.
/* poll count is in 100 us units (see ixgbe_pcie_timeout_poll) */
3258 	poll = ixgbe_pcie_timeout_poll(hw);
3259 	for (i = 0; i < poll; i++) {
3261 		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
3262 		if (IXGBE_REMOVED(hw->hw_addr))
3264 		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
3268 	ERROR_REPORT1(IXGBE_ERROR_POLLING,
3269 		     "PCIe transaction pending bit also did not clear.\n");
3270 	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
3277 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
3278 * @hw: pointer to hardware structure
3279 * @mask: Mask to specify which semaphore to acquire
3281 * Acquires the SWFW semaphore through the GSSR register for the specified
3282 * function (CSR, PHY0, PHY1, EEPROM, Flash)
* Returns IXGBE_SUCCESS, or IXGBE_ERR_SWFW_SYNC if the resource could not
* be acquired before the timeout.
3284 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
/* The firmware's ownership bits for the same resource sit 5 bit
 * positions above the software bits. */
3288 	u32 fwmask = mask << 5;
3292 	DEBUGFUNC("ixgbe_acquire_swfw_sync");
3294 	for (i = 0; i < timeout; i++) {
3296 		 * SW NVM semaphore bit is used for access to all
3297 		 * SW_FW_SYNC bits (not just NVM)
3299 		if (ixgbe_get_eeprom_semaphore(hw))
3300 			return IXGBE_ERR_SWFW_SYNC;
/* GSSR is read-modify-written only while holding the EEPROM semaphore */
3302 		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3303 		if (!(gssr & (fwmask | swmask))) {
3305 			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3306 			ixgbe_release_eeprom_semaphore(hw);
3307 			return IXGBE_SUCCESS;
3309 		/* Resource is currently in use by FW or SW */
3310 		ixgbe_release_eeprom_semaphore(hw);
3315 	/* If time expired clear the bits holding the lock and retry */
3316 	if (gssr & (fwmask | swmask))
3317 		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
3320 	return IXGBE_ERR_SWFW_SYNC;
3324 * ixgbe_release_swfw_sync - Release SWFW semaphore
3325 * @hw: pointer to hardware structure
3326 * @mask: Mask to specify which semaphore to release
3328 * Releases the SWFW semaphore through the GSSR register for the specified
3329 * function (CSR, PHY0, PHY1, EEPROM, Flash)
* The GSSR read-modify-write is bracketed by the EEPROM semaphore, which
* serializes all SW_FW_SYNC accesses.
3331 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3336 	DEBUGFUNC("ixgbe_release_swfw_sync");
3338 	ixgbe_get_eeprom_semaphore(hw);
3340 	gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3342 	IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3344 	ixgbe_release_eeprom_semaphore(hw);
3348 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3349 * @hw: pointer to hardware structure
3351 * Stops the receive data path and waits for the HW to internally empty
3352 * the Rx security block
* Always returns IXGBE_SUCCESS; a poll timeout is informational only and
* initialization continues regardless.
3354 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3356 #define IXGBE_MAX_SECRX_POLL 4000
3361 	DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3364 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3365 	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3366 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
/* Wait (up to IXGBE_MAX_SECRX_POLL iterations) for the security block
 * to report it has drained. */
3367 	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3368 		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3369 		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3372 			/* Use interrupt-safe sleep just in case */
3376 	/* For informational purposes only */
3377 	if (i >= IXGBE_MAX_SECRX_POLL)
3378 		DEBUGOUT("Rx unit being enabled before security "
3379 			 "path fully disabled.  Continuing with init.\n");
3381 	return IXGBE_SUCCESS;
3385 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
3386 * @hw: pointer to hardware structure
3387 * @locked: bool to indicate whether the SW/FW lock was taken
3388 * @reg_val: Value we read from AUTOC
3390 * The default case requires no protection so just to the register read.
* No lock is needed for generic MACs; *reg_val receives AUTOC and
* IXGBE_SUCCESS is always returned.
3392 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
3395 	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3396 	return IXGBE_SUCCESS;
3400 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
3401 * @hw: pointer to hardware structure
3402 * @reg_val: value to write to AUTOC
3403 * @locked: bool to indicate whether the SW/FW lock was already taken by
* (the matching read; unused in the generic, lock-free case)
3406 * The default case requires no protection so just to the register write.
* Always returns IXGBE_SUCCESS.
3408 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
3410 	UNREFERENCED_1PARAMETER(locked);
3412 	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
3413 	return IXGBE_SUCCESS;
3417 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3418 * @hw: pointer to hardware structure
3420 * Enables the receive data path.
* Clears SECRXCTRL.RX_DIS and flushes the write so the Rx security path
* restart is posted to hardware. Always returns IXGBE_SUCCESS.
3422 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3426 	DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3428 	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3429 	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3430 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3431 	IXGBE_WRITE_FLUSH(hw);
3433 	return IXGBE_SUCCESS;
3437 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3438 * @hw: pointer to hardware structure
3439 * @regval: register value to write to RXCTRL
3441 * Enables the Rx DMA unit
* Only the RXEN bit of @regval is consulted; the actual register work is
* delegated to the ixgbe_enable_rx/ixgbe_disable_rx helpers.
3443 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3445 	DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3447 	if (regval & IXGBE_RXCTRL_RXEN)
3448 		ixgbe_enable_rx(hw);
3450 		ixgbe_disable_rx(hw);
3452 	return IXGBE_SUCCESS;
3456 * ixgbe_blink_led_start_generic - Blink LED based on index.
3457 * @hw: pointer to hardware structure
3458 * @index: led number to blink
* Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM for a bad LED index, or the
* error from the protected AUTOC read/write.
3460 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
3462 	ixgbe_link_speed speed = 0;
3465 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3466 	s32 ret_val = IXGBE_SUCCESS;
3467 	bool locked = false;
3469 	DEBUGFUNC("ixgbe_blink_led_start_generic");
3472 		return IXGBE_ERR_PARAM;
3475 	 * Link must be up to auto-blink the LEDs;
3476 	 * Force it if link is down.
3478 	hw->mac.ops.check_link(hw, &speed, &link_up, false);
/* Link down: force link up (FLU) and restart AN through the MAC's
 * protected AUTOC accessors so SW/FW arbitration is honored. */
3481 		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3482 		if (ret_val != IXGBE_SUCCESS)
3485 		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3486 		autoc_reg |= IXGBE_AUTOC_FLU;
3488 		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3489 		if (ret_val != IXGBE_SUCCESS)
3492 		IXGBE_WRITE_FLUSH(hw);
/* Switch the selected LED to hardware blink mode */
3496 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
3497 	led_reg |= IXGBE_LED_BLINK(index);
3498 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3499 	IXGBE_WRITE_FLUSH(hw);
3506 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3507 * @hw: pointer to hardware structure
3508 * @index: led number to stop blinking
* Clears forced-link-up (FLU), restarts AN, and restores the LED to
* link-active mode. Returns IXGBE_SUCCESS, IXGBE_ERR_PARAM for a bad
* index, or the error from the protected AUTOC read/write.
3510 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
3513 	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3514 	s32 ret_val = IXGBE_SUCCESS;
3515 	bool locked = false;
3517 	DEBUGFUNC("ixgbe_blink_led_stop_generic");
3520 		return IXGBE_ERR_PARAM;
3523 	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3524 	if (ret_val != IXGBE_SUCCESS)
3527 	autoc_reg &= ~IXGBE_AUTOC_FLU;
3528 	autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3530 	ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3531 	if (ret_val != IXGBE_SUCCESS)
/* Revert the LED from blink mode back to link-activity indication */
3534 	led_reg &= ~IXGBE_LED_MODE_MASK(index);
3535 	led_reg &= ~IXGBE_LED_BLINK(index);
3536 	led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3537 	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3538 	IXGBE_WRITE_FLUSH(hw);
3545 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3546 * @hw: pointer to hardware structure
3547 * @san_mac_offset: SAN MAC address offset
3549 * This function will read the EEPROM location for the SAN MAC address
3550 * pointer, and returns the value at that location.  This is used in both
3551 * get and set mac_addr routines.
* Returns the eeprom.ops.read status; on failure an error is also logged.
3553 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3554 					 u16 *san_mac_offset)
3558 	DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3561 	 * First read the EEPROM pointer to see if the MAC addresses are
/* Read the single pointer word at IXGBE_SAN_MAC_ADDR_PTR */
3564 	ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3567 		ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3568 			      "eeprom at offset %d failed",
3569 			      IXGBE_SAN_MAC_ADDR_PTR);
3576 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3577 * @hw: pointer to hardware structure
3578 * @san_mac_addr: SAN MAC address
3580 * Reads the SAN MAC address from the EEPROM, if it's available.  This is
3581 * per-port, so set_lan_id() must be called before reading the addresses.
3582 * set_lan_id() is called by identify_sfp(), but this cannot be relied
3583 * upon for non-SFP connections, so we must call it here.
* Always returns IXGBE_SUCCESS; when no SAN address is present the output
* buffer is filled with 0xFF instead of failing.
3585 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3587 	u16 san_mac_data, san_mac_offset;
3591 	DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3594 	 * First read the EEPROM pointer to see if the MAC addresses are
3595 	 * available.  If they're not, no point in calling set_lan_id() here.
3597 	ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3598 	if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3599 		goto san_mac_addr_out;
3601 	/* make sure we know which port we need to program */
3602 	hw->mac.ops.set_lan_id(hw);
3603 	/* apply the port offset to the address offset */
3604 	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3605 			 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
/* The 6-byte address is stored as 3 EEPROM words, low byte first */
3606 	for (i = 0; i < 3; i++) {
3607 		ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3610 			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3611 				      "eeprom read at offset %d failed",
3613 			goto san_mac_addr_out;
3615 		san_mac_addr[i * 2] = (u8)(san_mac_data);
3616 		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3619 	return IXGBE_SUCCESS;
3623 	 * No addresses available in this EEPROM.  It's not an
3624 	 * error though, so just wipe the local address and return.
3626 	for (i = 0; i < 6; i++)
3627 		san_mac_addr[i] = 0xFF;
3628 	return IXGBE_SUCCESS;
3632 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3633 * @hw: pointer to hardware structure
3634 * @san_mac_addr: SAN MAC address
3636 * Write a SAN MAC address to the EEPROM.
3638 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3641 u16 san_mac_data, san_mac_offset;
3644 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3646 /* Look for SAN mac address pointer. If not defined, return */
3647 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3648 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3649 return IXGBE_ERR_NO_SAN_ADDR_PTR;
3651 /* Make sure we know which port we need to write */
3652 hw->mac.ops.set_lan_id(hw);
3653 /* Apply the port offset to the address offset */
3654 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3655 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3657 for (i = 0; i < 3; i++) {
3658 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3659 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3660 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3664 return IXGBE_SUCCESS;
3668 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3669 * @hw: pointer to hardware structure
3671 * Read PCIe configuration space, and get the MSI-X vector count from
3672 * the capabilities table.
3674 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3680 switch (hw->mac.type) {
3681 case ixgbe_mac_82598EB:
3682 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3683 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3685 case ixgbe_mac_82599EB:
3686 case ixgbe_mac_X540:
3687 case ixgbe_mac_X550:
3688 case ixgbe_mac_X550EM_x:
3689 case ixgbe_mac_X550EM_a:
3690 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3691 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3697 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3698 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3699 if (IXGBE_REMOVED(hw->hw_addr))
3701 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3703 /* MSI-X count is zero-based in HW */
3706 if (msix_count > max_msix_count)
3707 msix_count = max_msix_count;
3713 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3714 * @hw: pointer to hardware structure
3715 * @addr: Address to put into receive address register
3716 * @vmdq: VMDq pool to assign
3718 * Puts an ethernet address into a receive address register, or
3719 * finds the rar that it is aleady in; adds to the pool list
3721 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3723 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3724 u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3726 u32 rar_low, rar_high;
3727 u32 addr_low, addr_high;
3729 DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3731 /* swap bytes for HW little endian */
3732 addr_low = addr[0] | (addr[1] << 8)
3735 addr_high = addr[4] | (addr[5] << 8);
3738 * Either find the mac_id in rar or find the first empty space.
3739 * rar_highwater points to just after the highest currently used
3740 * rar in order to shorten the search. It grows when we add a new
3743 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3744 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3746 if (((IXGBE_RAH_AV & rar_high) == 0)
3747 && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3748 first_empty_rar = rar;
3749 } else if ((rar_high & 0xFFFF) == addr_high) {
3750 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3751 if (rar_low == addr_low)
3752 break; /* found it already in the rars */
3756 if (rar < hw->mac.rar_highwater) {
3757 /* already there so just add to the pool bits */
3758 ixgbe_set_vmdq(hw, rar, vmdq);
3759 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3760 /* stick it into first empty RAR slot we found */
3761 rar = first_empty_rar;
3762 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3763 } else if (rar == hw->mac.rar_highwater) {
3764 /* add it to the top of the list and inc the highwater mark */
3765 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3766 hw->mac.rar_highwater++;
3767 } else if (rar >= hw->mac.num_rar_entries) {
3768 return IXGBE_ERR_INVALID_MAC_ADDR;
3772 * If we found rar[0], make sure the default pool bit (we use pool 0)
3773 * remains cleared to be sure default pool packets will get delivered
3776 ixgbe_clear_vmdq(hw, rar, 0);
3782 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3783 * @hw: pointer to hardware struct
3784 * @rar: receive address register index to disassociate
3785 * @vmdq: VMDq pool index to remove from the rar
3787 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3789 u32 mpsar_lo, mpsar_hi;
3790 u32 rar_entries = hw->mac.num_rar_entries;
3792 DEBUGFUNC("ixgbe_clear_vmdq_generic");
3794 /* Make sure we are using a valid rar index range */
3795 if (rar >= rar_entries) {
3796 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3797 "RAR index %d is out of range.\n", rar);
3798 return IXGBE_ERR_INVALID_ARGUMENT;
3801 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3802 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3804 if (IXGBE_REMOVED(hw->hw_addr))
3807 if (!mpsar_lo && !mpsar_hi)
3810 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3812 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3816 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3819 } else if (vmdq < 32) {
3820 mpsar_lo &= ~(1 << vmdq);
3821 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3823 mpsar_hi &= ~(1 << (vmdq - 32));
3824 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3827 /* was that the last pool using this rar? */
3828 if (mpsar_lo == 0 && mpsar_hi == 0 &&
3829 rar != 0 && rar != hw->mac.san_mac_rar_index)
3830 hw->mac.ops.clear_rar(hw, rar);
3832 return IXGBE_SUCCESS;
3836 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3837 * @hw: pointer to hardware struct
3838 * @rar: receive address register index to associate with a VMDq index
3839 * @vmdq: VMDq pool index
3841 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3844 u32 rar_entries = hw->mac.num_rar_entries;
3846 DEBUGFUNC("ixgbe_set_vmdq_generic");
3848 /* Make sure we are using a valid rar index range */
3849 if (rar >= rar_entries) {
3850 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3851 "RAR index %d is out of range.\n", rar);
3852 return IXGBE_ERR_INVALID_ARGUMENT;
3856 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3858 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3860 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3861 mpsar |= 1 << (vmdq - 32);
3862 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3864 return IXGBE_SUCCESS;
3868 * This function should only be involved in the IOV mode.
3869 * In IOV mode, Default pool is next pool after the number of
3870 * VFs advertized and not 0.
3871 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3873 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3874 * @hw: pointer to hardware struct
3875 * @vmdq: VMDq pool index
3877 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3879 u32 rar = hw->mac.san_mac_rar_index;
3881 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3884 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3885 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3887 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3888 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3891 return IXGBE_SUCCESS;
3895 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3896 * @hw: pointer to hardware structure
3898 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3902 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3903 DEBUGOUT(" Clearing UTA\n");
3905 for (i = 0; i < 128; i++)
3906 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3908 return IXGBE_SUCCESS;
3912 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3913 * @hw: pointer to hardware structure
3914 * @vlan: VLAN id to write to VLAN filter
3915 * @vlvf_bypass: true to find vlanid only, false returns first empty slot if
3919 * return the VLVF index where this VLAN id should be placed
3922 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
3924 s32 regindex, first_empty_slot;
3927 /* short cut the special case */
3931 /* if vlvf_bypass is set we don't want to use an empty slot, we
3932 * will simply bypass the VLVF if there are no entries present in the
3933 * VLVF that contain our VLAN
3935 first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
3937 /* add VLAN enable bit for comparison */
3938 vlan |= IXGBE_VLVF_VIEN;
3940 /* Search for the vlan id in the VLVF entries. Save off the first empty
3941 * slot found along the way.
3943 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
3945 for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
3946 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3949 if (!first_empty_slot && !bits)
3950 first_empty_slot = regindex;
3953 /* If we are here then we didn't find the VLAN. Return first empty
3954 * slot we found during our search, else error.
3956 if (!first_empty_slot)
3957 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");
3959 return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
3963 * ixgbe_set_vfta_generic - Set VLAN filter table
3964 * @hw: pointer to hardware structure
3965 * @vlan: VLAN id to write to VLAN filter
3966 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
3967 * @vlan_on: boolean flag to turn on/off VLAN
3968 * @vlvf_bypass: boolean flag indicating updating default pool is okay
3970 * Turn on/off specified VLAN in the VLAN filter table.
3972 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3973 bool vlan_on, bool vlvf_bypass)
3975 u32 regidx, vfta_delta, vfta;
3978 DEBUGFUNC("ixgbe_set_vfta_generic");
3980 if (vlan > 4095 || vind > 63)
3981 return IXGBE_ERR_PARAM;
3984 * this is a 2 part operation - first the VFTA, then the
3985 * VLVF and VLVFB if VT Mode is set
3986 * We don't write the VFTA until we know the VLVF part succeeded.
3990 * The VFTA is a bitstring made up of 128 32-bit registers
3991 * that enable the particular VLAN id, much like the MTA:
3992 * bits[11-5]: which register
3993 * bits[4-0]: which bit in the register
3996 vfta_delta = 1 << (vlan % 32);
3997 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
4000 * vfta_delta represents the difference between the current value
4001 * of vfta and the value we want in the register. Since the diff
4002 * is an XOR mask we can just update the vfta using an XOR
4004 vfta_delta &= vlan_on ? ~vfta : vfta;
4008 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
4010 ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
4012 if (ret_val != IXGBE_SUCCESS) {
4019 /* Update VFTA now that we are ready for traffic */
4021 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
4023 return IXGBE_SUCCESS;
4027 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
4028 * @hw: pointer to hardware structure
4029 * @vlan: VLAN id to write to VLAN filter
4030 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
4031 * @vlan_on: boolean flag to turn on/off VLAN in VLVF
4032 * @vfta_delta: pointer to the difference between the current value of VFTA
4033 * and the desired value
4034 * @vfta: the desired value of the VFTA
4035 * @vlvf_bypass: boolean flag indicating updating default pool is okay
4037 * Turn on/off specified bit in VLVF table.
4039 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
4040 bool vlan_on, u32 *vfta_delta, u32 vfta,
4046 DEBUGFUNC("ixgbe_set_vlvf_generic");
4048 if (vlan > 4095 || vind > 63)
4049 return IXGBE_ERR_PARAM;
4051 /* If VT Mode is set
4053 * make sure the vlan is in VLVF
4054 * set the vind bit in the matching VLVFB
4056 * clear the pool bit and possibly the vind
4058 if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
4059 return IXGBE_SUCCESS;
4061 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
4065 bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
4067 /* set the pool bit */
4068 bits |= 1 << (vind % 32);
4072 /* clear the pool bit */
4073 bits ^= 1 << (vind % 32);
4076 !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
4077 /* Clear VFTA first, then disable VLVF. Otherwise
4078 * we run the risk of stray packets leaking into
4079 * the PF via the default pool
4082 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
4084 /* disable VLVF and clear remaining bit from pool */
4085 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
4086 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
4088 return IXGBE_SUCCESS;
4091 /* If there are still bits set in the VLVFB registers
4092 * for the VLAN ID indicated we need to see if the
4093 * caller is requesting that we clear the VFTA entry bit.
4094 * If the caller has requested that we clear the VFTA
4095 * entry bit but there are still pools/VFs using this VLAN
4096 * ID entry then ignore the request. We're not worried
4097 * about the case where we're turning the VFTA VLAN ID
4098 * entry bit on, only when requested to turn it off as
4099 * there may be multiple pools and/or VFs using the
4100 * VLAN ID entry. In that case we cannot clear the
4101 * VFTA bit until all pools/VFs using that VLAN ID have also
4102 * been cleared. This will be indicated by "bits" being
4108 /* record pool change and enable VLAN ID if not already enabled */
4109 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
4110 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
4112 return IXGBE_SUCCESS;
4116 * ixgbe_clear_vfta_generic - Clear VLAN filter table
4117 * @hw: pointer to hardware structure
4119 * Clears the VLAN filer table, and the VMDq index associated with the filter
4121 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
4125 DEBUGFUNC("ixgbe_clear_vfta_generic");
4127 for (offset = 0; offset < hw->mac.vft_size; offset++)
4128 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
4130 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
4131 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
4132 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4133 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
4136 return IXGBE_SUCCESS;
4140 * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
4141 * @hw: pointer to hardware structure
4143 * Contains the logic to identify if we need to verify link for the
4146 static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
4149 /* Does FW say we need the fix */
4150 if (!hw->need_crosstalk_fix)
4153 /* Only consider SFP+ PHYs i.e. media type fiber */
4154 switch (hw->mac.ops.get_media_type(hw)) {
4155 case ixgbe_media_type_fiber:
4156 case ixgbe_media_type_fiber_qsfp:
4166 * ixgbe_check_mac_link_generic - Determine link and speed status
4167 * @hw: pointer to hardware structure
4168 * @speed: pointer to link speed
4169 * @link_up: true when link is up
4170 * @link_up_wait_to_complete: bool used to wait for link up or not
4172 * Reads the links register to determine if link is up and the current speed
4174 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4175 bool *link_up, bool link_up_wait_to_complete)
4177 u32 links_reg, links_orig;
4180 DEBUGFUNC("ixgbe_check_mac_link_generic");
4182 /* If Crosstalk fix enabled do the sanity check of making sure
4183 * the SFP+ cage is full.
4185 if (ixgbe_need_crosstalk_fix(hw)) {
4188 switch (hw->mac.type) {
4189 case ixgbe_mac_82599EB:
4190 sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4193 case ixgbe_mac_X550EM_x:
4194 case ixgbe_mac_X550EM_a:
4195 sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4199 /* sanity check - No SFP+ devices here */
4200 sfp_cage_full = false;
4204 if (!sfp_cage_full) {
4206 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4207 return IXGBE_SUCCESS;
4211 /* clear the old state */
4212 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
4214 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4216 if (links_orig != links_reg) {
4217 DEBUGOUT2("LINKS changed from %08X to %08X\n",
4218 links_orig, links_reg);
4221 if (link_up_wait_to_complete) {
4222 for (i = 0; i < hw->mac.max_link_up_time; i++) {
4223 if (links_reg & IXGBE_LINKS_UP) {
4230 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4233 if (links_reg & IXGBE_LINKS_UP)
4239 switch (links_reg & IXGBE_LINKS_SPEED_82599) {
4240 case IXGBE_LINKS_SPEED_10G_82599:
4241 *speed = IXGBE_LINK_SPEED_10GB_FULL;
4242 if (hw->mac.type >= ixgbe_mac_X550) {
4243 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4244 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
4247 case IXGBE_LINKS_SPEED_1G_82599:
4248 *speed = IXGBE_LINK_SPEED_1GB_FULL;
4250 case IXGBE_LINKS_SPEED_100_82599:
4251 *speed = IXGBE_LINK_SPEED_100_FULL;
4252 if (hw->mac.type == ixgbe_mac_X550) {
4253 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4254 *speed = IXGBE_LINK_SPEED_5GB_FULL;
4257 case IXGBE_LINKS_SPEED_10_X550EM_A:
4258 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4259 #ifdef PREBOOT_SUPPORT
4260 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
4261 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L ||
4262 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
4263 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
4264 *speed = IXGBE_LINK_SPEED_10_FULL;
4266 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
4267 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
4268 *speed = IXGBE_LINK_SPEED_10_FULL;
4269 #endif /* PREBOOT_SUPPORT */
4272 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4275 return IXGBE_SUCCESS;
4279 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
4281 * @hw: pointer to hardware structure
4282 * @wwnn_prefix: the alternative WWNN prefix
4283 * @wwpn_prefix: the alternative WWPN prefix
4285 * This function will read the EEPROM from the alternative SAN MAC address
4286 * block to check the support for the alternative WWNN/WWPN prefix support.
4288 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
4292 u16 alt_san_mac_blk_offset;
4294 DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
4296 /* clear output first */
4297 *wwnn_prefix = 0xFFFF;
4298 *wwpn_prefix = 0xFFFF;
4300 /* check if alternative SAN MAC is supported */
4301 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
4302 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
4303 goto wwn_prefix_err;
4305 if ((alt_san_mac_blk_offset == 0) ||
4306 (alt_san_mac_blk_offset == 0xFFFF))
4307 goto wwn_prefix_out;
4309 /* check capability in alternative san mac address block */
4310 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
4311 if (hw->eeprom.ops.read(hw, offset, &caps))
4312 goto wwn_prefix_err;
4313 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
4314 goto wwn_prefix_out;
4316 /* get the corresponding prefix for WWNN/WWPN */
4317 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
4318 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
4319 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4320 "eeprom read at offset %d failed", offset);
4323 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
4324 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
4325 goto wwn_prefix_err;
4328 return IXGBE_SUCCESS;
4331 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4332 "eeprom read at offset %d failed", offset);
4333 return IXGBE_SUCCESS;
4337 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4338 * @hw: pointer to hardware structure
4339 * @bs: the fcoe boot status
4341 * This function will read the FCOE boot status from the iSCSI FCOE block
4343 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4345 u16 offset, caps, flags;
4348 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4350 /* clear output first */
4351 *bs = ixgbe_fcoe_bootstatus_unavailable;
4353 /* check if FCOE IBA block is present */
4354 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4355 status = hw->eeprom.ops.read(hw, offset, &caps);
4356 if (status != IXGBE_SUCCESS)
4359 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4362 /* check if iSCSI FCOE block is populated */
4363 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4364 if (status != IXGBE_SUCCESS)
4367 if ((offset == 0) || (offset == 0xFFFF))
4370 /* read fcoe flags in iSCSI FCOE block */
4371 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4372 status = hw->eeprom.ops.read(hw, offset, &flags);
4373 if (status != IXGBE_SUCCESS)
4376 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4377 *bs = ixgbe_fcoe_bootstatus_enabled;
4379 *bs = ixgbe_fcoe_bootstatus_disabled;
4386 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4387 * @hw: pointer to hardware structure
4388 * @enable: enable or disable switch for MAC anti-spoofing
4389 * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
4392 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4394 int vf_target_reg = vf >> 3;
4395 int vf_target_shift = vf % 8;
4398 if (hw->mac.type == ixgbe_mac_82598EB)
4401 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4403 pfvfspoof |= (1 << vf_target_shift);
4405 pfvfspoof &= ~(1 << vf_target_shift);
4406 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4410 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4411 * @hw: pointer to hardware structure
4412 * @enable: enable or disable switch for VLAN anti-spoofing
4413 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4416 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4418 int vf_target_reg = vf >> 3;
4419 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4422 if (hw->mac.type == ixgbe_mac_82598EB)
4425 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4427 pfvfspoof |= (1 << vf_target_shift);
4429 pfvfspoof &= ~(1 << vf_target_shift);
4430 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4434 * ixgbe_get_device_caps_generic - Get additional device capabilities
4435 * @hw: pointer to hardware structure
4436 * @device_caps: the EEPROM word with the extra device capabilities
4438 * This function will read the EEPROM location for the device capabilities,
4439 * and return the word through device_caps.
4441 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4443 DEBUGFUNC("ixgbe_get_device_caps_generic");
4445 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4447 return IXGBE_SUCCESS;
4451 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4452 * @hw: pointer to hardware structure
4455 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4460 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4462 /* Enable relaxed ordering */
4463 for (i = 0; i < hw->mac.max_tx_queues; i++) {
4464 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4465 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4466 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4469 for (i = 0; i < hw->mac.max_rx_queues; i++) {
4470 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4471 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4472 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4473 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4479 * ixgbe_calculate_checksum - Calculate checksum for buffer
4480 * @buffer: pointer to EEPROM
4481 * @length: size of EEPROM to calculate a checksum for
4482 * Calculates the checksum for some buffer on a specified length. The
4483 * checksum calculated is returned.
4485 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4490 DEBUGFUNC("ixgbe_calculate_checksum");
4495 for (i = 0; i < length; i++)
4498 return (u8) (0 - sum);
4502 * ixgbe_hic_unlocked - Issue command to manageability block unlocked
4503 * @hw: pointer to the HW structure
4504 * @buffer: command to write and where the return status will be placed
4505 * @length: length of buffer, must be multiple of 4 bytes
4506 * @timeout: time in ms to wait for command completion
4508 * Communicates with the manageability block. On success return IXGBE_SUCCESS
4509 * else returns semaphore error when encountering an error acquiring
4510 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4512 * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
4515 s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
4521 DEBUGFUNC("ixgbe_hic_unlocked");
4523 if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4524 DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4525 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4528 /* Set bit 9 of FWSTS clearing FW reset indication */
4529 fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
4530 IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
4532 /* Check that the host interface is enabled. */
4533 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4534 if (!(hicr & IXGBE_HICR_EN)) {
4535 DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
4536 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4539 /* Calculate length in DWORDs. We must be DWORD aligned */
4540 if (length % sizeof(u32)) {
4541 DEBUGOUT("Buffer length failure, not aligned to dword");
4542 return IXGBE_ERR_INVALID_ARGUMENT;
4545 dword_len = length >> 2;
4547 /* The device driver writes the relevant command block
4548 * into the ram area.
4550 for (i = 0; i < dword_len; i++)
4551 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4552 i, IXGBE_CPU_TO_LE32(buffer[i]));
4554 /* Setting this bit tells the ARC that a new command is pending. */
4555 IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
4557 for (i = 0; i < timeout; i++) {
4558 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4559 if (!(hicr & IXGBE_HICR_C))
4564 /* Check command completion */
4565 if ((timeout && i == timeout) ||
4566 !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
4567 ERROR_REPORT1(IXGBE_ERROR_CAUTION,
4568 "Command has failed with no status valid.\n");
4569 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4572 return IXGBE_SUCCESS;
4576 * ixgbe_host_interface_command - Issue command to manageability block
4577 * @hw: pointer to the HW structure
4578 * @buffer: contains the command to write and where the return status will
4580 * @length: length of buffer, must be multiple of 4 bytes
4581 * @timeout: time in ms to wait for command completion
4582 * @return_data: read and return data from the buffer (true) or not (false)
4583 * Needed because FW structures are big endian and decoding of
4584 * these fields can be 8 bit or 16 bit based on command. Decoding
4585 * is not easily understood without making a table of commands.
4586 * So we will leave this up to the caller to read back the data
4589 * Communicates with the manageability block. On success return IXGBE_SUCCESS
4590 * else returns semaphore error when encountering an error acquiring
4591 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4593 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
4594 u32 length, u32 timeout, bool return_data)
4596 u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
4597 struct ixgbe_hic_hdr *resp = (struct ixgbe_hic_hdr *)buffer;
4603 DEBUGFUNC("ixgbe_host_interface_command");
4605 if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4606 DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4607 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4610 /* Take management host interface semaphore */
4611 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4615 status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
4622 /* Calculate length in DWORDs */
4623 dword_len = hdr_size >> 2;
4625 /* first pull in the header so we know the buffer length */
4626 for (bi = 0; bi < dword_len; bi++) {
4627 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4628 IXGBE_LE32_TO_CPUS((uintptr_t)&buffer[bi]);
4632 * If there is any thing in data position pull it in
4633 * Read Flash command requires reading buffer length from
4634 * two byes instead of one byte
4636 if (resp->cmd == 0x30) {
4637 for (; bi < dword_len + 2; bi++) {
4638 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4640 IXGBE_LE32_TO_CPUS(&buffer[bi]);
4642 buf_len = (((u16)(resp->cmd_or_resp.ret_status) << 3)
4643 & 0xF00) | resp->buf_len;
4644 hdr_size += (2 << 2);
4646 buf_len = resp->buf_len;
4651 if (length < buf_len + hdr_size) {
4652 DEBUGOUT("Buffer not large enough for reply message.\n");
4653 status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4657 /* Calculate length in DWORDs, add 3 for odd lengths */
4658 dword_len = (buf_len + 3) >> 2;
4660 /* Pull in the rest of the buffer (bi is where we left off) */
4661 for (; bi <= dword_len; bi++) {
4662 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4663 IXGBE_LE32_TO_CPUS((uintptr_t)&buffer[bi]);
4667 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4673 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4674 * @hw: pointer to the HW structure
4675 * @maj: driver version major number
4676 * @min: driver version minor number
4677 * @build: driver version build number
4678 * @sub: driver version sub build number
4680 * @driver_ver: unused
4682 * Sends driver version number to firmware through the manageability
4683 * block. On success return IXGBE_SUCCESS
4684 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4685 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4687 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4688 u8 build, u8 sub, u16 len,
4689 const char *driver_ver)
4691 struct ixgbe_hic_drv_info fw_cmd;
4693 s32 ret_val = IXGBE_SUCCESS;
4695 DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4696 UNREFERENCED_2PARAMETER(len, driver_ver);
4698 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4699 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4700 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4701 fw_cmd.port_num = (u8)hw->bus.func;
4702 fw_cmd.ver_maj = maj;
4703 fw_cmd.ver_min = min;
4704 fw_cmd.ver_build = build;
4705 fw_cmd.ver_sub = sub;
4706 fw_cmd.hdr.checksum = 0;
4709 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4710 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4712 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4713 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4715 IXGBE_HI_COMMAND_TIMEOUT,
4717 if (ret_val != IXGBE_SUCCESS)
4720 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4721 FW_CEM_RESP_STATUS_SUCCESS)
4722 ret_val = IXGBE_SUCCESS;
4724 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
/**
 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 **/
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
			     int strategy)
{
	u32 pbsize = hw->mac.rx_pb_size;
	int i = 0;
	u32 rxpktsize, txpktsize, txpbthresh;

	/* Reserve headroom */
	pbsize -= headroom;

	if (!num_pb)
		num_pb = 1;

	/* Divide remaining packet buffer space amongst the number of packet
	 * buffers requested using supplied strategy.
	 */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* ixgbe_dcb_pba_80_48 strategy weight first half of packet
		 * buffer with 5/8 of the packet buffer space.
		 */
		rxpktsize = (pbsize * 5) / (num_pb * 4);
		pbsize -= rxpktsize * (num_pb / 2);
		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
		for (; i < (num_pb / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* fall through - configure remaining packet buffers */
	case PBA_STRATEGY_EQUAL:
		/* Split what is left evenly among the remaining buffers */
		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
		for (; i < num_pb; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	default:
		break;
	}

	/* Only support an equally distributed Tx packet buffer strategy. */
	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
	for (i = 0; i < num_pb; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
	}

	/* Clear unused TCs, if any, to zero buffer size*/
	for (; i < IXGBE_MAX_PB; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
	}
}
/**
 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
 * @hw: pointer to the hardware structure
 *
 * The 82599 and x540 MACs can experience issues if TX work is still pending
 * when a reset occurs.  This function prevents this by flushing the PCIe
 * buffers on the system.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	u32 gcr_ext, hlreg0, i, poll;
	u16 value;

	/*
	 * If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Set loopback enable to prevent any transmits from being sent
	 * should the link come up.  This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* Wait for a last completion before clearing buffers */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(3);

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.  Poll the PCIe Device Status register's
	 * Transactions Pending bit until it clears or the timeout expires.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		/* Surprise-removal check: register reads return all-ones */
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

out:
	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}
/* EMC thermal sensor data-register addresses, indexed by the NVM ETS
 * sensor index (0 = internal sensor, 1-3 = external diodes).
 */
STATIC const u8 ixgbe_emc_temp_data[4] = {
	IXGBE_EMC_INTERNAL_DATA,
	IXGBE_EMC_DIODE1_DATA,
	IXGBE_EMC_DIODE2_DATA,
	IXGBE_EMC_DIODE3_DATA
};
/* EMC thermal-limit register addresses, indexed identically to
 * ixgbe_emc_temp_data above.
 */
STATIC const u8 ixgbe_emc_therm_limit[4] = {
	IXGBE_EMC_INTERNAL_THERM_LIMIT,
	IXGBE_EMC_DIODE1_THERM_LIMIT,
	IXGBE_EMC_DIODE2_THERM_LIMIT,
	IXGBE_EMC_DIODE3_THERM_LIMIT
};
/**
 * ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
 * @hw: pointer to hardware structure
 *
 * Reads the ETS (Emulated Thermal Sensor) map from the NVM and, for each
 * configured sensor, fetches the current temperature over I2C into
 * hw->mac.thermal_sensor_data.  Returns IXGBE_NOT_IMPLEMENTED when the
 * device has no supported thermal sensors.
 **/
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u16 ets_offset;
	u16 ets_cfg;
	u16 ets_sensor;
	u8  num_sensors;
	u8  sensor_index;
	u8  sensor_location;
	u8  i;
	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic");

	/* Only support thermal sensors attached to 82599 physical port 0 */
	if ((hw->mac.type != ixgbe_mac_82599EB) ||
	    (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
		status = IXGBE_NOT_IMPLEMENTED;
		goto out;
	}

	/* Locate the ETS configuration block via its NVM pointer word */
	status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
	if (status)
		goto out;

	/* 0x0000/0xFFFF mean "no ETS block present" */
	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) {
		status = IXGBE_NOT_IMPLEMENTED;
		goto out;
	}

	status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
	if (status)
		goto out;

	/* Only the EMC sensor type is supported here */
	if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
	    != IXGBE_ETS_TYPE_EMC) {
		status = IXGBE_NOT_IMPLEMENTED;
		goto out;
	}

	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
	if (num_sensors > IXGBE_MAX_SENSORS)
		num_sensors = IXGBE_MAX_SENSORS;

	/* One NVM word per sensor follows the config word */
	for (i = 0; i < num_sensors; i++) {
		status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
					     &ets_sensor);
		if (status)
			goto out;

		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
				IXGBE_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
				   IXGBE_ETS_DATA_LOC_SHIFT);

		/* Location 0 means the sensor slot is unused */
		if (sensor_location != 0) {
			status = hw->phy.ops.read_i2c_byte(hw,
					ixgbe_emc_temp_data[sensor_index],
					IXGBE_I2C_THERMAL_SENSOR_ADDR,
					&data->sensor[i].temp);
			if (status)
				goto out;
		}
	}

out:
	return status;
}
/**
 * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
 * @hw: pointer to hardware structure
 *
 * Inits the thermal sensor thresholds according to the NVM map
 * and save off the threshold and location values into mac.thermal_sensor_data
 **/
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u16 offset;
	u16 ets_offset;
	u16 ets_cfg;
	u16 ets_sensor;
	u8  low_thresh_delta;
	u8  num_sensors;
	u8  sensor_index;
	u8  sensor_location;
	u8  therm_limit;
	u8  i;
	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic");

	memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));

	/* Only support thermal sensors attached to 82599 physical port 0 */
	if ((hw->mac.type != ixgbe_mac_82599EB) ||
	    (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
		return IXGBE_NOT_IMPLEMENTED;

	/* Locate the ETS block; 0x0000/0xFFFF mean it is absent */
	offset = IXGBE_ETS_CFG;
	if (hw->eeprom.ops.read(hw, offset, &ets_offset))
		goto eeprom_err;
	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
		return IXGBE_NOT_IMPLEMENTED;

	offset = ets_offset;
	if (hw->eeprom.ops.read(hw, offset, &ets_cfg))
		goto eeprom_err;
	if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
	    != IXGBE_ETS_TYPE_EMC)
		return IXGBE_NOT_IMPLEMENTED;

	/* Caution threshold comes from NVM; max-operating threshold is the
	 * caution threshold minus this delta.
	 */
	low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
			    IXGBE_ETS_LTHRES_DELTA_SHIFT);
	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);

	for (i = 0; i < num_sensors; i++) {
		offset = ets_offset + 1 + i;
		if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) {
			/* Best effort: log and move on to the next sensor */
			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
				      "eeprom read at offset %d failed",
				      offset);
			continue;
		}
		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
				IXGBE_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
				   IXGBE_ETS_DATA_LOC_SHIFT);
		therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;

		/* Program the high threshold into the EMC over I2C */
		hw->phy.ops.write_i2c_byte(hw,
			ixgbe_emc_therm_limit[sensor_index],
			IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);

		/* Save off thresholds for sensors that are populated */
		if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) {
			data->sensor[i].location = sensor_location;
			data->sensor[i].caution_thresh = therm_limit;
			data->sensor[i].max_op_thresh = therm_limit -
							low_thresh_delta;
		}
	}
	return status;

eeprom_err:
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", offset);
	return IXGBE_NOT_IMPLEMENTED;
}
/**
 * ixgbe_get_orom_version - Return option ROM from EEPROM
 *
 * @hw: pointer to hardware structure
 * @nvm_ver: pointer to output structure
 *
 * if valid option ROM version, nvm_ver->or_valid set to true
 * else nvm_ver->or_valid is false.
 **/
void ixgbe_get_orom_version(struct ixgbe_hw *hw,
			    struct ixgbe_nvm_version *nvm_ver)
{
	u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;

	nvm_ver->or_valid = false;
	/* Option Rom may or may not be present.  Start with pointer */
	hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);

	/* make sure offset is valid */
	if ((offset == 0x0) || (offset == NVM_INVALID_PTR))
		return;

	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
	hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);

	/* option rom exists and is valid */
	if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
	    eeprom_cfg_blkl == NVM_VER_INVALID ||
	    eeprom_cfg_blkh == NVM_VER_INVALID)
		return;

	/* Version layout: major in the low block's high bits, patch in the
	 * high block's low bits, build spanning the remaining bits.
	 */
	nvm_ver->or_valid = true;
	nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
	nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
			    (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
	nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
}
5059 * ixgbe_get_oem_prod_version - Return OEM Product version
5061 * @hw: pointer to hardware structure
5062 * @nvm_ver: pointer to output structure
5064 * if valid OEM product version, nvm_ver->oem_valid set to true
5065 * else nvm_ver->oem_valid is false.
5067 void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
5068 struct ixgbe_nvm_version *nvm_ver)
5070 u16 rel_num, prod_ver, mod_len, cap, offset;
5072 nvm_ver->oem_valid = false;
5073 hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
5075 /* Return is offset to OEM Product Version block is invalid */
5076 if (offset == 0x0 && offset == NVM_INVALID_PTR)
5079 /* Read product version block */
5080 hw->eeprom.ops.read(hw, offset, &mod_len);
5081 hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
5083 /* Return if OEM product version block is invalid */
5084 if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
5085 (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
5088 hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
5089 hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
5091 /* Return if version is invalid */
5092 if ((rel_num | prod_ver) == 0x0 ||
5093 rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
5096 nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
5097 nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
5098 nvm_ver->oem_release = rel_num;
5099 nvm_ver->oem_valid = true;
/**
 * ixgbe_get_etk_id - Return Etrack ID from EEPROM
 *
 * @hw: pointer to hardware structure
 * @nvm_ver: pointer to output structure
 *
 * word read errors will return 0xFFFF
 **/
void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver)
{
	u16 etk_id_l, etk_id_h;

	/* On read failure, substitute the "invalid" sentinel word */
	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
		etk_id_l = NVM_VER_INVALID;
	if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
		etk_id_h = NVM_VER_INVALID;

	/* The word order for the version format is determined by high order
	 * word bit 15 (NVM_ETK_VALID).
	 */
	if ((etk_id_h & NVM_ETK_VALID) == 0) {
		nvm_ver->etk_id = etk_id_h;
		nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
	} else {
		nvm_ver->etk_id = etk_id_l;
		nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
	}
}
/**
 * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
 * @hw: pointer to hardware structure
 * @map: pointer to u8 arr for returning map
 *
 * Read the rtrup2tc HW register and resolve its content into map
 **/
void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
{
	u32 reg, i;

	/* Each user priority occupies a 3-bit field in RTRUP2TC */
	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
		map[i] = IXGBE_RTRUP2TC_UP_MASK &
			(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
}
/* Disable the Rx unit.  On MACs newer than 82598, VT loopback (PFDTXGSWC
 * LBEN) must be switched off first; hw->mac.set_lben remembers whether it
 * was enabled so ixgbe_enable_rx_generic can restore it.
 */
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
{
	u32 pfdtxgswc;
	u32 rxctrl;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (rxctrl & IXGBE_RXCTRL_RXEN) {
		if (hw->mac.type != ixgbe_mac_82598EB) {
			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
				hw->mac.set_lben = true;
			} else {
				hw->mac.set_lben = false;
			}
		}
		rxctrl &= ~IXGBE_RXCTRL_RXEN;
		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
	}
}
/* Enable the Rx unit and re-enable VT loopback if
 * ixgbe_disable_rx_generic previously turned it off (set_lben flag).
 */
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
{
	u32 pfdtxgswc;
	u32 rxctrl;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));

	if (hw->mac.type != ixgbe_mac_82598EB) {
		if (hw->mac.set_lben) {
			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
			hw->mac.set_lben = false;
		}
	}
}
/**
 * ixgbe_mng_present - returns true when management capability is present
 * @hw: pointer to hardware structure
 *
 * Returns true when the FWSM register reports firmware in PT
 * (pass-through) mode on 82599 and newer MACs; 82598 never has it.
 **/
bool ixgbe_mng_present(struct ixgbe_hw *hw)
{
	u32 fwsm;

	if (hw->mac.type < ixgbe_mac_82599EB)
		return false;

	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));

	return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
}
/**
 * ixgbe_mng_enabled - Is the manageability engine enabled?
 * @hw: pointer to hardware structure
 *
 * Returns true if the manageability engine is enabled.
 **/
bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
{
	u32 fwsm, manc, factps;

	/* Firmware must be in pass-through mode */
	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
		return false;

	/* Manageability must be receiving TCO traffic */
	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
		return false;

	/* On X540 and earlier, clock gating indicates MNG is idle */
	if (hw->mac.type <= ixgbe_mac_X540) {
		factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
		if (factps & IXGBE_FACTPS_MNGCG)
			return false;
	}

	return true;
}
/**
 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Set the link speed in the MAC and/or PHY register and restarts link.
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed,
					  bool autoneg_wait_to_complete)
{
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 status = IXGBE_SUCCESS;
	u32 speedcnt = 0;
	u32 i = 0;
	bool autoneg, link_up = false;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/* Try each speed one by one, highest priority first.  We do this in
	 * software because 10Gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_10GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects MAC link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_10GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, false);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber_fixed:
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_1GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_1GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
						       highest_link_speed,
						       autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}
5381 * ixgbe_set_soft_rate_select_speed - Set module link speed
5382 * @hw: pointer to hardware structure
5383 * @speed: link speed to set
5385 * Set module link speed via the soft rate select.
5387 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
5388 ixgbe_link_speed speed)
5394 case IXGBE_LINK_SPEED_10GB_FULL:
5395 /* one bit mask same as setting on */
5396 rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
5398 case IXGBE_LINK_SPEED_1GB_FULL:
5399 rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
5402 DEBUGOUT("Invalid fixed module speed\n");
5407 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5408 IXGBE_I2C_EEPROM_DEV_ADDR2,
5411 DEBUGOUT("Failed to read Rx Rate Select RS0\n");
5415 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5417 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5418 IXGBE_I2C_EEPROM_DEV_ADDR2,
5421 DEBUGOUT("Failed to write Rx Rate Select RS0\n");
5426 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5427 IXGBE_I2C_EEPROM_DEV_ADDR2,
5430 DEBUGOUT("Failed to read Rx Rate Select RS1\n");
5434 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5436 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5437 IXGBE_I2C_EEPROM_DEV_ADDR2,
5440 DEBUGOUT("Failed to write Rx Rate Select RS1\n");