1 /*******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include "ixgbe_common.h"
35 #include "ixgbe_phy.h"
36 #include "ixgbe_dcb.h"
37 #include "ixgbe_dcb_82599.h"
38 #include "ixgbe_api.h"
40 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
41 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
42 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
43 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
44 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
45 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
47 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
48 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
49 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
50 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw);
52 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
53 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
55 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
56 u16 words, u16 *data);
57 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
58 u16 words, u16 *data);
59 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
63 * ixgbe_init_ops_generic - Inits function ptrs
64 * @hw: pointer to the hardware structure
66 * Initialize the function pointers.
68 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
70 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
71 struct ixgbe_mac_info *mac = &hw->mac;
72 u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
74 DEBUGFUNC("ixgbe_init_ops_generic");
77 eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
78 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
79 if (eec & IXGBE_EEC_PRES) {
80 eeprom->ops.read = ixgbe_read_eerd_generic;
81 eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
83 eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
84 eeprom->ops.read_buffer =
85 ixgbe_read_eeprom_buffer_bit_bang_generic;
87 eeprom->ops.write = ixgbe_write_eeprom_generic;
88 eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
89 eeprom->ops.validate_checksum =
90 ixgbe_validate_eeprom_checksum_generic;
91 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
92 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
95 mac->ops.init_hw = ixgbe_init_hw_generic;
96 mac->ops.reset_hw = NULL;
97 mac->ops.start_hw = ixgbe_start_hw_generic;
98 mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
99 mac->ops.get_media_type = NULL;
100 mac->ops.get_supported_physical_layer = NULL;
101 mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
102 mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
103 mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
104 mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
105 mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
106 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
107 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
108 mac->ops.prot_autoc_read = prot_autoc_read_generic;
109 mac->ops.prot_autoc_write = prot_autoc_write_generic;
112 mac->ops.led_on = ixgbe_led_on_generic;
113 mac->ops.led_off = ixgbe_led_off_generic;
114 mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
115 mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
117 /* RAR, Multicast, VLAN */
118 mac->ops.set_rar = ixgbe_set_rar_generic;
119 mac->ops.clear_rar = ixgbe_clear_rar_generic;
120 mac->ops.insert_mac_addr = NULL;
121 mac->ops.set_vmdq = NULL;
122 mac->ops.clear_vmdq = NULL;
123 mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
124 mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
125 mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
126 mac->ops.enable_mc = ixgbe_enable_mc_generic;
127 mac->ops.disable_mc = ixgbe_disable_mc_generic;
128 mac->ops.clear_vfta = NULL;
129 mac->ops.set_vfta = NULL;
130 mac->ops.set_vlvf = NULL;
131 mac->ops.init_uta_tables = NULL;
132 mac->ops.enable_rx = ixgbe_enable_rx_generic;
133 mac->ops.disable_rx = ixgbe_disable_rx_generic;
136 mac->ops.fc_enable = ixgbe_fc_enable_generic;
137 mac->ops.setup_fc = ixgbe_setup_fc_generic;
138 mac->ops.fc_autoneg = ixgbe_fc_autoneg;
141 mac->ops.get_link_capabilities = NULL;
142 mac->ops.setup_link = NULL;
143 mac->ops.check_link = NULL;
144 mac->ops.dmac_config = NULL;
145 mac->ops.dmac_update_tcs = NULL;
146 mac->ops.dmac_config_tcs = NULL;
148 return IXGBE_SUCCESS;
152 * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
154 * @hw: pointer to hardware structure
156 * This function returns true if the device supports flow control
157 * autonegotiation, and false if it does not.
160 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
162 bool supported = false;
163 ixgbe_link_speed speed;
166 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
168 switch (hw->phy.media_type) {
169 case ixgbe_media_type_fiber_qsfp:
170 case ixgbe_media_type_fiber:
171 hw->mac.ops.check_link(hw, &speed, &link_up, false);
172 /* if link is down, assume supported */
174 supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
179 case ixgbe_media_type_backplane:
182 case ixgbe_media_type_copper:
183 /* only some copper devices support flow control autoneg */
184 switch (hw->device_id) {
185 case IXGBE_DEV_ID_82599_T3_LOM:
186 case IXGBE_DEV_ID_X540T:
187 case IXGBE_DEV_ID_X540T1:
188 case IXGBE_DEV_ID_X550T:
189 case IXGBE_DEV_ID_X550T1:
190 case IXGBE_DEV_ID_X550EM_X_10G_T:
200 ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
201 "Device %x does not support flow control autoneg",
207 * ixgbe_setup_fc_generic - Set up flow control
208 * @hw: pointer to hardware structure
210 * Called at init time to set up flow control.
212 s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
214 s32 ret_val = IXGBE_SUCCESS;
215 u32 reg = 0, reg_bp = 0;
219 DEBUGFUNC("ixgbe_setup_fc_generic");
221 /* Validate the requested mode */
222 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
223 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
224 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
225 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
230 * 10gig parts do not have a word in the EEPROM to determine the
231 * default flow control setting, so we explicitly set it to full.
233 if (hw->fc.requested_mode == ixgbe_fc_default)
234 hw->fc.requested_mode = ixgbe_fc_full;
237 * Set up the 1G and 10G flow control advertisement registers so the
238 * HW will be able to do fc autoneg once the cable is plugged in. If
239 * we link at 10G, the 1G advertisement is harmless and vice versa.
241 switch (hw->phy.media_type) {
242 case ixgbe_media_type_backplane:
243 /* some MAC's need RMW protection on AUTOC */
244 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp);
245 if (ret_val != IXGBE_SUCCESS)
248 /* only backplane uses autoc so fall though */
249 case ixgbe_media_type_fiber_qsfp:
250 case ixgbe_media_type_fiber:
251 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
254 case ixgbe_media_type_copper:
255 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
256 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu);
263 * The possible values of fc.requested_mode are:
264 * 0: Flow control is completely disabled
265 * 1: Rx flow control is enabled (we can receive pause frames,
266 * but not send pause frames).
267 * 2: Tx flow control is enabled (we can send pause frames but
268 * we do not support receiving pause frames).
269 * 3: Both Rx and Tx flow control (symmetric) are enabled.
272 switch (hw->fc.requested_mode) {
274 /* Flow control completely disabled by software override. */
275 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
276 if (hw->phy.media_type == ixgbe_media_type_backplane)
277 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
278 IXGBE_AUTOC_ASM_PAUSE);
279 else if (hw->phy.media_type == ixgbe_media_type_copper)
280 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
282 case ixgbe_fc_tx_pause:
284 * Tx Flow control is enabled, and Rx Flow control is
285 * disabled by software override.
287 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
288 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
289 if (hw->phy.media_type == ixgbe_media_type_backplane) {
290 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
291 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
292 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
293 reg_cu |= IXGBE_TAF_ASM_PAUSE;
294 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
297 case ixgbe_fc_rx_pause:
299 * Rx Flow control is enabled and Tx Flow control is
300 * disabled by software override. Since there really
301 * isn't a way to advertise that we are capable of RX
302 * Pause ONLY, we will advertise that we support both
303 * symmetric and asymmetric Rx PAUSE, as such we fall
304 * through to the fc_full statement. Later, we will
305 * disable the adapter's ability to send PAUSE frames.
308 /* Flow control (both Rx and Tx) is enabled by SW override. */
309 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
310 if (hw->phy.media_type == ixgbe_media_type_backplane)
311 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
312 IXGBE_AUTOC_ASM_PAUSE;
313 else if (hw->phy.media_type == ixgbe_media_type_copper)
314 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
317 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
318 "Flow control param set incorrectly\n");
319 ret_val = IXGBE_ERR_CONFIG;
324 if (hw->mac.type < ixgbe_mac_X540) {
326 * Enable auto-negotiation between the MAC & PHY;
327 * the MAC will advertise clause 37 flow control.
329 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
330 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
332 /* Disable AN timeout */
333 if (hw->fc.strict_ieee)
334 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
336 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
337 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
341 * AUTOC restart handles negotiation of 1G and 10G on backplane
342 * and copper. There is no need to set the PCS1GCTL register.
345 if (hw->phy.media_type == ixgbe_media_type_backplane) {
346 reg_bp |= IXGBE_AUTOC_AN_RESTART;
347 ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
350 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
351 (ixgbe_device_supports_autoneg_fc(hw))) {
352 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
353 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
356 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
362 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
363 * @hw: pointer to hardware structure
365 * Starts the hardware by filling the bus info structure and media type, clears
366 * all on chip counters, initializes receive address registers, multicast
367 * table, VLAN filter table, calls routine to set up link and flow control
368 * settings, and leaves transmit and receive units disabled and uninitialized
370 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
375 DEBUGFUNC("ixgbe_start_hw_generic");
377 /* Set the media type */
378 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
380 /* PHY ops initialization must be done in reset_hw() */
382 /* Clear the VLAN filter table */
383 hw->mac.ops.clear_vfta(hw);
385 /* Clear statistics registers */
386 hw->mac.ops.clear_hw_cntrs(hw);
388 /* Set No Snoop Disable */
389 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
390 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
391 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
392 IXGBE_WRITE_FLUSH(hw);
394 /* Setup flow control */
395 ret_val = ixgbe_setup_fc(hw);
396 if (ret_val != IXGBE_SUCCESS)
399 /* Clear adapter stopped flag */
400 hw->adapter_stopped = false;
407 * ixgbe_start_hw_gen2 - Init sequence for common device family
408 * @hw: pointer to hw structure
410 * Performs the init sequence common to the second generation
412 * Devices in the second generation:
416 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
421 /* Clear the rate limiters */
422 for (i = 0; i < hw->mac.max_tx_queues; i++) {
423 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
424 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
426 IXGBE_WRITE_FLUSH(hw);
428 /* Disable relaxed ordering */
429 for (i = 0; i < hw->mac.max_tx_queues; i++) {
430 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
431 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
432 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
435 for (i = 0; i < hw->mac.max_rx_queues; i++) {
436 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
437 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
438 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
439 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
442 return IXGBE_SUCCESS;
446 * ixgbe_init_hw_generic - Generic hardware initialization
447 * @hw: pointer to hardware structure
449 * Initialize the hardware by resetting the hardware, filling the bus info
450 * structure and media type, clears all on chip counters, initializes receive
451 * address registers, multicast table, VLAN filter table, calls routine to set
452 * up link and flow control settings, and leaves transmit and receive units
453 * disabled and uninitialized
455 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
459 DEBUGFUNC("ixgbe_init_hw_generic");
461 /* Reset the hardware */
462 status = hw->mac.ops.reset_hw(hw);
464 if (status == IXGBE_SUCCESS) {
466 status = hw->mac.ops.start_hw(hw);
473 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
474 * @hw: pointer to hardware structure
476 * Clears all hardware statistics counters by reading them from the hardware
477 * Statistics counters are clear on read.
479 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
483 DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
485 IXGBE_READ_REG(hw, IXGBE_CRCERRS);
486 IXGBE_READ_REG(hw, IXGBE_ILLERRC);
487 IXGBE_READ_REG(hw, IXGBE_ERRBC);
488 IXGBE_READ_REG(hw, IXGBE_MSPDC);
489 for (i = 0; i < 8; i++)
490 IXGBE_READ_REG(hw, IXGBE_MPC(i));
492 IXGBE_READ_REG(hw, IXGBE_MLFC);
493 IXGBE_READ_REG(hw, IXGBE_MRFC);
494 IXGBE_READ_REG(hw, IXGBE_RLEC);
495 IXGBE_READ_REG(hw, IXGBE_LXONTXC);
496 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
497 if (hw->mac.type >= ixgbe_mac_82599EB) {
498 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
499 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
501 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
502 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
505 for (i = 0; i < 8; i++) {
506 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
507 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
508 if (hw->mac.type >= ixgbe_mac_82599EB) {
509 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
510 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
512 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
513 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
516 if (hw->mac.type >= ixgbe_mac_82599EB)
517 for (i = 0; i < 8; i++)
518 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
519 IXGBE_READ_REG(hw, IXGBE_PRC64);
520 IXGBE_READ_REG(hw, IXGBE_PRC127);
521 IXGBE_READ_REG(hw, IXGBE_PRC255);
522 IXGBE_READ_REG(hw, IXGBE_PRC511);
523 IXGBE_READ_REG(hw, IXGBE_PRC1023);
524 IXGBE_READ_REG(hw, IXGBE_PRC1522);
525 IXGBE_READ_REG(hw, IXGBE_GPRC);
526 IXGBE_READ_REG(hw, IXGBE_BPRC);
527 IXGBE_READ_REG(hw, IXGBE_MPRC);
528 IXGBE_READ_REG(hw, IXGBE_GPTC);
529 IXGBE_READ_REG(hw, IXGBE_GORCL);
530 IXGBE_READ_REG(hw, IXGBE_GORCH);
531 IXGBE_READ_REG(hw, IXGBE_GOTCL);
532 IXGBE_READ_REG(hw, IXGBE_GOTCH);
533 if (hw->mac.type == ixgbe_mac_82598EB)
534 for (i = 0; i < 8; i++)
535 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
536 IXGBE_READ_REG(hw, IXGBE_RUC);
537 IXGBE_READ_REG(hw, IXGBE_RFC);
538 IXGBE_READ_REG(hw, IXGBE_ROC);
539 IXGBE_READ_REG(hw, IXGBE_RJC);
540 IXGBE_READ_REG(hw, IXGBE_MNGPRC);
541 IXGBE_READ_REG(hw, IXGBE_MNGPDC);
542 IXGBE_READ_REG(hw, IXGBE_MNGPTC);
543 IXGBE_READ_REG(hw, IXGBE_TORL);
544 IXGBE_READ_REG(hw, IXGBE_TORH);
545 IXGBE_READ_REG(hw, IXGBE_TPR);
546 IXGBE_READ_REG(hw, IXGBE_TPT);
547 IXGBE_READ_REG(hw, IXGBE_PTC64);
548 IXGBE_READ_REG(hw, IXGBE_PTC127);
549 IXGBE_READ_REG(hw, IXGBE_PTC255);
550 IXGBE_READ_REG(hw, IXGBE_PTC511);
551 IXGBE_READ_REG(hw, IXGBE_PTC1023);
552 IXGBE_READ_REG(hw, IXGBE_PTC1522);
553 IXGBE_READ_REG(hw, IXGBE_MPTC);
554 IXGBE_READ_REG(hw, IXGBE_BPTC);
555 for (i = 0; i < 16; i++) {
556 IXGBE_READ_REG(hw, IXGBE_QPRC(i));
557 IXGBE_READ_REG(hw, IXGBE_QPTC(i));
558 if (hw->mac.type >= ixgbe_mac_82599EB) {
559 IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
560 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
561 IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
562 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
563 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
565 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
566 IXGBE_READ_REG(hw, IXGBE_QBTC(i));
570 if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
572 ixgbe_identify_phy(hw);
573 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
574 IXGBE_MDIO_PCS_DEV_TYPE, &i);
575 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
576 IXGBE_MDIO_PCS_DEV_TYPE, &i);
577 hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
578 IXGBE_MDIO_PCS_DEV_TYPE, &i);
579 hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
580 IXGBE_MDIO_PCS_DEV_TYPE, &i);
583 return IXGBE_SUCCESS;
587 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
588 * @hw: pointer to hardware structure
589 * @pba_num: stores the part number string from the EEPROM
590 * @pba_num_size: part number string buffer length
592 * Reads the part number string from the EEPROM.
594 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
603 DEBUGFUNC("ixgbe_read_pba_string_generic");
605 if (pba_num == NULL) {
606 DEBUGOUT("PBA string buffer was null\n");
607 return IXGBE_ERR_INVALID_ARGUMENT;
610 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
612 DEBUGOUT("NVM Read Error\n");
616 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
618 DEBUGOUT("NVM Read Error\n");
623 * if data is not ptr guard the PBA must be in legacy format which
624 * means pba_ptr is actually our second data word for the PBA number
625 * and we can decode it into an ascii string
627 if (data != IXGBE_PBANUM_PTR_GUARD) {
628 DEBUGOUT("NVM PBA number is not stored as string\n");
630 /* we will need 11 characters to store the PBA */
631 if (pba_num_size < 11) {
632 DEBUGOUT("PBA string buffer too small\n");
633 return IXGBE_ERR_NO_SPACE;
636 /* extract hex string from data and pba_ptr */
637 pba_num[0] = (data >> 12) & 0xF;
638 pba_num[1] = (data >> 8) & 0xF;
639 pba_num[2] = (data >> 4) & 0xF;
640 pba_num[3] = data & 0xF;
641 pba_num[4] = (pba_ptr >> 12) & 0xF;
642 pba_num[5] = (pba_ptr >> 8) & 0xF;
645 pba_num[8] = (pba_ptr >> 4) & 0xF;
646 pba_num[9] = pba_ptr & 0xF;
648 /* put a null character on the end of our string */
651 /* switch all the data but the '-' to hex char */
652 for (offset = 0; offset < 10; offset++) {
653 if (pba_num[offset] < 0xA)
654 pba_num[offset] += '0';
655 else if (pba_num[offset] < 0x10)
656 pba_num[offset] += 'A' - 0xA;
659 return IXGBE_SUCCESS;
662 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
664 DEBUGOUT("NVM Read Error\n");
668 if (length == 0xFFFF || length == 0) {
669 DEBUGOUT("NVM PBA number section invalid length\n");
670 return IXGBE_ERR_PBA_SECTION;
673 /* check if pba_num buffer is big enough */
674 if (pba_num_size < (((u32)length * 2) - 1)) {
675 DEBUGOUT("PBA string buffer too small\n");
676 return IXGBE_ERR_NO_SPACE;
679 /* trim pba length from start of string */
683 for (offset = 0; offset < length; offset++) {
684 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
686 DEBUGOUT("NVM Read Error\n");
689 pba_num[offset * 2] = (u8)(data >> 8);
690 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
692 pba_num[offset * 2] = '\0';
694 return IXGBE_SUCCESS;
698 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
699 * @hw: pointer to hardware structure
700 * @pba_num: stores the part number from the EEPROM
702 * Reads the part number from the EEPROM.
704 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
709 DEBUGFUNC("ixgbe_read_pba_num_generic");
711 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
713 DEBUGOUT("NVM Read Error\n");
715 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
716 DEBUGOUT("NVM Not supported\n");
717 return IXGBE_NOT_IMPLEMENTED;
719 *pba_num = (u32)(data << 16);
721 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
723 DEBUGOUT("NVM Read Error\n");
728 return IXGBE_SUCCESS;
733 * @hw: pointer to the HW structure
734 * @eeprom_buf: optional pointer to EEPROM image
735 * @eeprom_buf_size: size of EEPROM image in words
736 * @max_pba_block_size: PBA block size limit
737 * @pba: pointer to output PBA structure
739 * Reads PBA from EEPROM image when eeprom_buf is not NULL.
740 * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
743 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
744 u32 eeprom_buf_size, u16 max_pba_block_size,
745 struct ixgbe_pba *pba)
751 return IXGBE_ERR_PARAM;
753 if (eeprom_buf == NULL) {
754 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
759 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
760 pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
761 pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
763 return IXGBE_ERR_PARAM;
767 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
768 if (pba->pba_block == NULL)
769 return IXGBE_ERR_PARAM;
771 ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
777 if (pba_block_size > max_pba_block_size)
778 return IXGBE_ERR_PARAM;
780 if (eeprom_buf == NULL) {
781 ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
787 if (eeprom_buf_size > (u32)(pba->word[1] +
789 memcpy(pba->pba_block,
790 &eeprom_buf[pba->word[1]],
791 pba_block_size * sizeof(u16));
793 return IXGBE_ERR_PARAM;
798 return IXGBE_SUCCESS;
802 * ixgbe_write_pba_raw
803 * @hw: pointer to the HW structure
804 * @eeprom_buf: optional pointer to EEPROM image
805 * @eeprom_buf_size: size of EEPROM image in words
806 * @pba: pointer to PBA structure
808 * Writes PBA to EEPROM image when eeprom_buf is not NULL.
809 * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
812 s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
813 u32 eeprom_buf_size, struct ixgbe_pba *pba)
818 return IXGBE_ERR_PARAM;
820 if (eeprom_buf == NULL) {
821 ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
826 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
827 eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
828 eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
830 return IXGBE_ERR_PARAM;
834 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
835 if (pba->pba_block == NULL)
836 return IXGBE_ERR_PARAM;
838 if (eeprom_buf == NULL) {
839 ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
845 if (eeprom_buf_size > (u32)(pba->word[1] +
846 pba->pba_block[0])) {
847 memcpy(&eeprom_buf[pba->word[1]],
849 pba->pba_block[0] * sizeof(u16));
851 return IXGBE_ERR_PARAM;
856 return IXGBE_SUCCESS;
860 * ixgbe_get_pba_block_size
861 * @hw: pointer to the HW structure
862 * @eeprom_buf: optional pointer to EEPROM image
863 * @eeprom_buf_size: size of EEPROM image in words
864 * @pba_data_size: pointer to output variable
866 * Returns the size of the PBA block in words. Function operates on EEPROM
867 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
871 s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
872 u32 eeprom_buf_size, u16 *pba_block_size)
878 DEBUGFUNC("ixgbe_get_pba_block_size");
880 if (eeprom_buf == NULL) {
881 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
886 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
887 pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
888 pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
890 return IXGBE_ERR_PARAM;
894 if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
895 if (eeprom_buf == NULL) {
896 ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
901 if (eeprom_buf_size > pba_word[1])
902 length = eeprom_buf[pba_word[1] + 0];
904 return IXGBE_ERR_PARAM;
907 if (length == 0xFFFF || length == 0)
908 return IXGBE_ERR_PBA_SECTION;
910 /* PBA number in legacy format, there is no PBA Block. */
914 if (pba_block_size != NULL)
915 *pba_block_size = length;
917 return IXGBE_SUCCESS;
921 * ixgbe_get_mac_addr_generic - Generic get MAC address
922 * @hw: pointer to hardware structure
923 * @mac_addr: Adapter MAC address
925 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
926 * A reset of the adapter must be performed prior to calling this function
927 * in order for the MAC address to have been loaded from the EEPROM into RAR0
929 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
935 DEBUGFUNC("ixgbe_get_mac_addr_generic");
937 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
938 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
940 for (i = 0; i < 4; i++)
941 mac_addr[i] = (u8)(rar_low >> (i*8));
943 for (i = 0; i < 2; i++)
944 mac_addr[i+4] = (u8)(rar_high >> (i*8));
946 return IXGBE_SUCCESS;
950 * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
951 * @hw: pointer to hardware structure
952 * @link_status: the link status returned by the PCI config space
954 * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
956 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
958 struct ixgbe_mac_info *mac = &hw->mac;
960 if (hw->bus.type == ixgbe_bus_type_unknown)
961 hw->bus.type = ixgbe_bus_type_pci_express;
963 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
964 case IXGBE_PCI_LINK_WIDTH_1:
965 hw->bus.width = ixgbe_bus_width_pcie_x1;
967 case IXGBE_PCI_LINK_WIDTH_2:
968 hw->bus.width = ixgbe_bus_width_pcie_x2;
970 case IXGBE_PCI_LINK_WIDTH_4:
971 hw->bus.width = ixgbe_bus_width_pcie_x4;
973 case IXGBE_PCI_LINK_WIDTH_8:
974 hw->bus.width = ixgbe_bus_width_pcie_x8;
977 hw->bus.width = ixgbe_bus_width_unknown;
981 switch (link_status & IXGBE_PCI_LINK_SPEED) {
982 case IXGBE_PCI_LINK_SPEED_2500:
983 hw->bus.speed = ixgbe_bus_speed_2500;
985 case IXGBE_PCI_LINK_SPEED_5000:
986 hw->bus.speed = ixgbe_bus_speed_5000;
988 case IXGBE_PCI_LINK_SPEED_8000:
989 hw->bus.speed = ixgbe_bus_speed_8000;
992 hw->bus.speed = ixgbe_bus_speed_unknown;
996 mac->ops.set_lan_id(hw);
1000 * ixgbe_get_bus_info_generic - Generic set PCI bus info
1001 * @hw: pointer to hardware structure
1003 * Gets the PCI bus info (speed, width, type) then calls helper function to
1004 * store this data within the ixgbe_hw structure.
1006 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1010 DEBUGFUNC("ixgbe_get_bus_info_generic");
1012 /* Get the negotiated link width and speed from PCI config space */
1013 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1015 ixgbe_set_pci_config_data_generic(hw, link_status);
1017 return IXGBE_SUCCESS;
1021 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1022 * @hw: pointer to the HW structure
1024 * Determines the LAN function id by reading memory-mapped registers and swaps
1025 * the port value if requested, and set MAC instance for devices that share
1028 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
1030 struct ixgbe_bus_info *bus = &hw->bus;
1034 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
1036 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
1037 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
1038 bus->lan_id = (u8)bus->func;
1040 /* check for a port swap */
1041 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
1042 if (reg & IXGBE_FACTPS_LFS)
1045 /* Get MAC instance from EEPROM for configuring CS4227 */
1046 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
1047 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
1048 bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
1049 IXGBE_EE_CTRL_4_INST_ID_SHIFT;
1054 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1055 * @hw: pointer to hardware structure
1057 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1058 * disables transmit and receive units. The adapter_stopped flag is used by
1059 * the shared code and drivers to determine if the adapter is in a stopped
1060 * state and should not touch the hardware.
1062 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
1067 DEBUGFUNC("ixgbe_stop_adapter_generic");
1070 * Set the adapter_stopped flag so other driver functions stop touching
1073 hw->adapter_stopped = true;
1075 /* Disable the receive unit */
1076 ixgbe_disable_rx(hw);
1078 /* Clear interrupt mask to stop interrupts from being generated */
1079 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1081 /* Clear any pending interrupts, flush previous writes */
1082 IXGBE_READ_REG(hw, IXGBE_EICR);
1084 /* Disable the transmit unit. Each queue must be disabled. */
1085 for (i = 0; i < hw->mac.max_tx_queues; i++)
1086 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
1088 /* Disable the receive unit by stopping each queue */
1089 for (i = 0; i < hw->mac.max_rx_queues; i++) {
1090 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1091 reg_val &= ~IXGBE_RXDCTL_ENABLE;
1092 reg_val |= IXGBE_RXDCTL_SWFLSH;
1093 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
1096 /* flush all queues disables */
1097 IXGBE_WRITE_FLUSH(hw);
1101 * Prevent the PCI-E bus from hanging by disabling PCI-E master
1102 * access and verify no pending requests
1104 return ixgbe_disable_pcie_master(hw);
1108 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1109 * @hw: pointer to hardware structure
1110 * @index: led number to turn on
1112 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1114 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1116 DEBUGFUNC("ixgbe_led_on_generic");
1118 /* To turn on the LED, set mode to ON. */
1119 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1120 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1121 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1122 IXGBE_WRITE_FLUSH(hw);
1124 return IXGBE_SUCCESS;
1128 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
1129 * @hw: pointer to hardware structure
1130 * @index: led number to turn off
1132 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1134 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1136 DEBUGFUNC("ixgbe_led_off_generic");
1138 /* To turn off the LED, set mode to OFF. */
1139 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1140 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1141 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1142 IXGBE_WRITE_FLUSH(hw);
1144 return IXGBE_SUCCESS;
1148 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1149 * @hw: pointer to hardware structure
1151 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1152 * ixgbe_hw struct in order to set up EEPROM access.
1154 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
1156 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1160 DEBUGFUNC("ixgbe_init_eeprom_params_generic");
/* Only run detection once; after the first call eeprom->type is no longer
 * ixgbe_eeprom_uninitialized, so subsequent calls are effectively no-ops.
 */
1162 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1163 eeprom->type = ixgbe_eeprom_none;
1164 /* Set default semaphore delay to 10ms which is a well
1166 eeprom->semaphore_delay = 10;
1167 /* Clear EEPROM page size, it will be initialized as needed */
1168 eeprom->word_page_size = 0;
1171 * Check for EEPROM present first.
1172 * If not present leave as none
1174 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1175 if (eec & IXGBE_EEC_PRES) {
1176 eeprom->type = ixgbe_eeprom_spi;
1179 * SPI EEPROM is assumed here. This code would need to
1180 * change if a future EEPROM is not SPI.
/* EEC SIZE field encodes the word count as a power of two */
1182 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1183 IXGBE_EEC_SIZE_SHIFT);
1184 eeprom->word_size = 1 << (eeprom_size +
1185 IXGBE_EEPROM_WORD_SIZE_SHIFT);
/* Larger parts require 16-bit SPI addressing; smaller use 8-bit */
1188 if (eec & IXGBE_EEC_ADDR_SIZE)
1189 eeprom->address_bits = 16;
1191 eeprom->address_bits = 8;
1192 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
1193 "%d\n", eeprom->type, eeprom->word_size,
1194 eeprom->address_bits);
1197 return IXGBE_SUCCESS;
1201 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1202 * @hw: pointer to hardware structure
1203 * @offset: offset within the EEPROM to write
1204 * @words: number of word(s)
1205 * @data: 16 bit word(s) to write to EEPROM
1207 * Reads 16 bit word(s) from EEPROM through bit-bang method
1209 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1210 u16 words, u16 *data)
1212 s32 status = IXGBE_SUCCESS;
1215 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1217 hw->eeprom.ops.init_params(hw);
/* NOTE(review): the guard condition preceding this error assignment is not
 * visible in this listing — presumably a words == 0 check; confirm upstream.
 */
1220 status = IXGBE_ERR_INVALID_ARGUMENT;
/* Reject writes that would run past the end of the EEPROM */
1224 if (offset + words > hw->eeprom.word_size) {
1225 status = IXGBE_ERR_EEPROM;
1230 * The EEPROM page size cannot be queried from the chip. We do lazy
1231 * initialization. It is worth to do that when we write large buffer.
1233 if ((hw->eeprom.word_page_size == 0) &&
1234 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1235 ixgbe_detect_eeprom_page_size_generic(hw, offset);
1238 * We cannot hold synchronization semaphores for too long
1239 * to avoid other entity starvation. However it is more efficient
1240 * to read in bursts than synchronizing access for each word.
/* Issue the write in chunks of at most IXGBE_EEPROM_RD_BUFFER_MAX_COUNT
 * words so the EEPROM semaphore is released between bursts.
 */
1242 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1243 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1244 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1245 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
/* Stop on the first failing burst */
1248 if (status != IXGBE_SUCCESS)
1257 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1258 * @hw: pointer to hardware structure
1259 * @offset: offset within the EEPROM to be written to
1260 * @words: number of word(s)
1261 * @data: 16 bit word(s) to be written to the EEPROM
1263 * If ixgbe_eeprom_update_checksum is not called after this function, the
1264 * EEPROM will most likely contain an invalid checksum.
1266 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1267 u16 words, u16 *data)
1273 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
1275 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
1277 /* Prepare the EEPROM for writing */
1278 status = ixgbe_acquire_eeprom(hw);
1280 if (status == IXGBE_SUCCESS) {
/* The device must report ready before any SPI command is issued */
1281 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1282 ixgbe_release_eeprom(hw);
1283 status = IXGBE_ERR_EEPROM;
1287 if (status == IXGBE_SUCCESS) {
1288 for (i = 0; i < words; i++) {
1289 ixgbe_standby_eeprom(hw);
1291 /* Send the WRITE ENABLE command (8 bit opcode ) */
1292 ixgbe_shift_out_eeprom_bits(hw,
1293 IXGBE_EEPROM_WREN_OPCODE_SPI,
1294 IXGBE_EEPROM_OPCODE_BITS);
1296 ixgbe_standby_eeprom(hw);
1299 * Some SPI eeproms use the 8th address bit embedded
/* With 8-bit addressing, word offsets >= 128 set the A8 bit
 * inside the opcode itself (address bit 8 of the byte address).
 */
1302 if ((hw->eeprom.address_bits == 8) &&
1303 ((offset + i) >= 128))
1304 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1306 /* Send the Write command (8-bit opcode + addr) */
1307 ixgbe_shift_out_eeprom_bits(hw, write_opcode,
1308 IXGBE_EEPROM_OPCODE_BITS);
/* SPI addresses are byte addresses, hence word offset * 2 */
1309 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1310 hw->eeprom.address_bits);
1312 page_size = hw->eeprom.word_page_size;
1314 /* Send the data in burst via SPI*/
/* Byte-swap: the EEPROM expects big-endian word order on the wire */
1317 word = (word >> 8) | (word << 8);
1318 ixgbe_shift_out_eeprom_bits(hw, word, 16);
1323 /* do not wrap around page */
1324 if (((offset + i) & (page_size - 1)) ==
1327 } while (++i < words);
1329 ixgbe_standby_eeprom(hw);
1332 /* Done with writing - release the EEPROM */
1333 ixgbe_release_eeprom(hw);
1340 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1341 * @hw: pointer to hardware structure
1342 * @offset: offset within the EEPROM to be written to
1343 * @data: 16 bit word to be written to the EEPROM
1345 * If ixgbe_eeprom_update_checksum is not called after this function, the
1346 * EEPROM will most likely contain an invalid checksum.
1348 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1352 DEBUGFUNC("ixgbe_write_eeprom_generic");
1354 hw->eeprom.ops.init_params(hw);
/* Bounds-check the single-word write against the detected EEPROM size */
1356 if (offset >= hw->eeprom.word_size) {
1357 status = IXGBE_ERR_EEPROM;
/* Single-word write is just the buffer variant with words == 1 */
1361 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1368 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1369 * @hw: pointer to hardware structure
1370 * @offset: offset within the EEPROM to be read
1371 * @data: read 16 bit words(s) from EEPROM
1372 * @words: number of word(s)
1374 * Reads 16 bit word(s) from EEPROM through bit-bang method
1376 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1377 u16 words, u16 *data)
1379 s32 status = IXGBE_SUCCESS;
1382 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1384 hw->eeprom.ops.init_params(hw);
/* NOTE(review): the guard condition preceding this error assignment is not
 * visible in this listing — presumably a words == 0 check; confirm upstream.
 */
1387 status = IXGBE_ERR_INVALID_ARGUMENT;
/* Reject reads that would run past the end of the EEPROM */
1391 if (offset + words > hw->eeprom.word_size) {
1392 status = IXGBE_ERR_EEPROM;
1397 * We cannot hold synchronization semaphores for too long
1398 * to avoid other entity starvation. However it is more efficient
1399 * to read in bursts than synchronizing access for each word.
/* Read in bursts of IXGBE_EEPROM_RD_BUFFER_MAX_COUNT words, releasing
 * the semaphore between bursts.
 */
1401 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1402 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1403 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1405 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
/* Stop on the first failing burst */
1408 if (status != IXGBE_SUCCESS)
1417 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1418 * @hw: pointer to hardware structure
1419 * @offset: offset within the EEPROM to be read
1420 * @words: number of word(s)
1421 * @data: read 16 bit word(s) from EEPROM
1423 * Reads 16 bit word(s) from EEPROM through bit-bang method
1425 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1426 u16 words, u16 *data)
1430 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1433 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1435 /* Prepare the EEPROM for reading */
1436 status = ixgbe_acquire_eeprom(hw);
1438 if (status == IXGBE_SUCCESS) {
/* The device must report ready before any SPI command is issued */
1439 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1440 ixgbe_release_eeprom(hw);
1441 status = IXGBE_ERR_EEPROM;
1445 if (status == IXGBE_SUCCESS) {
1446 for (i = 0; i < words; i++) {
1447 ixgbe_standby_eeprom(hw);
1449 * Some SPI eeproms use the 8th address bit embedded
/* With 8-bit addressing, word offsets >= 128 set the A8 bit
 * inside the opcode itself (address bit 8 of the byte address).
 */
1452 if ((hw->eeprom.address_bits == 8) &&
1453 ((offset + i) >= 128))
1454 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1456 /* Send the READ command (opcode + addr) */
1457 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1458 IXGBE_EEPROM_OPCODE_BITS);
/* SPI addresses are byte addresses, hence word offset * 2 */
1459 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1460 hw->eeprom.address_bits);
1462 /* Read the data. */
1463 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
/* Byte-swap the big-endian word coming off the wire */
1464 data[i] = (word_in >> 8) | (word_in << 8);
1467 /* End this read operation */
1468 ixgbe_release_eeprom(hw);
1475 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1476 * @hw: pointer to hardware structure
1477 * @offset: offset within the EEPROM to be read
1478 * @data: read 16 bit value from EEPROM
1480 * Reads 16 bit value from EEPROM through bit-bang method
1482 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1487 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1489 hw->eeprom.ops.init_params(hw);
/* Bounds-check the single-word read against the detected EEPROM size */
1491 if (offset >= hw->eeprom.word_size) {
1492 status = IXGBE_ERR_EEPROM;
/* Single-word read is just the buffer variant with words == 1 */
1496 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1503 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1504 * @hw: pointer to hardware structure
1505 * @offset: offset of word in the EEPROM to read
1506 * @words: number of word(s)
1507 * @data: 16 bit word(s) from the EEPROM
1509 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1511 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1512 u16 words, u16 *data)
1515 s32 status = IXGBE_SUCCESS;
1518 DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1520 hw->eeprom.ops.init_params(hw);
/* NOTE(review): guard preceding this assignment (likely words == 0 or
 * words > max) is not visible in this listing — confirm upstream.
 */
1523 status = IXGBE_ERR_INVALID_ARGUMENT;
1524 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1528 if (offset >= hw->eeprom.word_size) {
1529 status = IXGBE_ERR_EEPROM;
1530 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
/* One EERD transaction per word: write address + START, poll for DONE,
 * then pull the result out of the data field of EERD.
 */
1534 for (i = 0; i < words; i++) {
1535 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1536 IXGBE_EEPROM_RW_REG_START;
1538 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1539 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1541 if (status == IXGBE_SUCCESS) {
1542 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1543 IXGBE_EEPROM_RW_REG_DATA);
1545 DEBUGOUT("Eeprom read timed out\n");
1554 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1555 * @hw: pointer to hardware structure
1556 * @offset: offset within the EEPROM to be used as a scratch pad
1558 * Discover EEPROM page size by writing marching data at given offset.
1559 * This function is called only when we are writing a new large buffer
1560 * at given offset so the data would be overwritten anyway.
1562 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1565 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1566 s32 status = IXGBE_SUCCESS;
1569 DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
/* Fill the scratch buffer with marching data (pattern line not visible
 * in this listing), then write a maximal burst at the scratch offset.
 */
1571 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
/* Temporarily claim max page size so the write helper issues one
 * unbroken burst; restored to 0 (unknown) right after.
 */
1574 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1575 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1576 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1577 hw->eeprom.word_page_size = 0;
1578 if (status != IXGBE_SUCCESS)
1581 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1582 if (status != IXGBE_SUCCESS)
1586 * When writing in burst more than the actual page size
1587 * EEPROM address wraps around current page.
/* The value that wrapped back to word 0 reveals the real page size */
1589 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1591 DEBUGOUT1("Detected EEPROM page size = %d words.",
1592 hw->eeprom.word_page_size);
1598 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1599 * @hw: pointer to hardware structure
1600 * @offset: offset of word in the EEPROM to read
1601 * @data: word read from the EEPROM
1603 * Reads a 16 bit word from the EEPROM using the EERD register.
/* Thin wrapper: single-word EERD read via the buffer variant */
1605 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1607 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1611 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1612 * @hw: pointer to hardware structure
1613 * @offset: offset of word in the EEPROM to write
1614 * @words: number of word(s)
1615 * @data: word(s) write to the EEPROM
1617 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1619 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1620 u16 words, u16 *data)
1623 s32 status = IXGBE_SUCCESS;
1626 DEBUGFUNC("ixgbe_write_eewr_generic");
1628 hw->eeprom.ops.init_params(hw);
/* NOTE(review): guard preceding this assignment (likely words == 0 or
 * words > max) is not visible in this listing — confirm upstream.
 */
1631 status = IXGBE_ERR_INVALID_ARGUMENT;
1632 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1636 if (offset >= hw->eeprom.word_size) {
1637 status = IXGBE_ERR_EEPROM;
1638 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
/* One EEWR transaction per word: address + data + START packed into the
 * register; hardware must be idle before AND after each write.
 */
1642 for (i = 0; i < words; i++) {
1643 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1644 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1645 IXGBE_EEPROM_RW_REG_START;
/* Wait for any previous EEWR operation to finish before issuing ours */
1647 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1648 if (status != IXGBE_SUCCESS) {
1649 DEBUGOUT("Eeprom write EEWR timed out\n");
1653 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
/* Wait for our write to complete before moving to the next word */
1655 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1656 if (status != IXGBE_SUCCESS) {
1657 DEBUGOUT("Eeprom write EEWR timed out\n");
1667 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1668 * @hw: pointer to hardware structure
1669 * @offset: offset of word in the EEPROM to write
1670 * @data: word write to the EEPROM
1672 * Write a 16 bit word to the EEPROM using the EEWR register.
/* Thin wrapper: single-word EEWR write via the buffer variant */
1674 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1676 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1680 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1681 * @hw: pointer to hardware structure
1682 * @ee_reg: EEPROM flag for polling
1684 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1685 * read or write is done respectively.
1687 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
/* Default to timeout; flipped to success as soon as DONE is observed */
1691 s32 status = IXGBE_ERR_EEPROM;
1693 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1695 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
/* ee_reg selects which register to poll: EERD for reads, EEWR for writes */
1696 if (ee_reg == IXGBE_NVM_POLL_READ)
1697 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1699 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1701 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1702 status = IXGBE_SUCCESS;
/* Loop exhausted without seeing DONE -> report the polling timeout */
1708 if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1709 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1710 "EEPROM read/write done polling timed out");
1716 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1717 * @hw: pointer to hardware structure
1719 * Prepares EEPROM for access using bit-bang method. This function should
1720 * be called before issuing a command to the EEPROM.
1722 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1724 s32 status = IXGBE_SUCCESS;
1728 DEBUGFUNC("ixgbe_acquire_eeprom");
/* First take the SW/FW synchronization semaphore for the EEPROM */
1730 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1732 status = IXGBE_ERR_SWFW_SYNC;
1734 if (status == IXGBE_SUCCESS) {
1735 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1737 /* Request EEPROM Access */
1738 eec |= IXGBE_EEC_REQ;
1739 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
/* Poll for the hardware to grant the bus (GNT bit) */
1741 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1742 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1743 if (eec & IXGBE_EEC_GNT)
1748 /* Release if grant not acquired */
1749 if (!(eec & IXGBE_EEC_GNT)) {
1750 eec &= ~IXGBE_EEC_REQ;
1751 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1752 DEBUGOUT("Could not acquire EEPROM grant\n");
/* Drop the semaphore taken above so others can proceed */
1754 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1755 status = IXGBE_ERR_EEPROM;
1758 /* Setup EEPROM for Read/Write */
1759 if (status == IXGBE_SUCCESS) {
1760 /* Clear CS and SK */
1761 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1762 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1763 IXGBE_WRITE_FLUSH(hw);
1771 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1772 * @hw: pointer to hardware structure
1774 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1776 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1778 s32 status = IXGBE_ERR_EEPROM;
1783 DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1786 /* Get SMBI software semaphore between device drivers first */
1787 for (i = 0; i < timeout; i++) {
1789 * If the SMBI bit is 0 when we read it, then the bit will be
1790 * set and we have the semaphore
1792 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1793 if (!(swsm & IXGBE_SWSM_SMBI)) {
1794 status = IXGBE_SUCCESS;
/* Timeout path: SMBI never observed clear within the loop */
1801 DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1804 * this release is particularly important because our attempts
1805 * above to get the semaphore may have succeeded, and if there
1806 * was a timeout, we should unconditionally clear the semaphore
1807 * bits to free the driver to make progress
1809 ixgbe_release_eeprom_semaphore(hw);
/* One last attempt at SMBI after the forced release above */
1814 * If the SMBI bit is 0 when we read it, then the bit will be
1815 * set and we have the semaphore
1817 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1818 if (!(swsm & IXGBE_SWSM_SMBI))
1819 status = IXGBE_SUCCESS;
1822 /* Now get the semaphore between SW/FW through the SWESMBI bit */
1823 if (status == IXGBE_SUCCESS) {
1824 for (i = 0; i < timeout; i++) {
1825 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1827 /* Set the SW EEPROM semaphore bit to request access */
1828 swsm |= IXGBE_SWSM_SWESMBI;
1829 IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);
1832 * If we set the bit successfully then we got the
/* Read back: SWESMBI sticking means firmware granted access */
1835 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1836 if (swsm & IXGBE_SWSM_SWESMBI)
1843 * Release semaphores and return error if SW EEPROM semaphore
1844 * was not granted because we don't have access to the EEPROM
1847 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1848 "SWESMBI Software EEPROM semaphore not granted.\n");
1849 ixgbe_release_eeprom_semaphore(hw);
1850 status = IXGBE_ERR_EEPROM;
1853 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1854 "Software semaphore SMBI between device drivers "
1862 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1863 * @hw: pointer to hardware structure
1865 * This function clears hardware semaphore bits.
1867 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1871 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1873 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM)
1875 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1876 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1877 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
/* Flush so the release is visible to firmware/other drivers immediately */
1878 IXGBE_WRITE_FLUSH(hw);
1882 * ixgbe_ready_eeprom - Polls for EEPROM ready
1883 * @hw: pointer to hardware structure
1885 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1887 s32 status = IXGBE_SUCCESS;
1891 DEBUGFUNC("ixgbe_ready_eeprom");
1894 * Read "Status Register" repeatedly until the LSB is cleared. The
1895 * EEPROM will signal that the command has been completed by clearing
1896 * bit 0 of the internal status register. If it's not cleared within
1897 * 5 milliseconds, then error out.
/* i advances by 5 per attempt; delay between attempts is in a line not
 * visible here — presumably usec_delay(5); confirm upstream.
 */
1899 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1900 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1901 IXGBE_EEPROM_OPCODE_BITS);
1902 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
/* RDY bit clear means the previous command finished */
1903 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
/* Toggle CS between status polls to terminate each RDSR command */
1907 ixgbe_standby_eeprom(hw);
1911 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1912 * devices (and only 0-5mSec on 5V devices)
1914 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1915 DEBUGOUT("SPI EEPROM Status error\n");
1916 status = IXGBE_ERR_EEPROM;
1923 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1924 * @hw: pointer to hardware structure
1926 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1930 DEBUGFUNC("ixgbe_standby_eeprom");
1932 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1934 /* Toggle CS to flush commands */
/* Raise CS (deselect), flush, then lower CS (reselect) — this terminates
 * any in-progress SPI command and leaves the part ready for the next one.
 */
1935 eec |= IXGBE_EEC_CS;
1936 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1937 IXGBE_WRITE_FLUSH(hw);
1939 eec &= ~IXGBE_EEC_CS;
1940 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1941 IXGBE_WRITE_FLUSH(hw);
1946 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1947 * @hw: pointer to hardware structure
1948 * @data: data to send to the EEPROM
1949 * @count: number of bits to shift out
1951 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1958 DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1960 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1963 * Mask is used to shift "count" bits of "data" out to the EEPROM
1964 * one bit at a time. Determine the starting bit based on count
/* MSB-first: start the mask at the highest bit being sent */
1966 mask = 0x01 << (count - 1);
1968 for (i = 0; i < count; i++) {
1970 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1971 * "1", and then raising and then lowering the clock (the SK
1972 * bit controls the clock input to the EEPROM). A "0" is
1973 * shifted out to the EEPROM by setting "DI" to "0" and then
1974 * raising and then lowering the clock.
1977 eec |= IXGBE_EEC_DI;
1979 eec &= ~IXGBE_EEC_DI;
1981 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1982 IXGBE_WRITE_FLUSH(hw);
/* One full clock pulse latches the DI bit into the EEPROM */
1986 ixgbe_raise_eeprom_clk(hw, &eec);
1987 ixgbe_lower_eeprom_clk(hw, &eec);
1990 * Shift mask to signify next bit of data to shift in to the
1996 /* We leave the "DI" bit set to "0" when we leave this routine. */
1997 eec &= ~IXGBE_EEC_DI;
1998 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1999 IXGBE_WRITE_FLUSH(hw);
2003 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
2004 * @hw: pointer to hardware structure
2006 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
2012 DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
2015 * In order to read a register from the EEPROM, we need to shift
2016 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
2017 * the clock input to the EEPROM (setting the SK bit), and then reading
2018 * the value of the "DO" bit. During this "shifting in" process the
2019 * "DI" bit should always be clear.
2021 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2023 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
/* MSB-first: each DO sample is shifted into the low bit of the result
 * (the accumulation shift itself falls on a line not visible here).
 */
2025 for (i = 0; i < count; i++) {
2027 ixgbe_raise_eeprom_clk(hw, &eec);
2029 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2031 eec &= ~(IXGBE_EEC_DI);
2032 if (eec & IXGBE_EEC_DO)
2035 ixgbe_lower_eeprom_clk(hw, &eec);
2042 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2043 * @hw: pointer to hardware structure
2044 * @eec: EEC register's current value
2046 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2048 DEBUGFUNC("ixgbe_raise_eeprom_clk");
2051 * Raise the clock input to the EEPROM
2052 * (setting the SK bit), then delay
/* Caller's shadow copy of EEC is updated so it stays in sync with HW */
2054 *eec = *eec | IXGBE_EEC_SK;
2055 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2056 IXGBE_WRITE_FLUSH(hw);
2061 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2062 * @hw: pointer to hardware structure
2063 * @eecd: EECD's current value
2065 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2067 DEBUGFUNC("ixgbe_lower_eeprom_clk");
2070 * Lower the clock input to the EEPROM (clearing the SK bit), then
/* Caller's shadow copy of EEC is updated so it stays in sync with HW */
2073 *eec = *eec & ~IXGBE_EEC_SK;
2074 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2075 IXGBE_WRITE_FLUSH(hw);
2080 * ixgbe_release_eeprom - Release EEPROM, release semaphores
2081 * @hw: pointer to hardware structure
2083 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2087 DEBUGFUNC("ixgbe_release_eeprom");
2089 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
/* Deselect the SPI part and idle the clock before giving up the bus */
2091 eec |= IXGBE_EEC_CS; /* Pull CS high */
2092 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2094 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2095 IXGBE_WRITE_FLUSH(hw);
2099 /* Stop requesting EEPROM access */
2100 eec &= ~IXGBE_EEC_REQ;
2101 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
/* Release the SW/FW semaphore taken in ixgbe_acquire_eeprom() */
2103 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2105 /* Delay before attempt to obtain semaphore again to allow FW access */
2106 msec_delay(hw->eeprom.semaphore_delay);
2110 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2111 * @hw: pointer to hardware structure
2113 * Returns a negative error code on error, or the 16-bit checksum
2115 s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
2124 DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
2126 /* Include 0x0-0x3F in the checksum */
2127 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
2128 if (hw->eeprom.ops.read(hw, i, &word)) {
2129 DEBUGOUT("EEPROM read failed\n");
2130 return IXGBE_ERR_EEPROM;
2135 /* Include all data from pointers except for the fw pointer */
2136 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
2137 if (hw->eeprom.ops.read(hw, i, &pointer)) {
2138 DEBUGOUT("EEPROM read failed\n");
2139 return IXGBE_ERR_EEPROM;
2142 /* If the pointer seems invalid */
2143 if (pointer == 0xFFFF || pointer == 0)
/* First word at each pointed-to section is that section's length */
2146 if (hw->eeprom.ops.read(hw, pointer, &length)) {
2147 DEBUGOUT("EEPROM read failed\n");
2148 return IXGBE_ERR_EEPROM;
2151 if (length == 0xFFFF || length == 0)
/* Sum the section body (length words after the length word itself) */
2154 for (j = pointer + 1; j <= pointer + length; j++) {
2155 if (hw->eeprom.ops.read(hw, j, &word)) {
2156 DEBUGOUT("EEPROM read failed\n");
2157 return IXGBE_ERR_EEPROM;
/* Final checksum is the value that makes the total sum IXGBE_EEPROM_SUM */
2163 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
/* Non-negative return carries the 16-bit checksum; negative is an error */
2165 return (s32)checksum;
2169 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2170 * @hw: pointer to hardware structure
2171 * @checksum_val: calculated checksum
2173 * Performs checksum calculation and validates the EEPROM checksum. If the
2174 * caller does not need checksum_val, the value can be NULL.
2176 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2181 u16 read_checksum = 0;
2183 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2185 /* Read the first word from the EEPROM. If this times out or fails, do
2186 * not continue or we could be in for a very long wait while every
2189 status = hw->eeprom.ops.read(hw, 0, &checksum);
2191 DEBUGOUT("EEPROM read failed\n");
/* calc_checksum returns the checksum in the low 16 bits on success */
2195 status = hw->eeprom.ops.calc_checksum(hw);
2199 checksum = (u16)(status & 0xffff);
/* Fetch the stored checksum word for comparison */
2201 status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2203 DEBUGOUT("EEPROM read failed\n");
2207 /* Verify read checksum from EEPROM is the same as
2208 * calculated checksum
2210 if (read_checksum != checksum)
2211 status = IXGBE_ERR_EEPROM_CHECKSUM;
2213 /* If the user cares, return the calculated checksum */
2215 *checksum_val = checksum;
2221 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2222 * @hw: pointer to hardware structure
2224 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2229 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2231 /* Read the first word from the EEPROM. If this times out or fails, do
2232 * not continue or we could be in for a very long wait while every
2235 status = hw->eeprom.ops.read(hw, 0, &checksum);
2237 DEBUGOUT("EEPROM read failed\n");
/* calc_checksum returns the checksum in the low 16 bits on success */
2241 status = hw->eeprom.ops.calc_checksum(hw);
2245 checksum = (u16)(status & 0xffff);
/* Persist the freshly computed checksum into its dedicated EEPROM word */
2247 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
2253 * ixgbe_validate_mac_addr - Validate MAC address
2254 * @mac_addr: pointer to MAC address.
2256 * Tests a MAC address to ensure it is a valid Individual Address.
2258 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2260 s32 status = IXGBE_SUCCESS;
2262 DEBUGFUNC("ixgbe_validate_mac_addr");
2264 /* Make sure it is not a multicast address */
2265 if (IXGBE_IS_MULTICAST(mac_addr)) {
2266 status = IXGBE_ERR_INVALID_MAC_ADDR;
2267 /* Not a broadcast address */
2268 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
2269 status = IXGBE_ERR_INVALID_MAC_ADDR;
2270 /* Reject the zero address */
2271 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2272 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2273 status = IXGBE_ERR_INVALID_MAC_ADDR;
2279 * ixgbe_set_rar_generic - Set Rx address register
2280 * @hw: pointer to hardware structure
2281 * @index: Receive address register to write
2282 * @addr: Address to put into receive address register
2283 * @vmdq: VMDq "set" or "pool" index
2284 * @enable_addr: set flag that address is active
2286 * Puts an ethernet address into a receive address register.
2288 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2291 u32 rar_low, rar_high;
2292 u32 rar_entries = hw->mac.num_rar_entries;
2294 DEBUGFUNC("ixgbe_set_rar_generic");
2296 /* Make sure we are using a valid rar index range */
2297 if (index >= rar_entries) {
2298 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2299 "RAR index %d is out of range.\n", index);
2300 return IXGBE_ERR_INVALID_ARGUMENT;
2303 /* setup VMDq pool selection before this RAR gets enabled */
2304 hw->mac.ops.set_vmdq(hw, index, vmdq);
2307 * HW expects these in little endian so we reverse the byte
2308 * order from network order (big endian) to little endian
/* RAL holds the first 4 address bytes, least-significant byte first */
2310 rar_low = ((u32)addr[0] |
2311 ((u32)addr[1] << 8) |
2312 ((u32)addr[2] << 16) |
2313 ((u32)addr[3] << 24));
2315 * Some parts put the VMDq setting in the extra RAH bits,
2316 * so save everything except the lower 16 bits that hold part
2317 * of the address and the address valid bit.
2319 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2320 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
/* RAH low 16 bits carry the last 2 address bytes */
2321 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
/* Any nonzero enable_addr activates this filter entry via the AV bit */
2323 if (enable_addr != 0)
2324 rar_high |= IXGBE_RAH_AV;
2326 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2327 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2329 return IXGBE_SUCCESS;
2333 * ixgbe_clear_rar_generic - Remove Rx address register
2334 * @hw: pointer to hardware structure
2335 * @index: Receive address register to write
2337 * Clears an ethernet address from a receive address register.
2339 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2342 u32 rar_entries = hw->mac.num_rar_entries;
2344 DEBUGFUNC("ixgbe_clear_rar_generic");
2346 /* Make sure we are using a valid rar index range */
2347 if (index >= rar_entries) {
2348 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2349 "RAR index %d is out of range.\n", index);
2350 return IXGBE_ERR_INVALID_ARGUMENT;
2354 * Some parts put the VMDq setting in the extra RAH bits,
2355 * so save everything except the lower 16 bits that hold part
2356 * of the address and the address valid bit.
2358 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
/* Clear the address bytes and AV bit, preserving the upper RAH bits */
2359 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2361 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2362 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2364 /* clear VMDq pool/queue selection for this RAR */
2365 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2367 return IXGBE_SUCCESS;
2371 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2372 * @hw: pointer to hardware structure
2374 * Places the MAC address in receive address register 0 and clears the rest
2375 * of the receive address registers. Clears the multicast table. Assumes
2376 * the receiver is in reset when the routine is called.
2378 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
2381 u32 rar_entries = hw->mac.num_rar_entries;
2383 DEBUGFUNC("ixgbe_init_rx_addrs_generic");
2386 * If the current mac address is valid, assume it is a software override
2387 * to the permanent address.
2388 * Otherwise, use the permanent address from the eeprom.
2390 if (ixgbe_validate_mac_addr(hw->mac.addr) ==
2391 IXGBE_ERR_INVALID_MAC_ADDR) {
2392 /* Get the MAC address from the RAR0 for later reference */
2393 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2395 DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
2396 hw->mac.addr[0], hw->mac.addr[1],
2398 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2399 hw->mac.addr[4], hw->mac.addr[5]);
2401 /* Setup the receive address. */
2402 DEBUGOUT("Overriding MAC Address in RAR[0]\n");
2403 DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
2404 hw->mac.addr[0], hw->mac.addr[1],
2406 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2407 hw->mac.addr[4], hw->mac.addr[5]);
/* Program the override address into RAR0 and mark it valid (AV) */
2409 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2412 /* clear VMDq pool/queue selection for RAR 0 */
2413 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2415 hw->addr_ctrl.overflow_promisc = 0;
/* RAR0 is always considered in use for the primary MAC address */
2417 hw->addr_ctrl.rar_used_count = 1;
2419 /* Zero out the other receive addresses. */
2420 DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2421 for (i = 1; i < rar_entries; i++) {
2422 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2423 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2427 hw->addr_ctrl.mta_in_use = 0;
/* Program the multicast filter type while clearing the MTA below */
2428 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2430 DEBUGOUT(" Clearing MTA\n");
2431 for (i = 0; i < hw->mac.mcft_size; i++)
2432 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2434 ixgbe_init_uta_tables(hw);
2436 return IXGBE_SUCCESS;
2440 * ixgbe_add_uc_addr - Adds a secondary unicast address.
2441 * @hw: pointer to hardware structure
2442 * @addr: new address
2444 * Adds it to unused receive address register or goes into promiscuous mode.
/*
 * Adds @addr to the next free receive address register for pool @vmdq;
 * when all RARs are in use, bumps the overflow counter so the caller can
 * fall back to promiscuous mode.
 */
2446 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2448 u32 rar_entries = hw->mac.num_rar_entries;
2451 DEBUGFUNC("ixgbe_add_uc_addr");
2453 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2454 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2457 * Place this address in the RAR if there is room,
2458 * else put the controller into promiscuous mode
2460 if (hw->addr_ctrl.rar_used_count < rar_entries) {
2461 rar = hw->addr_ctrl.rar_used_count;
2462 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2463 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2464 hw->addr_ctrl.rar_used_count++;
/* Out of RARs: record the overflow; caller enables UPE if needed. */
2466 hw->addr_ctrl.overflow_promisc++;
2469 DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2473 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2474 * @hw: pointer to hardware structure
2475 * @addr_list: the list of new addresses
2476 * @addr_count: number of addresses
2477 * @next: iterator function to walk the address list
2479 * The given list replaces any existing list. Clears the secondary addrs from
2480 * receive address registers. Uses unused receive address registers for the
2481 * first secondary addresses, and falls back to promiscuous mode as needed.
2483 * Drivers using secondary unicast addresses must set user_set_promisc when
2484 * manually putting the device into promiscuous mode.
/*
 * Replaces the secondary unicast address list: clears the previously used
 * RARs (RAR0, the station address, is preserved), programs the new list via
 * ixgbe_add_uc_addr(), and toggles unicast promiscuous (FCTRL.UPE) only when
 * the overflow state changed and was not set manually by the user.
 */
2486 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2487 u32 addr_count, ixgbe_mc_addr_itr next)
2491 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2496 DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2499 * Clear accounting of old secondary address list,
2500 * don't count RAR[0]
2502 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2503 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2504 hw->addr_ctrl.overflow_promisc = 0;
2506 /* Zero out the other receive addresses */
2507 DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2508 for (i = 0; i < uc_addr_in_use; i++) {
2509 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2510 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2513 /* Add the new addresses */
2514 for (i = 0; i < addr_count; i++) {
2515 DEBUGOUT(" Adding the secondary addresses:\n");
/* @next is a caller-supplied iterator that yields one address per call. */
2516 addr = next(hw, &addr_list, &vmdq);
2517 ixgbe_add_uc_addr(hw, addr, vmdq);
2520 if (hw->addr_ctrl.overflow_promisc) {
2521 /* enable promisc if not already in overflow or set by user */
2522 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2523 DEBUGOUT(" Entering address overflow promisc mode\n");
2524 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2525 fctrl |= IXGBE_FCTRL_UPE;
2526 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2529 /* only disable if set by overflow, not by user */
2530 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2531 DEBUGOUT(" Leaving address overflow promisc mode\n");
2532 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2533 fctrl &= ~IXGBE_FCTRL_UPE;
2534 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2538 DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2539 return IXGBE_SUCCESS;
2543 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2544 * @hw: pointer to hardware structure
2545 * @mc_addr: the multicast address
2547 * Extracts the 12 bits, from a multicast address, to determine which
2548 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
2549 * incoming rx multicast addresses, to determine the bit-vector to check in
2550 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2551 * by the MO field of the MCSTCTRL. The MO field is set during initialization
2552 * to mc_filter_type.
/*
 * Hashes the top bytes of @mc_addr into a 12-bit MTA bit-vector index.
 * Which 12 address bits are used is selected by mc_filter_type (the MO
 * field programmed into MCSTCTRL during init).
 */
2554 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2558 DEBUGFUNC("ixgbe_mta_vector");
2560 switch (hw->mac.mc_filter_type) {
2561 case 0: /* use bits [47:36] of the address */
2562 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2564 case 1: /* use bits [46:35] of the address */
2565 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2567 case 2: /* use bits [45:34] of the address */
2568 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2570 case 3: /* use bits [43:32] of the address */
2571 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2573 default: /* Invalid mc_filter_type */
2574 DEBUGOUT("MC filter type param set incorrectly\n");
2579 /* vector can only be 12-bits or boundary will be exceeded */
2585 * ixgbe_set_mta - Set bit-vector in multicast table
2586 * @hw: pointer to hardware structure
2587 * @hash_value: Multicast address hash value
2589 * Sets the bit-vector in the multicast table.
/*
 * Sets the MTA-shadow bit corresponding to @mc_addr; the shadow is flushed
 * to hardware by the caller (ixgbe_update_mc_addr_list_generic).
 */
2591 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2597 DEBUGFUNC("ixgbe_set_mta");
2599 hw->addr_ctrl.mta_in_use++;
2601 vector = ixgbe_mta_vector(hw, mc_addr);
2602 DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2605 * The MTA is a register array of 128 32-bit registers. It is treated
2606 * like an array of 4096 bits. We want to set bit
2607 * BitArray[vector_value]. So we figure out what register the bit is
2608 * in, read it, OR in the new bit, then write back the new value. The
2609 * register is determined by the upper 7 bits of the vector value and
2610 * the bit within that register are determined by the lower 5 bits of
2613 vector_reg = (vector >> 5) & 0x7F;
2614 vector_bit = vector & 0x1F;
/* Only the local shadow copy is touched here, not the MTA registers. */
2615 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2619 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2620 * @hw: pointer to hardware structure
2621 * @mc_addr_list: the list of new multicast addresses
2622 * @mc_addr_count: number of addresses
2623 * @next: iterator function to walk the multicast address list
2624 * @clear: flag, when set clears the table beforehand
2626 * When the clear flag is set, the given list replaces any existing list.
2627 * Hashes the given addresses into the multicast table.
/*
 * Rebuilds the multicast filter: hashes each address from the iterator
 * into the mta_shadow, writes the full shadow to the MTA register array,
 * and enables the multicast filter (MCSTCTRL.MFE) when any bit is set.
 */
2629 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2630 u32 mc_addr_count, ixgbe_mc_addr_itr next,
2636 DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2639 * Set the new number of MC addresses that we are being requested to
2642 hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2643 hw->addr_ctrl.mta_in_use = 0;
2645 /* Clear mta_shadow */
2647 DEBUGOUT(" Clearing MTA\n");
2648 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2651 /* Update mta_shadow */
2652 for (i = 0; i < mc_addr_count; i++) {
2653 DEBUGOUT(" Adding the multicast addresses:\n");
2654 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
/* Flush the whole shadow table to hardware in one pass. */
2658 for (i = 0; i < hw->mac.mcft_size; i++)
2659 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2660 hw->mac.mta_shadow[i]);
2662 if (hw->addr_ctrl.mta_in_use > 0)
2663 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2664 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2666 DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2667 return IXGBE_SUCCESS;
2671 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2672 * @hw: pointer to hardware structure
2674 * Enables multicast address in RAR and the use of the multicast hash table.
/*
 * Re-enables the multicast hash filter (MCSTCTRL.MFE), but only if the
 * shadow accounting says at least one MTA bit is in use.
 */
2676 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2678 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2680 DEBUGFUNC("ixgbe_enable_mc_generic");
2682 if (a->mta_in_use > 0)
2683 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2684 hw->mac.mc_filter_type);
2686 return IXGBE_SUCCESS;
2690 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2691 * @hw: pointer to hardware structure
2693 * Disables multicast address in RAR and the use of the multicast hash table.
/*
 * Disables the multicast hash filter by rewriting MCSTCTRL without the
 * MFE bit; the filter-type (MO) field is preserved.
 */
2695 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2697 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2699 DEBUGFUNC("ixgbe_disable_mc_generic");
2701 if (a->mta_in_use > 0)
2702 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2704 return IXGBE_SUCCESS;
2708 * ixgbe_fc_enable_generic - Enable flow control
2709 * @hw: pointer to hardware structure
2711 * Enable flow control according to the current settings.
/*
 * Programs flow control per hw->fc: validates the water marks, runs FC
 * autoneg, translates fc.current_mode into MFLCN (Rx) / FCCFG (Tx) bits,
 * then writes per-TC XON thresholds (FCRTL/FCRTH), the pause timers
 * (FCTTV) and the refresh threshold (FCRTV).
 * Returns IXGBE_ERR_INVALID_LINK_SETTINGS / IXGBE_ERR_CONFIG on bad input.
 */
2713 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2715 s32 ret_val = IXGBE_SUCCESS;
2716 u32 mflcn_reg, fccfg_reg;
2721 DEBUGFUNC("ixgbe_fc_enable_generic");
2723 /* Validate the water mark configuration */
2724 if (!hw->fc.pause_time) {
2725 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2729 /* Low water mark of zero causes XOFF floods */
2730 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2731 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2732 hw->fc.high_water[i]) {
2733 if (!hw->fc.low_water[i] ||
2734 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2735 DEBUGOUT("Invalid water mark configuration\n");
2736 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2742 /* Negotiate the fc mode to use */
2743 hw->mac.ops.fc_autoneg(hw);
2745 /* Disable any previous flow control settings */
2746 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2747 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2749 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2750 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2753 * The possible values of fc.current_mode are:
2754 * 0: Flow control is completely disabled
2755 * 1: Rx flow control is enabled (we can receive pause frames,
2756 * but not send pause frames).
2757 * 2: Tx flow control is enabled (we can send pause frames but
2758 * we do not support receiving pause frames).
2759 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2762 switch (hw->fc.current_mode) {
2765 * Flow control is disabled by software override or autoneg.
2766 * The code below will actually disable it in the HW.
2769 case ixgbe_fc_rx_pause:
2771 * Rx Flow control is enabled and Tx Flow control is
2772 * disabled by software override. Since there really
2773 * isn't a way to advertise that we are capable of RX
2774 * Pause ONLY, we will advertise that we support both
2775 * symmetric and asymmetric Rx PAUSE. Later, we will
2776 * disable the adapter's ability to send PAUSE frames.
2778 mflcn_reg |= IXGBE_MFLCN_RFCE;
2780 case ixgbe_fc_tx_pause:
2782 * Tx Flow control is enabled, and Rx Flow control is
2783 * disabled by software override.
2785 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2788 /* Flow control (both Rx and Tx) is enabled by SW override. */
2789 mflcn_reg |= IXGBE_MFLCN_RFCE;
2790 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2793 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
2794 "Flow control param set incorrectly\n");
2795 ret_val = IXGBE_ERR_CONFIG;
2800 /* Set 802.3x based flow control settings. */
2801 mflcn_reg |= IXGBE_MFLCN_DPF;
2802 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2803 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2806 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2807 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2808 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2809 hw->fc.high_water[i]) {
/* Water marks are kept in KB units; << 10 converts to bytes. */
2810 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2811 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2812 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2814 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2816 * In order to prevent Tx hangs when the internal Tx
2817 * switch is enabled we must set the high water mark
2818 * to the Rx packet buffer size - 24KB. This allows
2819 * the Tx switch to function even under heavy Rx
2822 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
2825 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2828 /* Configure pause time (2 TCs per register) */
/* Duplicate the 16-bit pause time into both halves of each FCTTV. */
2829 reg = hw->fc.pause_time * 0x00010001;
2830 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2831 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2833 /* Configure flow control refresh threshold value */
2834 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2841 * ixgbe_negotiate_fc - Negotiate flow control
2842 * @hw: pointer to hardware structure
2843 * @adv_reg: flow control advertised settings
2844 * @lp_reg: link partner's flow control settings
2845 * @adv_sym: symmetric pause bit in advertisement
2846 * @adv_asm: asymmetric pause bit in advertisement
2847 * @lp_sym: symmetric pause bit in link partner advertisement
2848 * @lp_asm: asymmetric pause bit in link partner advertisement
2850 * Find the intersection between advertised settings and link partner's
2851 * advertised settings
/*
 * Resolves hw->fc.current_mode from the intersection of the local and
 * link-partner symmetric/asymmetric pause advertisements, per the IEEE
 * 802.3 Annex 28B pause resolution rules.
 */
2853 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2854 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2856 if ((!(adv_reg)) || (!(lp_reg))) {
2857 ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2858 "Local or link partner's advertised flow control "
2859 "settings are NULL. Local: %x, link partner: %x\n",
2861 return IXGBE_ERR_FC_NOT_NEGOTIATED;
/* Both sides advertise symmetric pause -> FULL or RX-only. */
2864 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2866 * Now we need to check if the user selected Rx ONLY
2867 * of pause frames. In this case, we had to advertise
2868 * FULL flow control because we could not advertise RX
2869 * ONLY. Hence, we must now check to see if we need to
2870 * turn OFF the TRANSMISSION of PAUSE frames.
2872 if (hw->fc.requested_mode == ixgbe_fc_full) {
2873 hw->fc.current_mode = ixgbe_fc_full;
2874 DEBUGOUT("Flow Control = FULL.\n");
2876 hw->fc.current_mode = ixgbe_fc_rx_pause;
2877 DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2879 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2880 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2881 hw->fc.current_mode = ixgbe_fc_tx_pause;
2882 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2883 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2884 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2885 hw->fc.current_mode = ixgbe_fc_rx_pause;
2886 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2888 hw->fc.current_mode = ixgbe_fc_none;
2889 DEBUGOUT("Flow Control = NONE.\n");
2891 return IXGBE_SUCCESS;
2895 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2896 * @hw: pointer to hardware structure
2898 * Enable flow control according on 1 gig fiber.
/*
 * Resolves 1G fiber flow control from the PCS autoneg advertisement and
 * link-partner ability registers; bails with IXGBE_ERR_FC_NOT_NEGOTIATED
 * when AN did not complete or timed out.
 */
2900 STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2902 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2903 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2906 * On multispeed fiber at 1g, bail out if
2907 * - link is up but AN did not complete, or if
2908 * - link is up and AN completed but timed out
2911 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2912 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2913 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
2914 DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
2918 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2919 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
/* Same SYM/ASM bit positions apply to both local and LP registers. */
2921 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2922 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2923 IXGBE_PCS1GANA_ASM_PAUSE,
2924 IXGBE_PCS1GANA_SYM_PAUSE,
2925 IXGBE_PCS1GANA_ASM_PAUSE);
2932 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2933 * @hw: pointer to hardware structure
2935 * Enable flow control according to IEEE clause 37.
/*
 * Resolves backplane (KX/KX4) flow control from AUTOC and the link
 * partner's ANLP1 register; bails when backplane AN did not complete or,
 * on 82599, when the partner does not support AN.
 */
2937 STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2939 u32 links2, anlp1_reg, autoc_reg, links;
2940 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2943 * On backplane, bail out if
2944 * - backplane autoneg was not completed, or if
2945 * - we are 82599 and link partner is not AN enabled
2947 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2948 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
2949 DEBUGOUT("Auto-Negotiation did not complete\n");
2953 if (hw->mac.type == ixgbe_mac_82599EB) {
2954 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2955 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2956 DEBUGOUT("Link partner is not AN enabled\n");
2961 * Read the 10g AN autoc and LP ability registers and resolve
2962 * local flow control settings accordingly
2964 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2965 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2967 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2968 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2969 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2976 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2977 * @hw: pointer to hardware structure
2979 * Enable flow control according to IEEE clause 37.
/*
 * Resolves copper (PHY) flow control by reading the MDIO autoneg
 * advertisement and link-partner ability registers, then running the
 * standard pause resolution.
 * NOTE(review): read_reg return codes are not checked here; on failure the
 * zero-initialized ability values fall through to ixgbe_negotiate_fc(),
 * which rejects zero advertisements.
 */
2981 STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2983 u16 technology_ability_reg = 0;
2984 u16 lp_technology_ability_reg = 0;
2986 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2987 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2988 &technology_ability_reg);
2989 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2990 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2991 &lp_technology_ability_reg);
2993 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2994 (u32)lp_technology_ability_reg,
2995 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2996 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
3000 * ixgbe_fc_autoneg - Configure flow control
3001 * @hw: pointer to hardware structure
3003 * Compares our advertised flow control capabilities to those advertised by
3004 * our link partner, and determines the proper flow control mode to use.
/*
 * Dispatches flow control autoneg by media type (fiber at 1G, backplane,
 * copper with FC-AN support). On any failure or bail-out, falls back to
 * the user's requested_mode and records fc_was_autonegged accordingly.
 */
3006 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
3008 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3009 ixgbe_link_speed speed;
3012 DEBUGFUNC("ixgbe_fc_autoneg");
3015 * AN should have completed when the cable was plugged in.
3016 * Look for reasons to bail out. Bail out if:
3017 * - FC autoneg is disabled, or if
3020 if (hw->fc.disable_fc_autoneg) {
3021 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3022 "Flow control autoneg is disabled");
3026 hw->mac.ops.check_link(hw, &speed, &link_up, false);
3028 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
3032 switch (hw->phy.media_type) {
3033 /* Autoneg flow control on fiber adapters */
3034 case ixgbe_media_type_fiber_qsfp:
3035 case ixgbe_media_type_fiber:
/* FC autoneg over fiber is only defined at 1G link speed. */
3036 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
3037 ret_val = ixgbe_fc_autoneg_fiber(hw);
3040 /* Autoneg flow control on backplane adapters */
3041 case ixgbe_media_type_backplane:
3042 ret_val = ixgbe_fc_autoneg_backplane(hw);
3045 /* Autoneg flow control on copper adapters */
3046 case ixgbe_media_type_copper:
3047 if (ixgbe_device_supports_autoneg_fc(hw))
3048 ret_val = ixgbe_fc_autoneg_copper(hw);
3056 if (ret_val == IXGBE_SUCCESS) {
3057 hw->fc.fc_was_autonegged = true;
3059 hw->fc.fc_was_autonegged = false;
3060 hw->fc.current_mode = hw->fc.requested_mode;
3065 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3066 * @hw: pointer to hardware structure
3068 * System-wide timeout range is encoded in PCIe Device Control2 register.
3070 * Add 10% to specified maximum and return the number of times to poll for
3071 * completion timeout, in units of 100 microsec. Never return less than
3072 * 800 = 80 millisec.
/*
 * Derives a poll count (in 100us units) from the PCIe completion-timeout
 * range encoded in Device Control 2, padded by 10%; defaults to the
 * 80ms minimum for short/unknown encodings.
 */
3074 STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3079 devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3080 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3083 case IXGBE_PCIDEVCTRL2_65_130ms:
3084 pollcnt = 1300; /* 130 millisec */
3086 case IXGBE_PCIDEVCTRL2_260_520ms:
3087 pollcnt = 5200; /* 520 millisec */
3089 case IXGBE_PCIDEVCTRL2_1_2s:
3090 pollcnt = 20000; /* 2 sec */
3092 case IXGBE_PCIDEVCTRL2_4_8s:
3093 pollcnt = 80000; /* 8 sec */
3095 case IXGBE_PCIDEVCTRL2_17_34s:
3096 pollcnt = 34000; /* 34 sec */
3098 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
3099 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
3100 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
3101 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
3103 pollcnt = 800; /* 80 millisec minimum */
3107 /* add 10% to spec maximum */
3108 return (pollcnt * 11) / 10;
3112 * ixgbe_disable_pcie_master - Disable PCI-express master access
3113 * @hw: pointer to hardware structure
3115 * Disables PCI-Express master access and verifies there are no pending
3116 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
3117 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
3118 * is returned signifying master requests disabled.
/*
 * Blocks new PCIe master requests (CTRL.GIO_DIS), polls STATUS.GIO for the
 * disable to take effect, and if it does not, flags the double-reset
 * workaround and polls the PCIe transaction-pending bit instead.
 * Returns IXGBE_ERR_MASTER_REQUESTS_PENDING when requests never drain.
 */
3120 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
3122 s32 status = IXGBE_SUCCESS;
3126 DEBUGFUNC("ixgbe_disable_pcie_master");
3128 /* Always set this bit to ensure any future transactions are blocked */
3129 IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
3131 /* Exit if master requests are blocked */
3132 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
3133 IXGBE_REMOVED(hw->hw_addr))
3136 /* Poll for master request bit to clear */
3137 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
3139 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
3144 * Two consecutive resets are required via CTRL.RST per datasheet
3145 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
3146 * of this need. The first reset prevents new master requests from
3147 * being issued by our device. We then must wait 1usec or more for any
3148 * remaining completions from the PCIe bus to trickle in, and then reset
3149 * again to clear out any effects they may have had on our device.
3151 DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
3152 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
/* X550 and later skip the config-space transaction-pending poll below. */
3154 if (hw->mac.type >= ixgbe_mac_X550)
3158 * Before proceeding, make sure that the PCIe block does not have
3159 * transactions pending.
3161 poll = ixgbe_pcie_timeout_poll(hw);
3162 for (i = 0; i < poll; i++) {
3164 value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
3165 if (IXGBE_REMOVED(hw->hw_addr))
3167 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
3171 ERROR_REPORT1(IXGBE_ERROR_POLLING,
3172 "PCIe transaction pending bit also did not clear.\n");
3173 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
3180 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
3181 * @hw: pointer to hardware structure
3182 * @mask: Mask to specify which semaphore to acquire
3184 * Acquires the SWFW semaphore through the GSSR register for the specified
3185 * function (CSR, PHY0, PHY1, EEPROM, Flash)
/*
 * Acquires the SW/FW semaphore bits in GSSR named by @mask. All GSSR
 * access is serialized by the EEPROM (SWSM) semaphore. The corresponding
 * FW bits sit 5 positions above the SW bits, hence fwmask = mask << 5.
 * On timeout, force-releases the stuck bits once; returns
 * IXGBE_ERR_SWFW_SYNC on failure.
 */
3187 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3191 u32 fwmask = mask << 5;
3195 DEBUGFUNC("ixgbe_acquire_swfw_sync");
3197 for (i = 0; i < timeout; i++) {
3199 * SW NVM semaphore bit is used for access to all
3200 * SW_FW_SYNC bits (not just NVM)
3202 if (ixgbe_get_eeprom_semaphore(hw))
3203 return IXGBE_ERR_SWFW_SYNC;
3205 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3206 if (!(gssr & (fwmask | swmask))) {
3208 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3209 ixgbe_release_eeprom_semaphore(hw);
3210 return IXGBE_SUCCESS;
3212 /* Resource is currently in use by FW or SW */
3213 ixgbe_release_eeprom_semaphore(hw);
3218 /* If time expired clear the bits holding the lock and retry */
3219 if (gssr & (fwmask | swmask))
3220 ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
3223 return IXGBE_ERR_SWFW_SYNC;
3227 * ixgbe_release_swfw_sync - Release SWFW semaphore
3228 * @hw: pointer to hardware structure
3229 * @mask: Mask to specify which semaphore to release
3231 * Releases the SWFW semaphore through the GSSR register for the specified
3232 * function (CSR, PHY0, PHY1, EEPROM, Flash)
/*
 * Releases the GSSR semaphore bits in @mask, holding the EEPROM (SWSM)
 * semaphore around the read-modify-write of GSSR.
 */
3234 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3239 DEBUGFUNC("ixgbe_release_swfw_sync");
3241 ixgbe_get_eeprom_semaphore(hw);
3243 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3245 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3247 ixgbe_release_eeprom_semaphore(hw);
3251 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3252 * @hw: pointer to hardware structure
3254 * Stops the receive data path and waits for the HW to internally empty
3255 * the Rx security block
/*
 * Sets SECRXCTRL.RX_DIS to stop the Rx data path, then polls SECRXSTAT
 * for the security block to report ready (empty). A timeout is logged
 * but not treated as an error; init continues regardless.
 */
3257 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3259 #define IXGBE_MAX_SECRX_POLL 40
3264 DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3267 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3268 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3269 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3270 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3271 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3272 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3275 /* Use interrupt-safe sleep just in case */
3279 /* For informational purposes only */
3280 if (i >= IXGBE_MAX_SECRX_POLL)
3281 DEBUGOUT("Rx unit being enabled before security "
3282 "path fully disabled. Continuing with init.\n");
3284 return IXGBE_SUCCESS;
3288 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
3289 * @hw: pointer to hardware structure
3290 * @reg_val: Value we read from AUTOC
3292 * The default case requires no protection so just to the register read.
/*
 * Generic AUTOC read: no SW/FW lock is needed on these MACs, so this is
 * a plain register read; @locked semantics are handled by MAC-specific
 * overrides elsewhere.
 */
3294 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
3297 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3298 return IXGBE_SUCCESS;
3302 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
3303 * @hw: pointer to hardware structure
3304 * @reg_val: value to write to AUTOC
3305 * @locked: bool to indicate whether the SW/FW lock was already taken by
3308 * The default case requires no protection so just to the register write.
/*
 * Generic AUTOC write: no SW/FW lock is needed on these MACs, so @locked
 * is deliberately unused and the register is written directly.
 */
3310 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
3312 UNREFERENCED_1PARAMETER(locked);
3314 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
3315 return IXGBE_SUCCESS;
3319 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3320 * @hw: pointer to hardware structure
3322 * Enables the receive data path.
/*
 * Clears SECRXCTRL.RX_DIS to restart the Rx data path and flushes the
 * write so it reaches hardware before the caller proceeds.
 */
3324 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3328 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3330 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3331 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3332 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3333 IXGBE_WRITE_FLUSH(hw);
3335 return IXGBE_SUCCESS;
3339 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3340 * @hw: pointer to hardware structure
3341 * @regval: register value to write to RXCTRL
3343 * Enables the Rx DMA unit
/*
 * Enables or disables the Rx DMA unit based on the RXEN bit of @regval,
 * delegating to the enable/disable helpers rather than writing RXCTRL
 * directly.
 */
3345 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3347 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3349 if (regval & IXGBE_RXCTRL_RXEN)
3350 ixgbe_enable_rx(hw);
3352 ixgbe_disable_rx(hw);
3354 return IXGBE_SUCCESS;
3358 * ixgbe_blink_led_start_generic - Blink LED based on index.
3359 * @hw: pointer to hardware structure
3360 * @index: led number to blink
/*
 * Starts blinking LED @index. LED auto-blink requires link; when link is
 * down the link is forced up via AUTOC (FLU + AN_RESTART) through the
 * protected prot_autoc read/write ops before programming LEDCTL.
 */
3362 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
3364 ixgbe_link_speed speed = 0;
3367 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3368 s32 ret_val = IXGBE_SUCCESS;
3369 bool locked = false;
3371 DEBUGFUNC("ixgbe_blink_led_start_generic");
3374 * Link must be up to auto-blink the LEDs;
3375 * Force it if link is down.
3377 hw->mac.ops.check_link(hw, &speed, &link_up, false);
3380 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3381 if (ret_val != IXGBE_SUCCESS)
3384 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3385 autoc_reg |= IXGBE_AUTOC_FLU;
3387 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3388 if (ret_val != IXGBE_SUCCESS)
3391 IXGBE_WRITE_FLUSH(hw);
/* Select blink mode for this LED in LEDCTL. */
3395 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3396 led_reg |= IXGBE_LED_BLINK(index);
3397 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3398 IXGBE_WRITE_FLUSH(hw);
3405 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3406 * @hw: pointer to hardware structure
3407 * @index: led number to stop blinking
/*
 * Stops blinking LED @index: drops forced link-up (AUTOC.FLU, with an AN
 * restart) via the protected AUTOC ops, then reprograms the LED to the
 * normal link-activity mode in LEDCTL.
 */
3409 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
3412 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3413 s32 ret_val = IXGBE_SUCCESS;
3414 bool locked = false;
3416 DEBUGFUNC("ixgbe_blink_led_stop_generic");
3418 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3419 if (ret_val != IXGBE_SUCCESS)
3422 autoc_reg &= ~IXGBE_AUTOC_FLU;
3423 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3425 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3426 if (ret_val != IXGBE_SUCCESS)
3429 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3430 led_reg &= ~IXGBE_LED_BLINK(index);
3431 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3432 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3433 IXGBE_WRITE_FLUSH(hw);
3440 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3441 * @hw: pointer to hardware structure
3442 * @san_mac_offset: SAN MAC address offset
3444 * This function will read the EEPROM location for the SAN MAC address
3445 * pointer, and returns the value at that location. This is used in both
3446 * get and set mac_addr routines.
/*
 * Reads the SAN MAC address pointer word from the EEPROM into
 * @san_mac_offset; logs and propagates the EEPROM read error on failure.
 */
3448 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3449 u16 *san_mac_offset)
3453 DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3456 * First read the EEPROM pointer to see if the MAC addresses are
3459 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3462 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3463 "eeprom at offset %d failed",
3464 IXGBE_SAN_MAC_ADDR_PTR);
3471 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3472 * @hw: pointer to hardware structure
3473 * @san_mac_addr: SAN MAC address
3475 * Reads the SAN MAC address from the EEPROM, if it's available. This is
3476 * per-port, so set_lan_id() must be called before reading the addresses.
3477 * set_lan_id() is called by identify_sfp(), but this cannot be relied
3478 * upon for non-SFP connections, so we must call it here.
/*
 * Reads the per-port SAN MAC address (3 EEPROM words, little-endian) into
 * @san_mac_addr. When no SAN address pointer exists, the buffer is filled
 * with 0xFF and IXGBE_SUCCESS is still returned - absence is not an error.
 */
3480 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3482 u16 san_mac_data, san_mac_offset;
3486 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3489 * First read the EEPROM pointer to see if the MAC addresses are
3490 * available. If they're not, no point in calling set_lan_id() here.
3492 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3493 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3494 goto san_mac_addr_out;
3496 /* make sure we know which port we need to program */
3497 hw->mac.ops.set_lan_id(hw);
3498 /* apply the port offset to the address offset */
3499 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3500 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3501 for (i = 0; i < 3; i++) {
3502 ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3505 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3506 "eeprom read at offset %d failed",
3508 goto san_mac_addr_out;
/* Each EEPROM word supplies two address bytes, low byte first. */
3510 san_mac_addr[i * 2] = (u8)(san_mac_data);
3511 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3514 return IXGBE_SUCCESS;
3518 * No addresses available in this EEPROM. It's not an
3519 * error though, so just wipe the local address and return.
3521 for (i = 0; i < 6; i++)
3522 san_mac_addr[i] = 0xFF;
3523 return IXGBE_SUCCESS;
3527 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3528 * @hw: pointer to hardware structure
3529 * @san_mac_addr: SAN MAC address
3531 * Write a SAN MAC address to the EEPROM.
/*
 * Writes @san_mac_addr to the per-port SAN MAC area of the EEPROM as
 * 3 little-endian words. Returns IXGBE_ERR_NO_SAN_ADDR_PTR when the
 * EEPROM has no SAN address pointer.
 */
3533 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3536 u16 san_mac_data, san_mac_offset;
3539 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3541 /* Look for SAN mac address pointer. If not defined, return */
3542 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3543 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3544 return IXGBE_ERR_NO_SAN_ADDR_PTR;
3546 /* Make sure we know which port we need to write */
3547 hw->mac.ops.set_lan_id(hw);
3548 /* Apply the port offset to the address offset */
3549 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3550 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3552 for (i = 0; i < 3; i++) {
/* Pack two address bytes per word, high byte in the upper half. */
3553 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3554 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3555 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3559 return IXGBE_SUCCESS;
3563 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3564 * @hw: pointer to hardware structure
3566 * Read PCIe configuration space, and get the MSI-X vector count from
3567 * the capabilities table.
/*
 * Reads the MSI-X table size from PCI config space at the MAC-specific
 * capability offset, converts it from the zero-based hardware encoding,
 * and clamps it to the per-MAC maximum.
 */
3569 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3575 switch (hw->mac.type) {
3576 case ixgbe_mac_82598EB:
3577 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3578 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3580 case ixgbe_mac_82599EB:
3581 case ixgbe_mac_X540:
3582 case ixgbe_mac_X550:
3583 case ixgbe_mac_X550EM_x:
3584 case ixgbe_mac_X550EM_a:
3585 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3586 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3592 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3593 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3594 if (IXGBE_REMOVED(hw->hw_addr))
3596 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3598 /* MSI-X count is zero-based in HW */
3601 if (msix_count > max_msix_count)
3602 msix_count = max_msix_count;
3608 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3609 * @hw: pointer to hardware structure
3610 * @addr: Address to put into receive address register
3611 * @vmdq: VMDq pool to assign
3613 * Puts an ethernet address into a receive address register, or
3614 * finds the rar that it is aleady in; adds to the pool list
3616 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3618 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3619 u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3621 u32 rar_low, rar_high;
3622 u32 addr_low, addr_high;
3624 DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3626 /* swap bytes for HW little endian */
3627 addr_low = addr[0] | (addr[1] << 8)
3630 addr_high = addr[4] | (addr[5] << 8);
3633 * Either find the mac_id in rar or find the first empty space.
3634 * rar_highwater points to just after the highest currently used
3635 * rar in order to shorten the search. It grows when we add a new
3638 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3639 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3641 if (((IXGBE_RAH_AV & rar_high) == 0)
3642 && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3643 first_empty_rar = rar;
3644 } else if ((rar_high & 0xFFFF) == addr_high) {
3645 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3646 if (rar_low == addr_low)
3647 break; /* found it already in the rars */
3651 if (rar < hw->mac.rar_highwater) {
3652 /* already there so just add to the pool bits */
3653 ixgbe_set_vmdq(hw, rar, vmdq);
3654 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3655 /* stick it into first empty RAR slot we found */
3656 rar = first_empty_rar;
3657 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3658 } else if (rar == hw->mac.rar_highwater) {
3659 /* add it to the top of the list and inc the highwater mark */
3660 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3661 hw->mac.rar_highwater++;
3662 } else if (rar >= hw->mac.num_rar_entries) {
3663 return IXGBE_ERR_INVALID_MAC_ADDR;
3667 * If we found rar[0], make sure the default pool bit (we use pool 0)
3668 * remains cleared to be sure default pool packets will get delivered
3671 ixgbe_clear_vmdq(hw, rar, 0);
3677 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3678 * @hw: pointer to hardware struct
3679 * @rar: receive address register index to disassociate
3680 * @vmdq: VMDq pool index to remove from the rar
3682 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3684 u32 mpsar_lo, mpsar_hi;
3685 u32 rar_entries = hw->mac.num_rar_entries;
3687 DEBUGFUNC("ixgbe_clear_vmdq_generic");
3689 /* Make sure we are using a valid rar index range */
3690 if (rar >= rar_entries) {
3691 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3692 "RAR index %d is out of range.\n", rar);
3693 return IXGBE_ERR_INVALID_ARGUMENT;
3696 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3697 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3699 if (IXGBE_REMOVED(hw->hw_addr))
3702 if (!mpsar_lo && !mpsar_hi)
3705 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3707 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3711 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3714 } else if (vmdq < 32) {
3715 mpsar_lo &= ~(1 << vmdq);
3716 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3718 mpsar_hi &= ~(1 << (vmdq - 32));
3719 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3722 /* was that the last pool using this rar? */
3723 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
3724 hw->mac.ops.clear_rar(hw, rar);
3726 return IXGBE_SUCCESS;
3730 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3731 * @hw: pointer to hardware struct
3732 * @rar: receive address register index to associate with a VMDq index
3733 * @vmdq: VMDq pool index
3735 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3738 u32 rar_entries = hw->mac.num_rar_entries;
3740 DEBUGFUNC("ixgbe_set_vmdq_generic");
3742 /* Make sure we are using a valid rar index range */
3743 if (rar >= rar_entries) {
3744 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3745 "RAR index %d is out of range.\n", rar);
3746 return IXGBE_ERR_INVALID_ARGUMENT;
3750 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3752 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3754 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3755 mpsar |= 1 << (vmdq - 32);
3756 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3758 return IXGBE_SUCCESS;
3762 * This function should only be involved in the IOV mode.
3763 * In IOV mode, Default pool is next pool after the number of
3764 * VFs advertized and not 0.
3765 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3767 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3768 * @hw: pointer to hardware struct
3769 * @vmdq: VMDq pool index
3771 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3773 u32 rar = hw->mac.san_mac_rar_index;
3775 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3778 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3779 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3781 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3782 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3785 return IXGBE_SUCCESS;
3789 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3790 * @hw: pointer to hardware structure
3792 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3796 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3797 DEBUGOUT(" Clearing UTA\n");
3799 for (i = 0; i < 128; i++)
3800 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3802 return IXGBE_SUCCESS;
3806 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3807 * @hw: pointer to hardware structure
3808 * @vlan: VLAN id to write to VLAN filter
3810 * return the VLVF index where this VLAN id should be placed
3813 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
3815 s32 regindex, first_empty_slot;
3818 /* short cut the special case */
3822 /* if vlvf_bypass is set we don't want to use an empty slot, we
3823 * will simply bypass the VLVF if there are no entries present in the
3824 * VLVF that contain our VLAN
3826 first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
3828 /* add VLAN enable bit for comparison */
3829 vlan |= IXGBE_VLVF_VIEN;
3831 /* Search for the vlan id in the VLVF entries. Save off the first empty
3832 * slot found along the way.
3834 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
3836 for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
3837 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3840 if (!first_empty_slot && !bits)
3841 first_empty_slot = regindex;
3844 /* If we are here then we didn't find the VLAN. Return first empty
3845 * slot we found during our search, else error.
3847 if (!first_empty_slot)
3848 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");
3850 return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
3854 * ixgbe_set_vfta_generic - Set VLAN filter table
3855 * @hw: pointer to hardware structure
3856 * @vlan: VLAN id to write to VLAN filter
3857 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
3858 * @vlan_on: boolean flag to turn on/off VLAN
3859 * @vlvf_bypass: boolean flag indicating updating default pool is okay
3861 * Turn on/off specified VLAN in the VLAN filter table.
3863 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3864 bool vlan_on, bool vlvf_bypass)
3866 u32 regidx, vfta_delta, vfta;
3869 DEBUGFUNC("ixgbe_set_vfta_generic");
3871 if (vlan > 4095 || vind > 63)
3872 return IXGBE_ERR_PARAM;
3875 * this is a 2 part operation - first the VFTA, then the
3876 * VLVF and VLVFB if VT Mode is set
3877 * We don't write the VFTA until we know the VLVF part succeeded.
3881 * The VFTA is a bitstring made up of 128 32-bit registers
3882 * that enable the particular VLAN id, much like the MTA:
3883 * bits[11-5]: which register
3884 * bits[4-0]: which bit in the register
3887 vfta_delta = 1 << (vlan % 32);
3888 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
3890 /* vfta_delta represents the difference between the current value
3891 * of vfta and the value we want in the register. Since the diff
3892 * is an XOR mask we can just update the vfta using an XOR
3894 vfta_delta &= vlan_on ? ~vfta : vfta;
3898 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3900 ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
3902 if (ret_val != IXGBE_SUCCESS) {
3909 /* Update VFTA now that we are ready for traffic */
3911 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
3913 return IXGBE_SUCCESS;
3917 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3918 * @hw: pointer to hardware structure
3919 * @vlan: VLAN id to write to VLAN filter
3920 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
3921 * @vlan_on: boolean flag to turn on/off VLAN in VLVF
3922 * @vfta_delta: pointer to the difference between the current value of VFTA
3923 * and the desired value
3924 * @vfta: the desired value of the VFTA
3925 * @vlvf_bypass: boolean flag indicating updating default pool is okay
3927 * Turn on/off specified bit in VLVF table.
3929 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3930 bool vlan_on, u32 *vfta_delta, u32 vfta,
3936 DEBUGFUNC("ixgbe_set_vlvf_generic");
3938 if (vlan > 4095 || vind > 63)
3939 return IXGBE_ERR_PARAM;
3941 /* If VT Mode is set
3943 * make sure the vlan is in VLVF
3944 * set the vind bit in the matching VLVFB
3946 * clear the pool bit and possibly the vind
3948 if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
3949 return IXGBE_SUCCESS;
3950 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
3954 bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
3956 /* set the pool bit */
3957 bits |= 1 << (vind % 32);
3961 /* clear the pool bit */
3962 bits ^= 1 << (vind % 32);
3965 !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
3966 /* Clear VFTA first, then disable VLVF. Otherwise
3967 * we run the risk of stray packets leaking into
3968 * the PF via the default pool
3971 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
3973 /* disable VLVF and clear remaining bit from pool */
3974 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3975 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
3977 return IXGBE_SUCCESS;
3979 /* If there are still bits set in the VLVFB registers
3980 * for the VLAN ID indicated we need to see if the
3981 * caller is requesting that we clear the VFTA entry bit.
3982 * If the caller has requested that we clear the VFTA
3983 * entry bit but there are still pools/VFs using this VLAN
3984 * ID entry then ignore the request. We're not worried
3985 * about the case where we're turning the VFTA VLAN ID
3986 * entry bit on, only when requested to turn it off as
3987 * there may be multiple pools and/or VFs using the
3988 * VLAN ID entry. In that case we cannot clear the
3989 * VFTA bit until all pools/VFs using that VLAN ID have also
3990 * been cleared. This will be indicated by "bits" being
3996 /* record pool change and enable VLAN ID if not already enabled */
3997 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
3998 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
4000 return IXGBE_SUCCESS;
4004 * ixgbe_clear_vfta_generic - Clear VLAN filter table
4005 * @hw: pointer to hardware structure
4007 * Clears the VLAN filer table, and the VMDq index associated with the filter
4009 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
4013 DEBUGFUNC("ixgbe_clear_vfta_generic");
4015 for (offset = 0; offset < hw->mac.vft_size; offset++)
4016 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
4018 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
4019 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
4020 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4021 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
4024 return IXGBE_SUCCESS;
4028 * ixgbe_check_mac_link_generic - Determine link and speed status
4029 * @hw: pointer to hardware structure
4030 * @speed: pointer to link speed
4031 * @link_up: true when link is up
4032 * @link_up_wait_to_complete: bool used to wait for link up or not
4034 * Reads the links register to determine if link is up and the current speed
4036 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4037 bool *link_up, bool link_up_wait_to_complete)
4039 u32 links_reg, links_orig;
4042 DEBUGFUNC("ixgbe_check_mac_link_generic");
4044 /* clear the old state */
4045 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
4047 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4049 if (links_orig != links_reg) {
4050 DEBUGOUT2("LINKS changed from %08X to %08X\n",
4051 links_orig, links_reg);
4054 if (link_up_wait_to_complete) {
4055 for (i = 0; i < hw->mac.max_link_up_time; i++) {
4056 if (links_reg & IXGBE_LINKS_UP) {
4063 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4066 if (links_reg & IXGBE_LINKS_UP)
4072 switch (links_reg & IXGBE_LINKS_SPEED_82599) {
4073 case IXGBE_LINKS_SPEED_10G_82599:
4074 *speed = IXGBE_LINK_SPEED_10GB_FULL;
4075 if (hw->mac.type >= ixgbe_mac_X550) {
4076 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4077 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
4080 case IXGBE_LINKS_SPEED_1G_82599:
4081 *speed = IXGBE_LINK_SPEED_1GB_FULL;
4083 case IXGBE_LINKS_SPEED_100_82599:
4084 *speed = IXGBE_LINK_SPEED_100_FULL;
4085 if (hw->mac.type >= ixgbe_mac_X550) {
4086 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4087 *speed = IXGBE_LINK_SPEED_5GB_FULL;
4091 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4094 return IXGBE_SUCCESS;
4098 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
4100 * @hw: pointer to hardware structure
4101 * @wwnn_prefix: the alternative WWNN prefix
4102 * @wwpn_prefix: the alternative WWPN prefix
4104 * This function will read the EEPROM from the alternative SAN MAC address
4105 * block to check the support for the alternative WWNN/WWPN prefix support.
4107 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
4111 u16 alt_san_mac_blk_offset;
4113 DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
4115 /* clear output first */
4116 *wwnn_prefix = 0xFFFF;
4117 *wwpn_prefix = 0xFFFF;
4119 /* check if alternative SAN MAC is supported */
4120 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
4121 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
4122 goto wwn_prefix_err;
4124 if ((alt_san_mac_blk_offset == 0) ||
4125 (alt_san_mac_blk_offset == 0xFFFF))
4126 goto wwn_prefix_out;
4128 /* check capability in alternative san mac address block */
4129 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
4130 if (hw->eeprom.ops.read(hw, offset, &caps))
4131 goto wwn_prefix_err;
4132 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
4133 goto wwn_prefix_out;
4135 /* get the corresponding prefix for WWNN/WWPN */
4136 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
4137 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
4138 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4139 "eeprom read at offset %d failed", offset);
4142 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
4143 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
4144 goto wwn_prefix_err;
4147 return IXGBE_SUCCESS;
4150 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4151 "eeprom read at offset %d failed", offset);
4152 return IXGBE_SUCCESS;
4156 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4157 * @hw: pointer to hardware structure
4158 * @bs: the fcoe boot status
4160 * This function will read the FCOE boot status from the iSCSI FCOE block
4162 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4164 u16 offset, caps, flags;
4167 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4169 /* clear output first */
4170 *bs = ixgbe_fcoe_bootstatus_unavailable;
4172 /* check if FCOE IBA block is present */
4173 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4174 status = hw->eeprom.ops.read(hw, offset, &caps);
4175 if (status != IXGBE_SUCCESS)
4178 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4181 /* check if iSCSI FCOE block is populated */
4182 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4183 if (status != IXGBE_SUCCESS)
4186 if ((offset == 0) || (offset == 0xFFFF))
4189 /* read fcoe flags in iSCSI FCOE block */
4190 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4191 status = hw->eeprom.ops.read(hw, offset, &flags);
4192 if (status != IXGBE_SUCCESS)
4195 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4196 *bs = ixgbe_fcoe_bootstatus_enabled;
4198 *bs = ixgbe_fcoe_bootstatus_disabled;
4205 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4206 * @hw: pointer to hardware structure
4207 * @enable: enable or disable switch for MAC anti-spoofing
4208 * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
4211 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4213 int vf_target_reg = vf >> 3;
4214 int vf_target_shift = vf % 8;
4217 if (hw->mac.type == ixgbe_mac_82598EB)
4220 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4222 pfvfspoof |= (1 << vf_target_shift);
4224 pfvfspoof &= ~(1 << vf_target_shift);
4225 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4229 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4230 * @hw: pointer to hardware structure
4231 * @enable: enable or disable switch for VLAN anti-spoofing
4232 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4235 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4237 int vf_target_reg = vf >> 3;
4238 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4241 if (hw->mac.type == ixgbe_mac_82598EB)
4244 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4246 pfvfspoof |= (1 << vf_target_shift);
4248 pfvfspoof &= ~(1 << vf_target_shift);
4249 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4253 * ixgbe_get_device_caps_generic - Get additional device capabilities
4254 * @hw: pointer to hardware structure
4255 * @device_caps: the EEPROM word with the extra device capabilities
4257 * This function will read the EEPROM location for the device capabilities,
4258 * and return the word through device_caps.
4260 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4262 DEBUGFUNC("ixgbe_get_device_caps_generic");
4264 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4266 return IXGBE_SUCCESS;
4270 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4271 * @hw: pointer to hardware structure
4274 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4279 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4281 /* Enable relaxed ordering */
4282 for (i = 0; i < hw->mac.max_tx_queues; i++) {
4283 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4284 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4285 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4288 for (i = 0; i < hw->mac.max_rx_queues; i++) {
4289 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4290 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4291 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4292 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4298 * ixgbe_calculate_checksum - Calculate checksum for buffer
4299 * @buffer: pointer to EEPROM
4300 * @length: size of EEPROM to calculate a checksum for
4301 * Calculates the checksum for some buffer on a specified length. The
4302 * checksum calculated is returned.
4304 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4309 DEBUGFUNC("ixgbe_calculate_checksum");
4314 for (i = 0; i < length; i++)
4317 return (u8) (0 - sum);
4321 * ixgbe_host_interface_command - Issue command to manageability block
4322 * @hw: pointer to the HW structure
4323 * @buffer: contains the command to write and where the return status will
4325 * @length: length of buffer, must be multiple of 4 bytes
4326 * @timeout: time in ms to wait for command completion
4327 * @return_data: read and return data from the buffer (true) or not (false)
4328 * Needed because FW structures are big endian and decoding of
4329 * these fields can be 8 bit or 16 bit based on command. Decoding
4330 * is not easily understood without making a table of commands.
4331 * So we will leave this up to the caller to read back the data
4334 * Communicates with the manageability block. On success return IXGBE_SUCCESS
4335 * else returns semaphore error when encountering an error acquiring
4336 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4338 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
4339 u32 length, u32 timeout, bool return_data)
4341 u32 hicr, i, bi, fwsts;
4342 u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
4347 DEBUGFUNC("ixgbe_host_interface_command");
4349 if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4350 DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4351 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4353 /* Take management host interface semaphore */
4354 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4359 /* Set bit 9 of FWSTS clearing FW reset indication */
4360 fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
4361 IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
4363 /* Check that the host interface is enabled. */
4364 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4365 if ((hicr & IXGBE_HICR_EN) == 0) {
4366 DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
4367 status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4371 /* Calculate length in DWORDs. We must be DWORD aligned */
4372 if ((length % (sizeof(u32))) != 0) {
4373 DEBUGOUT("Buffer length failure, not aligned to dword");
4374 status = IXGBE_ERR_INVALID_ARGUMENT;
4378 dword_len = length >> 2;
4380 /* The device driver writes the relevant command block
4381 * into the ram area.
4383 for (i = 0; i < dword_len; i++)
4384 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4385 i, IXGBE_CPU_TO_LE32(buffer[i]));
4387 /* Setting this bit tells the ARC that a new command is pending. */
4388 IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
4390 for (i = 0; i < timeout; i++) {
4391 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4392 if (!(hicr & IXGBE_HICR_C))
4397 /* Check command completion */
4398 if ((timeout != 0 && i == timeout) ||
4399 !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
4400 ERROR_REPORT1(IXGBE_ERROR_CAUTION,
4401 "Command has failed with no status valid.\n");
4402 status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4409 /* Calculate length in DWORDs */
4410 dword_len = hdr_size >> 2;
4412 /* first pull in the header so we know the buffer length */
4413 for (bi = 0; bi < dword_len; bi++) {
4414 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4415 IXGBE_LE32_TO_CPUS(&buffer[bi]);
4418 /* If there is any thing in data position pull it in */
4419 buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
4423 if (length < buf_len + hdr_size) {
4424 DEBUGOUT("Buffer not large enough for reply message.\n");
4425 status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4429 /* Calculate length in DWORDs, add 3 for odd lengths */
4430 dword_len = (buf_len + 3) >> 2;
4432 /* Pull in the rest of the buffer (bi is where we left off) */
4433 for (; bi <= dword_len; bi++) {
4434 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4435 IXGBE_LE32_TO_CPUS(&buffer[bi]);
4439 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4445 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4446 * @hw: pointer to the HW structure
4447 * @maj: driver version major number
4448 * @min: driver version minor number
4449 * @build: driver version build number
4450 * @sub: driver version sub build number
4452 * Sends driver version number to firmware through the manageability
4453 * block. On success return IXGBE_SUCCESS
4454 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4455 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4457 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4460 struct ixgbe_hic_drv_info fw_cmd;
4462 s32 ret_val = IXGBE_SUCCESS;
4464 DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4466 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4467 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4468 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4469 fw_cmd.port_num = (u8)hw->bus.func;
4470 fw_cmd.ver_maj = maj;
4471 fw_cmd.ver_min = min;
4472 fw_cmd.ver_build = build;
4473 fw_cmd.ver_sub = sub;
4474 fw_cmd.hdr.checksum = 0;
4475 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4476 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4480 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4481 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4483 IXGBE_HI_COMMAND_TIMEOUT,
4485 if (ret_val != IXGBE_SUCCESS)
4488 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4489 FW_CEM_RESP_STATUS_SUCCESS)
4490 ret_val = IXGBE_SUCCESS;
4492 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4501 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4502 * @hw: pointer to hardware structure
4503 * @num_pb: number of packet buffers to allocate
4504 * @headroom: reserve n KB of headroom
4505 * @strategy: packet buffer allocation strategy
4507 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4510 u32 pbsize = hw->mac.rx_pb_size;
4512 u32 rxpktsize, txpktsize, txpbthresh;
4514 /* Reserve headroom */
4520 /* Divide remaining packet buffer space amongst the number of packet
4521 * buffers requested using supplied strategy.
4524 case PBA_STRATEGY_WEIGHTED:
4525 /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4526 * buffer with 5/8 of the packet buffer space.
4528 rxpktsize = (pbsize * 5) / (num_pb * 4);
4529 pbsize -= rxpktsize * (num_pb / 2);
4530 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4531 for (; i < (num_pb / 2); i++)
4532 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4533 /* Fall through to configure remaining packet buffers */
4534 case PBA_STRATEGY_EQUAL:
4535 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4536 for (; i < num_pb; i++)
4537 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4543 /* Only support an equally distributed Tx packet buffer strategy. */
4544 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4545 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4546 for (i = 0; i < num_pb; i++) {
4547 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4548 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4551 /* Clear unused TCs, if any, to zero buffer size*/
4552 for (; i < IXGBE_MAX_PB; i++) {
4553 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4554 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4555 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4560 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4561 * @hw: pointer to the hardware structure
4563 * The 82599 and x540 MACs can experience issues if TX work is still pending
4564 * when a reset occurs. This function prevents this by flushing the PCIe
4565 * buffers on the system.
4567 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4569 u32 gcr_ext, hlreg0, i, poll;
4573 * If double reset is not requested then all transactions should
4574 * already be clear and as such there is no work to do
4576 if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4580 * Set loopback enable to prevent any transmits from being sent
4581 * should the link come up. This assumes that the RXCTRL.RXEN bit
4582 * has already been cleared.
4584 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4585 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4587 /* Wait for a last completion before clearing buffers */
4588 IXGBE_WRITE_FLUSH(hw);
4592 * Before proceeding, make sure that the PCIe block does not have
4593 * transactions pending.
4595 poll = ixgbe_pcie_timeout_poll(hw);
4596 for (i = 0; i < poll; i++) {
4598 value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
4599 if (IXGBE_REMOVED(hw->hw_addr))
4601 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
4606 /* initiate cleaning flow for buffers in the PCIe transaction layer */
4607 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4608 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4609 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4611 /* Flush all writes and allow 20usec for all transactions to clear */
4612 IXGBE_WRITE_FLUSH(hw);
4615 /* restore previous register values */
4616 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4617 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4620 STATIC const u8 ixgbe_emc_temp_data[4] = {
4621 IXGBE_EMC_INTERNAL_DATA,
4622 IXGBE_EMC_DIODE1_DATA,
4623 IXGBE_EMC_DIODE2_DATA,
4624 IXGBE_EMC_DIODE3_DATA
4626 STATIC const u8 ixgbe_emc_therm_limit[4] = {
4627 IXGBE_EMC_INTERNAL_THERM_LIMIT,
4628 IXGBE_EMC_DIODE1_THERM_LIMIT,
4629 IXGBE_EMC_DIODE2_THERM_LIMIT,
4630 IXGBE_EMC_DIODE3_THERM_LIMIT
4634 * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
4635 * @hw: pointer to hardware structure
4636 * @data: pointer to the thermal sensor data structure
4638 * Returns the thermal sensor data structure
4640 s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
4642 s32 status = IXGBE_SUCCESS;
4650 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
4652 DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic");
4654 /* Only support thermal sensors attached to 82599 physical port 0 */
4655 if ((hw->mac.type != ixgbe_mac_82599EB) ||
4656 (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
4657 status = IXGBE_NOT_IMPLEMENTED;
4661 status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
4665 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) {
4666 status = IXGBE_NOT_IMPLEMENTED;
4670 status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
4674 if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
4675 != IXGBE_ETS_TYPE_EMC) {
4676 status = IXGBE_NOT_IMPLEMENTED;
4680 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
4681 if (num_sensors > IXGBE_MAX_SENSORS)
4682 num_sensors = IXGBE_MAX_SENSORS;
4684 for (i = 0; i < num_sensors; i++) {
4685 status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
4690 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
4691 IXGBE_ETS_DATA_INDEX_SHIFT);
4692 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
4693 IXGBE_ETS_DATA_LOC_SHIFT);
4695 if (sensor_location != 0) {
4696 status = hw->phy.ops.read_i2c_byte(hw,
4697 ixgbe_emc_temp_data[sensor_index],
4698 IXGBE_I2C_THERMAL_SENSOR_ADDR,
4699 &data->sensor[i].temp);
4709 * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
4710 * @hw: pointer to hardware structure
4712 * Inits the thermal sensor thresholds according to the NVM map
4713 * and save off the threshold and location values into mac.thermal_sensor_data
4715 s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
4717 s32 status = IXGBE_SUCCESS;
4722 u8 low_thresh_delta;
4728 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
4730 DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic");
4732 memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
4734 /* Only support thermal sensors attached to 82599 physical port 0 */
4735 if ((hw->mac.type != ixgbe_mac_82599EB) ||
4736 (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
4737 return IXGBE_NOT_IMPLEMENTED;
4739 offset = IXGBE_ETS_CFG;
4740 if (hw->eeprom.ops.read(hw, offset, &ets_offset))
4742 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
4743 return IXGBE_NOT_IMPLEMENTED;
4745 offset = ets_offset;
4746 if (hw->eeprom.ops.read(hw, offset, &ets_cfg))
4748 if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
4749 != IXGBE_ETS_TYPE_EMC)
4750 return IXGBE_NOT_IMPLEMENTED;
4752 low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
4753 IXGBE_ETS_LTHRES_DELTA_SHIFT);
4754 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
4756 for (i = 0; i < num_sensors; i++) {
4757 offset = ets_offset + 1 + i;
4758 if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) {
4759 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4760 "eeprom read at offset %d failed",
4764 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
4765 IXGBE_ETS_DATA_INDEX_SHIFT);
4766 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
4767 IXGBE_ETS_DATA_LOC_SHIFT);
4768 therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
4770 hw->phy.ops.write_i2c_byte(hw,
4771 ixgbe_emc_therm_limit[sensor_index],
4772 IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
4774 if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) {
4775 data->sensor[i].location = sensor_location;
4776 data->sensor[i].caution_thresh = therm_limit;
4777 data->sensor[i].max_op_thresh = therm_limit -
4784 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4785 "eeprom read at offset %d failed", offset);
4786 return IXGBE_NOT_IMPLEMENTED;
4791 * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
4792 * @hw: pointer to hardware structure
4793 * @map: pointer to u8 arr for returning map
4795 * Read the rtrup2tc HW register and resolve its content into map
4797 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
4801 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
4802 for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
4803 map[i] = IXGBE_RTRUP2TC_UP_MASK &
4804 (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
4808 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
4813 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4814 if (rxctrl & IXGBE_RXCTRL_RXEN) {
4815 if (hw->mac.type != ixgbe_mac_82598EB) {
4816 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
4817 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
4818 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
4819 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
4820 hw->mac.set_lben = true;
4822 hw->mac.set_lben = false;
4825 rxctrl &= ~IXGBE_RXCTRL_RXEN;
4826 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
4830 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
4835 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4836 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
4838 if (hw->mac.type != ixgbe_mac_82598EB) {
4839 if (hw->mac.set_lben) {
4840 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
4841 pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
4842 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
4843 hw->mac.set_lben = false;
4849 * ixgbe_mng_present - returns true when management capability is present
4850 * @hw: pointer to hardware structure
4852 bool ixgbe_mng_present(struct ixgbe_hw *hw)
4856 if (hw->mac.type < ixgbe_mac_82599EB)
4859 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
4860 fwsm &= IXGBE_FWSM_MODE_MASK;
4861 return fwsm == IXGBE_FWSM_FW_MODE_PT;
4865 * ixgbe_mng_enabled - Is the manageability engine enabled?
4866 * @hw: pointer to hardware structure
4868 * Returns true if the manageability engine is enabled.
4870 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
4872 u32 fwsm, manc, factps;
4874 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
4875 if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
4878 manc = IXGBE_READ_REG(hw, IXGBE_MANC);
4879 if (!(manc & IXGBE_MANC_RCV_TCO_EN))
4882 if (hw->mac.type <= ixgbe_mac_X540) {
4883 factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
4884 if (factps & IXGBE_FACTPS_MNGCG)
4892 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
4893 * @hw: pointer to hardware structure
4894 * @speed: new link speed
4895 * @autoneg_wait_to_complete: true when waiting for completion is needed
4897 * Set the link speed in the MAC and/or PHY register and restarts link.
4899 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
4900 ixgbe_link_speed speed,
4901 bool autoneg_wait_to_complete)
4903 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
4904 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
4905 s32 status = IXGBE_SUCCESS;
4908 bool autoneg, link_up = false;
4910 DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
4912 /* Mask off requested but non-supported speeds */
4913 status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
4914 if (status != IXGBE_SUCCESS)
4917 speed &= link_speed;
4919 /* Try each speed one by one, highest priority first. We do this in
4920 * software because 10Gb fiber doesn't support speed autonegotiation.
4922 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
4924 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
4926 /* If we already have link at this speed, just jump out */
4927 status = ixgbe_check_link(hw, &link_speed, &link_up, false);
4928 if (status != IXGBE_SUCCESS)
4931 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
4934 /* Set the module link speed */
4935 switch (hw->phy.media_type) {
4936 case ixgbe_media_type_fiber:
4937 ixgbe_set_rate_select_speed(hw,
4938 IXGBE_LINK_SPEED_10GB_FULL);
4940 case ixgbe_media_type_fiber_qsfp:
4941 /* QSFP module automatically detects MAC link speed */
4944 DEBUGOUT("Unexpected media type.\n");
4948 /* Allow module to change analog characteristics (1G->10G) */
4951 status = ixgbe_setup_mac_link(hw,
4952 IXGBE_LINK_SPEED_10GB_FULL,
4953 autoneg_wait_to_complete);
4954 if (status != IXGBE_SUCCESS)
4957 /* Flap the Tx laser if it has not already been done */
4958 ixgbe_flap_tx_laser(hw);
4960 /* Wait for the controller to acquire link. Per IEEE 802.3ap,
4961 * Section 73.10.2, we may have to wait up to 500ms if KR is
4962 * attempted. 82599 uses the same timing for 10g SFI.
4964 for (i = 0; i < 5; i++) {
4965 /* Wait for the link partner to also set speed */
4968 /* If we have link, just jump out */
4969 status = ixgbe_check_link(hw, &link_speed,
4971 if (status != IXGBE_SUCCESS)
4979 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
4981 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
4982 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
4984 /* If we already have link at this speed, just jump out */
4985 status = ixgbe_check_link(hw, &link_speed, &link_up, false);
4986 if (status != IXGBE_SUCCESS)
4989 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
4992 /* Set the module link speed */
4993 switch (hw->phy.media_type) {
4994 case ixgbe_media_type_fiber:
4995 ixgbe_set_rate_select_speed(hw,
4996 IXGBE_LINK_SPEED_1GB_FULL);
4998 case ixgbe_media_type_fiber_qsfp:
4999 /* QSFP module automatically detects link speed */
5002 DEBUGOUT("Unexpected media type.\n");
5006 /* Allow module to change analog characteristics (10G->1G) */
5009 status = ixgbe_setup_mac_link(hw,
5010 IXGBE_LINK_SPEED_1GB_FULL,
5011 autoneg_wait_to_complete);
5012 if (status != IXGBE_SUCCESS)
5015 /* Flap the Tx laser if it has not already been done */
5016 ixgbe_flap_tx_laser(hw);
5018 /* Wait for the link partner to also set speed */
5021 /* If we have link, just jump out */
5022 status = ixgbe_check_link(hw, &link_speed, &link_up, false);
5023 if (status != IXGBE_SUCCESS)
5030 /* We didn't get link. Configure back to the highest speed we tried,
5031 * (if there was more than one). We call ourselves back with just the
5032 * single highest speed that the user requested.
5035 status = ixgbe_setup_mac_link_multispeed_fiber(hw,
5037 autoneg_wait_to_complete);
5040 /* Set autoneg_advertised value based on input link speed */
5041 hw->phy.autoneg_advertised = 0;
5043 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
5044 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
5046 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
5047 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
5053 * ixgbe_set_soft_rate_select_speed - Set module link speed
5054 * @hw: pointer to hardware structure
5055 * @speed: link speed to set
5057 * Set module link speed via the soft rate select.
5059 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
5060 ixgbe_link_speed speed)
5066 case IXGBE_LINK_SPEED_10GB_FULL:
5067 /* one bit mask same as setting on */
5068 rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
5070 case IXGBE_LINK_SPEED_1GB_FULL:
5071 rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
5074 DEBUGOUT("Invalid fixed module speed\n");
5079 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5080 IXGBE_I2C_EEPROM_DEV_ADDR2,
5083 DEBUGOUT("Failed to read Rx Rate Select RS0\n");
5087 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5089 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5090 IXGBE_I2C_EEPROM_DEV_ADDR2,
5093 DEBUGOUT("Failed to write Rx Rate Select RS0\n");
5098 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5099 IXGBE_I2C_EEPROM_DEV_ADDR2,
5102 DEBUGOUT("Failed to read Rx Rate Select RS1\n");
5106 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5108 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5109 IXGBE_I2C_EEPROM_DEV_ADDR2,
5112 DEBUGOUT("Failed to write Rx Rate Select RS1\n");