1 /*******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include "ixgbe_common.h"
35 #include "ixgbe_phy.h"
36 #include "ixgbe_dcb.h"
37 #include "ixgbe_dcb_82599.h"
38 #include "ixgbe_api.h"
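/*
 * Forward declarations for the file-local (STATIC) helpers used by the
 * generic EEPROM bit-bang and MAC-address routines defined later in
 * this file.
 */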
40 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
41 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
42 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
43 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
44 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
45 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
46 u16 count);
47 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
48 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
49 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
50 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw);
52 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
53 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
54 u16 *san_mac_offset);
55 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
56 u16 words, u16 *data);
57 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
58 u16 words, u16 *data);
59 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
60 u16 offset);
63 * ixgbe_init_ops_generic - Inits function ptrs
64 * @hw: pointer to the hardware structure
66 * Initialize the function pointers.
68 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
70 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
71 struct ixgbe_mac_info *mac = &hw->mac;
72 u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
74 DEBUGFUNC("ixgbe_init_ops_generic");
77 eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
78 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
79 if (eec & IXGBE_EEC_PRES) {
80 eeprom->ops.read = ixgbe_read_eerd_generic;
81 eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
82 } else {
83 eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
84 eeprom->ops.read_buffer =
85 ixgbe_read_eeprom_buffer_bit_bang_generic;
87 eeprom->ops.write = ixgbe_write_eeprom_generic;
88 eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
89 eeprom->ops.validate_checksum =
90 ixgbe_validate_eeprom_checksum_generic;
91 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
92 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
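/*
 * The MAC function pointers below are generic defaults; the entries
 * left NULL here (reset_hw, get_media_type, the link and VLAN/VMDq
 * ops, ...) are expected to be filled in by the device-family-specific
 * init routines before they are used.
 */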
95 mac->ops.init_hw = ixgbe_init_hw_generic;
96 mac->ops.reset_hw = NULL;
97 mac->ops.start_hw = ixgbe_start_hw_generic;
98 mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
99 mac->ops.get_media_type = NULL;
100 mac->ops.get_supported_physical_layer = NULL;
101 mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
102 mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
103 mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
104 mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
105 mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
106 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
107 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
108 mac->ops.prot_autoc_read = prot_autoc_read_generic;
109 mac->ops.prot_autoc_write = prot_autoc_write_generic;
112 mac->ops.led_on = ixgbe_led_on_generic;
113 mac->ops.led_off = ixgbe_led_off_generic;
114 mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
115 mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
117 /* RAR, Multicast, VLAN */
118 mac->ops.set_rar = ixgbe_set_rar_generic;
119 mac->ops.clear_rar = ixgbe_clear_rar_generic;
120 mac->ops.insert_mac_addr = NULL;
121 mac->ops.set_vmdq = NULL;
122 mac->ops.clear_vmdq = NULL;
123 mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
124 mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
125 mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
126 mac->ops.enable_mc = ixgbe_enable_mc_generic;
127 mac->ops.disable_mc = ixgbe_disable_mc_generic;
128 mac->ops.clear_vfta = NULL;
129 mac->ops.set_vfta = NULL;
130 mac->ops.set_vlvf = NULL;
131 mac->ops.init_uta_tables = NULL;
132 mac->ops.enable_rx = ixgbe_enable_rx_generic;
133 mac->ops.disable_rx = ixgbe_disable_rx_generic;
136 mac->ops.fc_enable = ixgbe_fc_enable_generic;
137 mac->ops.setup_fc = ixgbe_setup_fc_generic;
138 mac->ops.fc_autoneg = ixgbe_fc_autoneg;
141 mac->ops.get_link_capabilities = NULL;
142 mac->ops.setup_link = NULL;
143 mac->ops.check_link = NULL;
144 mac->ops.dmac_config = NULL;
145 mac->ops.dmac_update_tcs = NULL;
146 mac->ops.dmac_config_tcs = NULL;
148 return IXGBE_SUCCESS;
152 * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
153 * of flow control
154 * @hw: pointer to hardware structure
156 * This function returns true if the device supports flow control
157 * autonegotiation, and false if it does not.
160 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
162 bool supported = false;
163 ixgbe_link_speed speed;
166 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
168 switch (hw->phy.media_type) {
169 case ixgbe_media_type_fiber_qsfp:
170 case ixgbe_media_type_fiber:
171 hw->mac.ops.check_link(hw, &speed, &link_up, false);
172 /* if link is down, assume supported */
173 if (link_up)
174 supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
175 true : false;
176 else
177 supported = true;
178 break;
179 case ixgbe_media_type_backplane:
180 supported = true;
181 break;
182 case ixgbe_media_type_copper:
183 /* only some copper devices support flow control autoneg */
184 switch (hw->device_id) {
185 case IXGBE_DEV_ID_82599_T3_LOM:
186 case IXGBE_DEV_ID_X540T:
187 case IXGBE_DEV_ID_X540T1:
188 case IXGBE_DEV_ID_X550T:
189 case IXGBE_DEV_ID_X550T1:
190 case IXGBE_DEV_ID_X550EM_X_10G_T:
191 case IXGBE_DEV_ID_X550EM_A_10G_T:
201 ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
202 "Device %x does not support flow control autoneg",
203 hw->device_id);
208 * ixgbe_setup_fc_generic - Set up flow control
209 * @hw: pointer to hardware structure
211 * Called at init time to set up flow control.
213 s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
215 s32 ret_val = IXGBE_SUCCESS;
216 u32 reg = 0, reg_bp = 0;
220 DEBUGFUNC("ixgbe_setup_fc_generic");
222 /* Validate the requested mode */
223 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
224 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
225 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
226 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
231 * 10gig parts do not have a word in the EEPROM to determine the
232 * default flow control setting, so we explicitly set it to full.
234 if (hw->fc.requested_mode == ixgbe_fc_default)
235 hw->fc.requested_mode = ixgbe_fc_full;
238 * Set up the 1G and 10G flow control advertisement registers so the
239 * HW will be able to do fc autoneg once the cable is plugged in. If
240 * we link at 10G, the 1G advertisement is harmless and vice versa.
242 switch (hw->phy.media_type) {
243 case ixgbe_media_type_backplane:
244 /* some MAC's need RMW protection on AUTOC */
245 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
246 if (ret_val != IXGBE_SUCCESS)
249 /* only backplane uses autoc so fall through */
250 case ixgbe_media_type_fiber_qsfp:
251 case ixgbe_media_type_fiber:
252 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
255 case ixgbe_media_type_copper:
256 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
257 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
264 * The possible values of fc.requested_mode are:
265 * 0: Flow control is completely disabled
266 * 1: Rx flow control is enabled (we can receive pause frames,
267 * but not send pause frames).
268 * 2: Tx flow control is enabled (we can send pause frames but
269 * we do not support receiving pause frames).
270 * 3: Both Rx and Tx flow control (symmetric) are enabled.
273 switch (hw->fc.requested_mode) {
274 case ixgbe_fc_none:
275 /* Flow control completely disabled by software override. */
276 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
277 if (hw->phy.media_type == ixgbe_media_type_backplane)
278 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
279 IXGBE_AUTOC_ASM_PAUSE);
280 else if (hw->phy.media_type == ixgbe_media_type_copper)
281 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
283 case ixgbe_fc_tx_pause:
285 * Tx Flow control is enabled, and Rx Flow control is
286 * disabled by software override.
288 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
289 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
290 if (hw->phy.media_type == ixgbe_media_type_backplane) {
291 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
292 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
293 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
294 reg_cu |= IXGBE_TAF_ASM_PAUSE;
295 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
298 case ixgbe_fc_rx_pause:
300 * Rx Flow control is enabled and Tx Flow control is
301 * disabled by software override. Since there really
302 * isn't a way to advertise that we are capable of RX
303 * Pause ONLY, we will advertise that we support both
304 * symmetric and asymmetric Rx PAUSE, as such we fall
305 * through to the fc_full statement. Later, we will
306 * disable the adapter's ability to send PAUSE frames.
308 case ixgbe_fc_full:
309 /* Flow control (both Rx and Tx) is enabled by SW override. */
310 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
311 if (hw->phy.media_type == ixgbe_media_type_backplane)
312 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
313 IXGBE_AUTOC_ASM_PAUSE;
314 else if (hw->phy.media_type == ixgbe_media_type_copper)
315 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
316 break;
317 default:
318 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
319 "Flow control param set incorrectly\n");
320 ret_val = IXGBE_ERR_CONFIG;
325 if (hw->mac.type < ixgbe_mac_X540) {
327 * Enable auto-negotiation between the MAC & PHY;
328 * the MAC will advertise clause 37 flow control.
330 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
331 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
333 /* Disable AN timeout */
334 if (hw->fc.strict_ieee)
335 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
337 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
338 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
342 * AUTOC restart handles negotiation of 1G and 10G on backplane
343 * and copper. There is no need to set the PCS1GCTL register.
346 if (hw->phy.media_type == ixgbe_media_type_backplane) {
347 reg_bp |= IXGBE_AUTOC_AN_RESTART;
348 ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
351 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
352 (ixgbe_device_supports_autoneg_fc(hw))) {
353 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
354 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
357 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
363 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
364 * @hw: pointer to hardware structure
366 * Starts the hardware by filling the bus info structure and media type, clears
367 * all on chip counters, initializes receive address registers, multicast
368 * table, VLAN filter table, calls routine to set up link and flow control
369 * settings, and leaves transmit and receive units disabled and uninitialized
371 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
376 DEBUGFUNC("ixgbe_start_hw_generic");
378 /* Set the media type */
379 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
381 /* PHY ops initialization must be done in reset_hw() */
383 /* Clear the VLAN filter table */
384 hw->mac.ops.clear_vfta(hw);
386 /* Clear statistics registers */
387 hw->mac.ops.clear_hw_cntrs(hw);
389 /* Set No Snoop Disable */
390 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
391 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
392 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
393 IXGBE_WRITE_FLUSH(hw);
395 /* Setup flow control */
396 ret_val = ixgbe_setup_fc(hw);
397 if (ret_val != IXGBE_SUCCESS)
400 /* Clear adapter stopped flag */
401 hw->adapter_stopped = false;
408 * ixgbe_start_hw_gen2 - Init sequence for common device family
409 * @hw: pointer to hw structure
411 * Performs the init sequence common to the second generation
413 * Devices in the second generation:
414 *     82599
415 *     X540
417 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
422 /* Clear the rate limiters */
423 for (i = 0; i < hw->mac.max_tx_queues; i++) {
424 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
425 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
427 IXGBE_WRITE_FLUSH(hw);
429 /* Disable relaxed ordering */
430 for (i = 0; i < hw->mac.max_tx_queues; i++) {
431 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
432 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
433 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
436 for (i = 0; i < hw->mac.max_rx_queues; i++) {
437 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
438 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
439 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
440 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
443 return IXGBE_SUCCESS;
447 * ixgbe_init_hw_generic - Generic hardware initialization
448 * @hw: pointer to hardware structure
450 * Initialize the hardware by resetting the hardware, filling the bus info
451 * structure and media type, clears all on chip counters, initializes receive
452 * address registers, multicast table, VLAN filter table, calls routine to set
453 * up link and flow control settings, and leaves transmit and receive units
454 * disabled and uninitialized
456 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
460 DEBUGFUNC("ixgbe_init_hw_generic");
462 /* Reset the hardware */
463 status = hw->mac.ops.reset_hw(hw);
465 if (status == IXGBE_SUCCESS) {
467 status = hw->mac.ops.start_hw(hw);
474 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
475 * @hw: pointer to hardware structure
477 * Clears all hardware statistics counters by reading them from the hardware
478 * Statistics counters are clear on read.
480 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
484 DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
486 IXGBE_READ_REG(hw, IXGBE_CRCERRS);
487 IXGBE_READ_REG(hw, IXGBE_ILLERRC);
488 IXGBE_READ_REG(hw, IXGBE_ERRBC);
489 IXGBE_READ_REG(hw, IXGBE_MSPDC);
490 for (i = 0; i < 8; i++)
491 IXGBE_READ_REG(hw, IXGBE_MPC(i));
493 IXGBE_READ_REG(hw, IXGBE_MLFC);
494 IXGBE_READ_REG(hw, IXGBE_MRFC);
495 IXGBE_READ_REG(hw, IXGBE_RLEC);
496 IXGBE_READ_REG(hw, IXGBE_LXONTXC);
497 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
498 if (hw->mac.type >= ixgbe_mac_82599EB) {
499 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
500 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
502 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
503 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
506 for (i = 0; i < 8; i++) {
507 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
508 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
509 if (hw->mac.type >= ixgbe_mac_82599EB) {
510 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
511 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
513 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
514 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
517 if (hw->mac.type >= ixgbe_mac_82599EB)
518 for (i = 0; i < 8; i++)
519 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
520 IXGBE_READ_REG(hw, IXGBE_PRC64);
521 IXGBE_READ_REG(hw, IXGBE_PRC127);
522 IXGBE_READ_REG(hw, IXGBE_PRC255);
523 IXGBE_READ_REG(hw, IXGBE_PRC511);
524 IXGBE_READ_REG(hw, IXGBE_PRC1023);
525 IXGBE_READ_REG(hw, IXGBE_PRC1522);
526 IXGBE_READ_REG(hw, IXGBE_GPRC);
527 IXGBE_READ_REG(hw, IXGBE_BPRC);
528 IXGBE_READ_REG(hw, IXGBE_MPRC);
529 IXGBE_READ_REG(hw, IXGBE_GPTC);
530 IXGBE_READ_REG(hw, IXGBE_GORCL);
531 IXGBE_READ_REG(hw, IXGBE_GORCH);
532 IXGBE_READ_REG(hw, IXGBE_GOTCL);
533 IXGBE_READ_REG(hw, IXGBE_GOTCH);
534 if (hw->mac.type == ixgbe_mac_82598EB)
535 for (i = 0; i < 8; i++)
536 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
537 IXGBE_READ_REG(hw, IXGBE_RUC);
538 IXGBE_READ_REG(hw, IXGBE_RFC);
539 IXGBE_READ_REG(hw, IXGBE_ROC);
540 IXGBE_READ_REG(hw, IXGBE_RJC);
541 IXGBE_READ_REG(hw, IXGBE_MNGPRC);
542 IXGBE_READ_REG(hw, IXGBE_MNGPDC);
543 IXGBE_READ_REG(hw, IXGBE_MNGPTC);
544 IXGBE_READ_REG(hw, IXGBE_TORL);
545 IXGBE_READ_REG(hw, IXGBE_TORH);
546 IXGBE_READ_REG(hw, IXGBE_TPR);
547 IXGBE_READ_REG(hw, IXGBE_TPT);
548 IXGBE_READ_REG(hw, IXGBE_PTC64);
549 IXGBE_READ_REG(hw, IXGBE_PTC127);
550 IXGBE_READ_REG(hw, IXGBE_PTC255);
551 IXGBE_READ_REG(hw, IXGBE_PTC511);
552 IXGBE_READ_REG(hw, IXGBE_PTC1023);
553 IXGBE_READ_REG(hw, IXGBE_PTC1522);
554 IXGBE_READ_REG(hw, IXGBE_MPTC);
555 IXGBE_READ_REG(hw, IXGBE_BPTC);
556 for (i = 0; i < 16; i++) {
557 IXGBE_READ_REG(hw, IXGBE_QPRC(i));
558 IXGBE_READ_REG(hw, IXGBE_QPTC(i));
559 if (hw->mac.type >= ixgbe_mac_82599EB) {
560 IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
561 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
562 IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
563 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
564 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
566 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
567 IXGBE_READ_REG(hw, IXGBE_QBTC(i));
571 if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
573 ixgbe_identify_phy(hw);
574 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
575 IXGBE_MDIO_PCS_DEV_TYPE, &i);
576 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
577 IXGBE_MDIO_PCS_DEV_TYPE, &i);
578 hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
579 IXGBE_MDIO_PCS_DEV_TYPE, &i);
580 hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
581 IXGBE_MDIO_PCS_DEV_TYPE, &i);
584 return IXGBE_SUCCESS;
588 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
589 * @hw: pointer to hardware structure
590 * @pba_num: stores the part number string from the EEPROM
591 * @pba_num_size: part number string buffer length
593 * Reads the part number string from the EEPROM.
595 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
596 u32 pba_num_size)
604 DEBUGFUNC("ixgbe_read_pba_string_generic");
606 if (pba_num == NULL) {
607 DEBUGOUT("PBA string buffer was null\n");
608 return IXGBE_ERR_INVALID_ARGUMENT;
611 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
613 DEBUGOUT("NVM Read Error\n");
617 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
619 DEBUGOUT("NVM Read Error\n");
624 * If data is not the pointer guard, the PBA is stored in the legacy format,
625 * which means pba_ptr is actually our second data word for the PBA number
626 * and we can decode it into an ASCII string
628 if (data != IXGBE_PBANUM_PTR_GUARD) {
629 DEBUGOUT("NVM PBA number is not stored as string\n");
631 /* we will need 11 characters to store the PBA */
632 if (pba_num_size < 11) {
633 DEBUGOUT("PBA string buffer too small\n");
634 return IXGBE_ERR_NO_SPACE;
637 /* extract hex string from data and pba_ptr */
638 pba_num[0] = (data >> 12) & 0xF;
639 pba_num[1] = (data >> 8) & 0xF;
640 pba_num[2] = (data >> 4) & 0xF;
641 pba_num[3] = data & 0xF;
642 pba_num[4] = (pba_ptr >> 12) & 0xF;
643 pba_num[5] = (pba_ptr >> 8) & 0xF;
644 pba_num[6] = '-';
645 pba_num[7] = 0;
646 pba_num[8] = (pba_ptr >> 4) & 0xF;
647 pba_num[9] = pba_ptr & 0xF;
649 /* put a null character on the end of our string */
650 pba_num[10] = '\0';
652 /* switch all the data but the '-' to hex char */
653 for (offset = 0; offset < 10; offset++) {
654 if (pba_num[offset] < 0xA)
655 pba_num[offset] += '0';
656 else if (pba_num[offset] < 0x10)
657 pba_num[offset] += 'A' - 0xA;
660 return IXGBE_SUCCESS;
663 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
665 DEBUGOUT("NVM Read Error\n");
669 if (length == 0xFFFF || length == 0) {
670 DEBUGOUT("NVM PBA number section invalid length\n");
671 return IXGBE_ERR_PBA_SECTION;
674 /* check if pba_num buffer is big enough */
675 if (pba_num_size < (((u32)length * 2) - 1)) {
676 DEBUGOUT("PBA string buffer too small\n");
677 return IXGBE_ERR_NO_SPACE;
680 /* trim pba length from start of string */
681 pba_ptr++;
682 length--;
684 for (offset = 0; offset < length; offset++) {
685 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
687 DEBUGOUT("NVM Read Error\n");
690 pba_num[offset * 2] = (u8)(data >> 8);
691 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
693 pba_num[offset * 2] = '\0';
695 return IXGBE_SUCCESS;
699 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
700 * @hw: pointer to hardware structure
701 * @pba_num: stores the part number from the EEPROM
703 * Reads the part number from the EEPROM.
705 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
710 DEBUGFUNC("ixgbe_read_pba_num_generic");
712 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
714 DEBUGOUT("NVM Read Error\n");
716 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
717 DEBUGOUT("NVM Not supported\n");
718 return IXGBE_NOT_IMPLEMENTED;
720 *pba_num = (u32)(data << 16);
722 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
724 DEBUGOUT("NVM Read Error\n");
728 *pba_num |= data;
729 return IXGBE_SUCCESS;
733 * ixgbe_read_pba_raw
734 * @hw: pointer to the HW structure
735 * @eeprom_buf: optional pointer to EEPROM image
736 * @eeprom_buf_size: size of EEPROM image in words
737 * @max_pba_block_size: PBA block size limit
738 * @pba: pointer to output PBA structure
740 * Reads PBA from EEPROM image when eeprom_buf is not NULL.
741 * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
744 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
745 u32 eeprom_buf_size, u16 max_pba_block_size,
746 struct ixgbe_pba *pba)
751 if (pba == NULL)
752 return IXGBE_ERR_PARAM;
754 if (eeprom_buf == NULL) {
755 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
756 &pba->word[0]);
760 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
761 pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
762 pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
764 return IXGBE_ERR_PARAM;
768 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
769 if (pba->pba_block == NULL)
770 return IXGBE_ERR_PARAM;
772 ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
773 eeprom_buf_size,
774 &pba_block_size);
778 if (pba_block_size > max_pba_block_size)
779 return IXGBE_ERR_PARAM;
781 if (eeprom_buf == NULL) {
782 ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
783 pba_block_size,
784 pba->pba_block);
788 if (eeprom_buf_size > (u32)(pba->word[1] +
790 memcpy(pba->pba_block,
791 &eeprom_buf[pba->word[1]],
792 pba_block_size * sizeof(u16));
794 return IXGBE_ERR_PARAM;
799 return IXGBE_SUCCESS;
803 * ixgbe_write_pba_raw
804 * @hw: pointer to the HW structure
805 * @eeprom_buf: optional pointer to EEPROM image
806 * @eeprom_buf_size: size of EEPROM image in words
807 * @pba: pointer to PBA structure
809 * Writes PBA to EEPROM image when eeprom_buf is not NULL.
810 * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
813 s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
814 u32 eeprom_buf_size, struct ixgbe_pba *pba)
818 if (pba == NULL)
819 return IXGBE_ERR_PARAM;
821 if (eeprom_buf == NULL) {
822 ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
823 &pba->word[0]);
827 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
828 eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
829 eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
831 return IXGBE_ERR_PARAM;
835 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
836 if (pba->pba_block == NULL)
837 return IXGBE_ERR_PARAM;
839 if (eeprom_buf == NULL) {
840 ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
841 pba->pba_block[0],
842 pba->pba_block);
846 if (eeprom_buf_size > (u32)(pba->word[1] +
847 pba->pba_block[0])) {
848 memcpy(&eeprom_buf[pba->word[1]],
849 pba->pba_block,
850 pba->pba_block[0] * sizeof(u16));
852 return IXGBE_ERR_PARAM;
857 return IXGBE_SUCCESS;
861 * ixgbe_get_pba_block_size
862 * @hw: pointer to the HW structure
863 * @eeprom_buf: optional pointer to EEPROM image
864 * @eeprom_buf_size: size of EEPROM image in words
865 * @pba_block_size: pointer to output variable
867 * Returns the size of the PBA block in words. Function operates on EEPROM
868 * image if the eeprom_buf pointer is not NULL, otherwise it accesses the
869 * physical EEPROM device.
872 s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
873 u32 eeprom_buf_size, u16 *pba_block_size)
879 DEBUGFUNC("ixgbe_get_pba_block_size");
881 if (eeprom_buf == NULL) {
882 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
883 &pba_word[0]);
887 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
888 pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
889 pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
891 return IXGBE_ERR_PARAM;
895 if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
896 if (eeprom_buf == NULL) {
897 ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
898 &length);
902 if (eeprom_buf_size > pba_word[1])
903 length = eeprom_buf[pba_word[1] + 0];
905 return IXGBE_ERR_PARAM;
908 if (length == 0xFFFF || length == 0)
909 return IXGBE_ERR_PBA_SECTION;
911 /* PBA number in legacy format, there is no PBA Block. */
912 length = 0;
915 if (pba_block_size != NULL)
916 *pba_block_size = length;
918 return IXGBE_SUCCESS;
922 * ixgbe_get_mac_addr_generic - Generic get MAC address
923 * @hw: pointer to hardware structure
924 * @mac_addr: Adapter MAC address
926 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
927 * A reset of the adapter must be performed prior to calling this function
928 * in order for the MAC address to have been loaded from the EEPROM into RAR0
930 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
936 DEBUGFUNC("ixgbe_get_mac_addr_generic");
938 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
939 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
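/*
 * RAR0 holds the permanent address loaded from the EEPROM at reset:
 * RAL(0) carries the four low-order bytes and RAH(0) the two
 * high-order bytes, unpacked below one byte at a time.
 */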
941 for (i = 0; i < 4; i++)
942 mac_addr[i] = (u8)(rar_low >> (i*8));
944 for (i = 0; i < 2; i++)
945 mac_addr[i+4] = (u8)(rar_high >> (i*8));
947 return IXGBE_SUCCESS;
951 * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
952 * @hw: pointer to hardware structure
953 * @link_status: the link status returned by the PCI config space
955 * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
957 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
959 struct ixgbe_mac_info *mac = &hw->mac;
961 if (hw->bus.type == ixgbe_bus_type_unknown)
962 hw->bus.type = ixgbe_bus_type_pci_express;
964 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
965 case IXGBE_PCI_LINK_WIDTH_1:
966 hw->bus.width = ixgbe_bus_width_pcie_x1;
968 case IXGBE_PCI_LINK_WIDTH_2:
969 hw->bus.width = ixgbe_bus_width_pcie_x2;
971 case IXGBE_PCI_LINK_WIDTH_4:
972 hw->bus.width = ixgbe_bus_width_pcie_x4;
974 case IXGBE_PCI_LINK_WIDTH_8:
975 hw->bus.width = ixgbe_bus_width_pcie_x8;
978 hw->bus.width = ixgbe_bus_width_unknown;
982 switch (link_status & IXGBE_PCI_LINK_SPEED) {
983 case IXGBE_PCI_LINK_SPEED_2500:
984 hw->bus.speed = ixgbe_bus_speed_2500;
986 case IXGBE_PCI_LINK_SPEED_5000:
987 hw->bus.speed = ixgbe_bus_speed_5000;
989 case IXGBE_PCI_LINK_SPEED_8000:
990 hw->bus.speed = ixgbe_bus_speed_8000;
993 hw->bus.speed = ixgbe_bus_speed_unknown;
997 mac->ops.set_lan_id(hw);
1001 * ixgbe_get_bus_info_generic - Generic set PCI bus info
1002 * @hw: pointer to hardware structure
1004 * Gets the PCI bus info (speed, width, type) then calls helper function to
1005 * store this data within the ixgbe_hw structure.
1007 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1011 DEBUGFUNC("ixgbe_get_bus_info_generic");
1013 /* Get the negotiated link width and speed from PCI config space */
1014 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1016 ixgbe_set_pci_config_data_generic(hw, link_status);
1018 return IXGBE_SUCCESS;
1022 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1023 * @hw: pointer to the HW structure
1025 * Determines the LAN function id by reading memory-mapped registers and swaps
1026 * the port value if requested, and sets the MAC instance for devices that
1027 * share CS4227.
1029 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
1031 struct ixgbe_bus_info *bus = &hw->bus;
1035 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
1037 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
1038 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
1039 bus->lan_id = (u8)bus->func;
1041 /* check for a port swap */
1042 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
1043 if (reg & IXGBE_FACTPS_LFS)
1044 bus->func ^= 0x1;
1046 /* Get MAC instance from EEPROM for configuring CS4227 */
1047 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
1048 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
1049 bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
1050 IXGBE_EE_CTRL_4_INST_ID_SHIFT;
1055 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1056 * @hw: pointer to hardware structure
1058 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1059 * disables transmit and receive units. The adapter_stopped flag is used by
1060 * the shared code and drivers to determine if the adapter is in a stopped
1061 * state and should not touch the hardware.
1063 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
1068 DEBUGFUNC("ixgbe_stop_adapter_generic");
1071 * Set the adapter_stopped flag so other driver functions stop touching
1072 * the hardware.
1074 hw->adapter_stopped = true;
1076 /* Disable the receive unit */
1077 ixgbe_disable_rx(hw);
1079 /* Clear interrupt mask to stop interrupts from being generated */
1080 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1082 /* Clear any pending interrupts, flush previous writes */
1083 IXGBE_READ_REG(hw, IXGBE_EICR);
1085 /* Disable the transmit unit. Each queue must be disabled. */
1086 for (i = 0; i < hw->mac.max_tx_queues; i++)
1087 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
1089 /* Disable the receive unit by stopping each queue */
1090 for (i = 0; i < hw->mac.max_rx_queues; i++) {
1091 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1092 reg_val &= ~IXGBE_RXDCTL_ENABLE;
1093 reg_val |= IXGBE_RXDCTL_SWFLSH;
1094 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
1097 /* flush all queues disables */
1098 IXGBE_WRITE_FLUSH(hw);
1102 * Prevent the PCI-E bus from hanging by disabling PCI-E master
1103 * access and verify no pending requests
1105 return ixgbe_disable_pcie_master(hw);
1109 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1110 * @hw: pointer to hardware structure
1111 * @index: led number to turn on
1113 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1115 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1117 DEBUGFUNC("ixgbe_led_on_generic");
1119 /* To turn on the LED, set mode to ON. */
1120 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1121 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1122 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1123 IXGBE_WRITE_FLUSH(hw);
1125 return IXGBE_SUCCESS;
1129 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
1130 * @hw: pointer to hardware structure
1131 * @index: led number to turn off
1133 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1135 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1137 DEBUGFUNC("ixgbe_led_off_generic");
1139 /* To turn off the LED, set mode to OFF. */
1140 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1141 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1142 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1143 IXGBE_WRITE_FLUSH(hw);
1145 return IXGBE_SUCCESS;
1149 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1150 * @hw: pointer to hardware structure
1152 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1153 * ixgbe_hw struct in order to set up EEPROM access.
1155 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
1157 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1161 DEBUGFUNC("ixgbe_init_eeprom_params_generic");
1163 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1164 eeprom->type = ixgbe_eeprom_none;
1165 /* Set default semaphore delay to 10ms which is a well
1166 * tested value */
1167 eeprom->semaphore_delay = 10;
1168 /* Clear EEPROM page size, it will be initialized as needed */
1169 eeprom->word_page_size = 0;
1172 * Check for EEPROM present first.
1173 * If not present leave as none
1175 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1176 if (eec & IXGBE_EEC_PRES) {
1177 eeprom->type = ixgbe_eeprom_spi;
1180 * SPI EEPROM is assumed here. This code would need to
1181 * change if a future EEPROM is not SPI.
1183 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1184 IXGBE_EEC_SIZE_SHIFT);
1185 eeprom->word_size = 1 << (eeprom_size +
1186 IXGBE_EEPROM_WORD_SIZE_SHIFT);
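/*
 * The EEC SIZE field encodes the device capacity as a power of two;
 * the shift above converts it to a word count, and the ADDR_SIZE bit
 * below selects between 8-bit and 16-bit SPI addressing.
 */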
1189 if (eec & IXGBE_EEC_ADDR_SIZE)
1190 eeprom->address_bits = 16;
1192 eeprom->address_bits = 8;
1193 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
1194 "%d\n", eeprom->type, eeprom->word_size,
1195 eeprom->address_bits);
1198 return IXGBE_SUCCESS;
1202 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1203 * @hw: pointer to hardware structure
1204 * @offset: offset within the EEPROM to write
1205 * @words: number of word(s)
1206 * @data: 16 bit word(s) to write to EEPROM
1208 * Writes 16 bit word(s) to EEPROM through bit-bang method
1210 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1211 u16 words, u16 *data)
1213 s32 status = IXGBE_SUCCESS;
1216 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1218 hw->eeprom.ops.init_params(hw);
1220 if (words == 0) {
1221 status = IXGBE_ERR_INVALID_ARGUMENT;
1225 if (offset + words > hw->eeprom.word_size) {
1226 status = IXGBE_ERR_EEPROM;
1231 * The EEPROM page size cannot be queried from the chip. We do lazy
1232 * initialization, which is worthwhile only when writing a large buffer.
1234 if ((hw->eeprom.word_page_size == 0) &&
1235 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1236 ixgbe_detect_eeprom_page_size_generic(hw, offset);
1239 * We cannot hold the synchronization semaphores for too long, to avoid
1240 * starving other entities. However, it is more efficient to write in
1241 * bursts than to synchronize access for each word.
1243 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1244 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1245 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1246 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1247 count, &data[i]);
1249 if (status != IXGBE_SUCCESS)
1250 break;
1258 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1259 * @hw: pointer to hardware structure
1260 * @offset: offset within the EEPROM to be written to
1261 * @words: number of word(s)
1262 * @data: 16 bit word(s) to be written to the EEPROM
1264 * If ixgbe_eeprom_update_checksum is not called after this function, the
1265 * EEPROM will most likely contain an invalid checksum.
1267 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1268 u16 words, u16 *data)
1274 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
1276 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
1278 /* Prepare the EEPROM for writing */
1279 status = ixgbe_acquire_eeprom(hw);
1281 if (status == IXGBE_SUCCESS) {
1282 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1283 ixgbe_release_eeprom(hw);
1284 status = IXGBE_ERR_EEPROM;
1288 if (status == IXGBE_SUCCESS) {
1289 for (i = 0; i < words; i++) {
1290 ixgbe_standby_eeprom(hw);
1292 /* Send the WRITE ENABLE command (8 bit opcode ) */
1293 ixgbe_shift_out_eeprom_bits(hw,
1294 IXGBE_EEPROM_WREN_OPCODE_SPI,
1295 IXGBE_EEPROM_OPCODE_BITS);
1297 ixgbe_standby_eeprom(hw);
1300 * Some SPI eeproms use the 8th address bit embedded
1301 * in the opcode
1303 if ((hw->eeprom.address_bits == 8) &&
1304 ((offset + i) >= 128))
1305 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1307 /* Send the Write command (8-bit opcode + addr) */
1308 ixgbe_shift_out_eeprom_bits(hw, write_opcode,
1309 IXGBE_EEPROM_OPCODE_BITS);
1310 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1311 hw->eeprom.address_bits);
1313 page_size = hw->eeprom.word_page_size;
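/*
 * Each word is byte-swapped before being clocked out below because
 * ixgbe_shift_out_eeprom_bits() sends the most significant bit first;
 * the swap puts the low-order byte of data[i] on the wire first.
 */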
1315 /* Send the data in burst via SPI */
1316 do {
1317 word = data[i];
1318 word = (word >> 8) | (word << 8);
1319 ixgbe_shift_out_eeprom_bits(hw, word, 16);
1324 /* do not wrap around page */
1325 if (((offset + i) & (page_size - 1)) ==
1326 (page_size - 1))
1327 break;
1328 } while (++i < words);
1330 ixgbe_standby_eeprom(hw);
1333 /* Done with writing - release the EEPROM */
1334 ixgbe_release_eeprom(hw);
1341 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1342 * @hw: pointer to hardware structure
1343 * @offset: offset within the EEPROM to be written to
1344 * @data: 16 bit word to be written to the EEPROM
1346 * If ixgbe_eeprom_update_checksum is not called after this function, the
1347 * EEPROM will most likely contain an invalid checksum.
1349 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1353 DEBUGFUNC("ixgbe_write_eeprom_generic");
1355 hw->eeprom.ops.init_params(hw);
1357 if (offset >= hw->eeprom.word_size) {
1358 status = IXGBE_ERR_EEPROM;
1362 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1369 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1370 * @hw: pointer to hardware structure
1371 * @offset: offset within the EEPROM to be read
1372 * @data: read 16 bit words(s) from EEPROM
1373 * @words: number of word(s)
1375 * Reads 16 bit word(s) from EEPROM through bit-bang method
1377 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1378 u16 words, u16 *data)
1380 s32 status = IXGBE_SUCCESS;
1383 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1385 hw->eeprom.ops.init_params(hw);
1387 if (words == 0) {
1388 status = IXGBE_ERR_INVALID_ARGUMENT;
1392 if (offset + words > hw->eeprom.word_size) {
1393 status = IXGBE_ERR_EEPROM;
1398 * We cannot hold the synchronization semaphores for too long, to avoid
1399 * starving other entities. However, it is more efficient to read in
1400 * bursts than to synchronize access for each word.
1402 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1403 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1404 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1406 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1407 count, &data[i]);
1409 if (status != IXGBE_SUCCESS)
1410 break;
1418 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1419 * @hw: pointer to hardware structure
1420 * @offset: offset within the EEPROM to be read
1421 * @words: number of word(s)
1422 * @data: read 16 bit word(s) from EEPROM
1424 * Reads 16 bit word(s) from EEPROM through bit-bang method
1426 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1427 u16 words, u16 *data)
1431 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1434 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1436 /* Prepare the EEPROM for reading */
1437 status = ixgbe_acquire_eeprom(hw);
1439 if (status == IXGBE_SUCCESS) {
1440 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1441 ixgbe_release_eeprom(hw);
1442 status = IXGBE_ERR_EEPROM;
1446 if (status == IXGBE_SUCCESS) {
1447 for (i = 0; i < words; i++) {
1448 ixgbe_standby_eeprom(hw);
1450 * Some SPI eeproms use the 8th address bit embedded
1451 * in the opcode
1453 if ((hw->eeprom.address_bits == 8) &&
1454 ((offset + i) >= 128))
1455 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1457 /* Send the READ command (opcode + addr) */
1458 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1459 IXGBE_EEPROM_OPCODE_BITS);
1460 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1461 hw->eeprom.address_bits);
1463 /* Read the data. */
1464 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1465 data[i] = (word_in >> 8) | (word_in << 8);
1468 /* End this read operation */
1469 ixgbe_release_eeprom(hw);
1476 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1477 * @hw: pointer to hardware structure
1478 * @offset: offset within the EEPROM to be read
1479 * @data: read 16 bit value from EEPROM
1481 * Reads 16 bit value from EEPROM through bit-bang method
1483 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1484 u16 *data)
1488 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1490 hw->eeprom.ops.init_params(hw);
1492 if (offset >= hw->eeprom.word_size) {
1493 status = IXGBE_ERR_EEPROM;
1497 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1504 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1505 * @hw: pointer to hardware structure
1506 * @offset: offset of word in the EEPROM to read
1507 * @words: number of word(s)
1508 * @data: 16 bit word(s) from the EEPROM
1510 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1512 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1513 u16 words, u16 *data)
1516 s32 status = IXGBE_SUCCESS;
1519 DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1521 hw->eeprom.ops.init_params(hw);
1523 if (words == 0) {
1524 status = IXGBE_ERR_INVALID_ARGUMENT;
1525 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1529 if (offset >= hw->eeprom.word_size) {
1530 status = IXGBE_ERR_EEPROM;
1531 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
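/*
 * One EERD transaction per word: program the word address, set the
 * START bit, poll for DONE, then pull the data out of the upper half
 * of the register.
 */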
1535 for (i = 0; i < words; i++) {
1536 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1537 IXGBE_EEPROM_RW_REG_START;
1539 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1540 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1542 if (status == IXGBE_SUCCESS) {
1543 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1544 IXGBE_EEPROM_RW_REG_DATA);
1545 } else {
1546 DEBUGOUT("Eeprom read timed out\n");
1555 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1556 * @hw: pointer to hardware structure
1557 * @offset: offset within the EEPROM to be used as a scratch pad
1559 * Discover EEPROM page size by writing marching data at given offset.
1560 * This function is called only when we are writing a new large buffer
1561 * at given offset so the data would be overwritten anyway.
1563 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1564 u16 offset)
1566 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1567 s32 status = IXGBE_SUCCESS;
1570 DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1572 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1573 data[i] = i;
1575 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1576 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1577 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1578 hw->eeprom.word_page_size = 0;
1579 if (status != IXGBE_SUCCESS)
1582 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1583 if (status != IXGBE_SUCCESS)
1587 * When writing in burst more than the actual page size
1588 * EEPROM address wraps around current page.
1590 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
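/*
 * The word read back into data[0] is the index that wrapped onto
 * 'offset' during the oversized burst write above, which is why the
 * page size is the maximum burst length minus that value.
 */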
1592 DEBUGOUT1("Detected EEPROM page size = %d words.",
1593 hw->eeprom.word_page_size);
1599 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1600 * @hw: pointer to hardware structure
1601 * @offset: offset of word in the EEPROM to read
1602 * @data: word read from the EEPROM
1604 * Reads a 16 bit word from the EEPROM using the EERD register.
1606 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1608 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1612 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1613 * @hw: pointer to hardware structure
1614 * @offset: offset of word in the EEPROM to write
1615 * @words: number of word(s)
1616 * @data: word(s) to write to the EEPROM
1618 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1620 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1621 u16 words, u16 *data)
1624 s32 status = IXGBE_SUCCESS;
1627 DEBUGFUNC("ixgbe_write_eewr_generic");
1629 hw->eeprom.ops.init_params(hw);
1631 if (words == 0) {
1632 status = IXGBE_ERR_INVALID_ARGUMENT;
1633 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1637 if (offset >= hw->eeprom.word_size) {
1638 status = IXGBE_ERR_EEPROM;
1639 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
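/*
 * One EEWR transaction per word: pack the word address and data, make
 * sure no earlier write is still pending, set the START bit, then poll
 * for DONE before moving on.
 */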
1643 for (i = 0; i < words; i++) {
1644 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1645 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1646 IXGBE_EEPROM_RW_REG_START;
1648 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1649 if (status != IXGBE_SUCCESS) {
1650 DEBUGOUT("Eeprom write EEWR timed out\n");
1654 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1656 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1657 if (status != IXGBE_SUCCESS) {
1658 DEBUGOUT("Eeprom write EEWR timed out\n");
1668 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1669 * @hw: pointer to hardware structure
1670 * @offset: offset of word in the EEPROM to write
1671 * @data: word to write to the EEPROM
1673 * Write a 16 bit word to the EEPROM using the EEWR register.
1675 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1677 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1681 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1682 * @hw: pointer to hardware structure
1683 * @ee_reg: EEPROM flag for polling
1685 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1686 * read or write is done respectively.
1688 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1692 s32 status = IXGBE_ERR_EEPROM;
1694 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1696 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1697 if (ee_reg == IXGBE_NVM_POLL_READ)
1698 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1700 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1702 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1703 status = IXGBE_SUCCESS;
1709 if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1710 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1711 "EEPROM read/write done polling timed out");
1717 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1718 * @hw: pointer to hardware structure
1720 * Prepares EEPROM for access using bit-bang method. This function should
1721 * be called before issuing a command to the EEPROM.
1723 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1725 s32 status = IXGBE_SUCCESS;
1729 DEBUGFUNC("ixgbe_acquire_eeprom");
1731 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1732 != IXGBE_SUCCESS)
1733 status = IXGBE_ERR_SWFW_SYNC;
1735 if (status == IXGBE_SUCCESS) {
1736 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1738 /* Request EEPROM Access */
1739 eec |= IXGBE_EEC_REQ;
1740 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1742 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1743 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1744 if (eec & IXGBE_EEC_GNT)
1745 break;
1746 usec_delay(5);
1749 /* Release if grant not acquired */
1750 if (!(eec & IXGBE_EEC_GNT)) {
1751 eec &= ~IXGBE_EEC_REQ;
1752 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1753 DEBUGOUT("Could not acquire EEPROM grant\n");
1755 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1756 status = IXGBE_ERR_EEPROM;
1759 /* Setup EEPROM for Read/Write */
1760 if (status == IXGBE_SUCCESS) {
1761 /* Clear CS and SK */
1762 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1763 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1764 IXGBE_WRITE_FLUSH(hw);
1772 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1773 * @hw: pointer to hardware structure
1775 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1777 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1779 s32 status = IXGBE_ERR_EEPROM;
1784 DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1787 /* Get SMBI software semaphore between device drivers first */
1788 for (i = 0; i < timeout; i++) {
1790 * If the SMBI bit is 0 when we read it, then the bit will be
1791 * set and we have the semaphore
1793 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1794 if (!(swsm & IXGBE_SWSM_SMBI)) {
1795 status = IXGBE_SUCCESS;
1802 DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1803 "not granted.\n");
1805 * this release is particularly important because our attempts
1806 * above to get the semaphore may have succeeded, and if there
1807 * was a timeout, we should unconditionally clear the semaphore
1808 * bits to free the driver to make progress
1810 ixgbe_release_eeprom_semaphore(hw);
1815 * If the SMBI bit is 0 when we read it, then the bit will be
1816 * set and we have the semaphore
1818 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1819 if (!(swsm & IXGBE_SWSM_SMBI))
1820 status = IXGBE_SUCCESS;
1823 /* Now get the semaphore between SW/FW through the SWESMBI bit */
1824 if (status == IXGBE_SUCCESS) {
1825 for (i = 0; i < timeout; i++) {
1826 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1828 /* Set the SW EEPROM semaphore bit to request access */
1829 swsm |= IXGBE_SWSM_SWESMBI;
1830 IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);
1833 * If we set the bit successfully then we got the
1834 * semaphore
1836 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1837 if (swsm & IXGBE_SWSM_SWESMBI)
1838 break;
1844 * Release semaphores and return error if SW EEPROM semaphore
1845 * was not granted because we don't have access to the EEPROM
1847 if (i >= timeout) {
1848 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1849 "SWESMBI Software EEPROM semaphore not granted.\n");
1850 ixgbe_release_eeprom_semaphore(hw);
1851 status = IXGBE_ERR_EEPROM;
1854 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1855 "Software semaphore SMBI between device drivers "
1856 "not granted.\n");
1863 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1864 * @hw: pointer to hardware structure
1866 * This function clears hardware semaphore bits.
1868 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1872 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1874 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1876 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1877 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1878 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1879 IXGBE_WRITE_FLUSH(hw);
1883 * ixgbe_ready_eeprom - Polls for EEPROM ready
1884 * @hw: pointer to hardware structure
1886 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1888 s32 status = IXGBE_SUCCESS;
1892 DEBUGFUNC("ixgbe_ready_eeprom");
1895 * Read "Status Register" repeatedly until the LSB is cleared. The
1896 * EEPROM will signal that the command has been completed by clearing
1897 * bit 0 of the internal status register. If it's not cleared within
1898 * 5 milliseconds, then error out.
1900 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1901 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1902 IXGBE_EEPROM_OPCODE_BITS);
1903 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1904 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1905 break;
1907 usec_delay(5);
1908 ixgbe_standby_eeprom(hw);
1912 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1913 * devices (and only 0-5mSec on 5V devices)
1915 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1916 DEBUGOUT("SPI EEPROM Status error\n");
1917 status = IXGBE_ERR_EEPROM;
1924 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1925 * @hw: pointer to hardware structure
1927 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1931 DEBUGFUNC("ixgbe_standby_eeprom");
1933 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1935 /* Toggle CS to flush commands */
1936 eec |= IXGBE_EEC_CS;
1937 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1938 IXGBE_WRITE_FLUSH(hw);
1940 eec &= ~IXGBE_EEC_CS;
1941 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1942 IXGBE_WRITE_FLUSH(hw);
1947 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1948 * @hw: pointer to hardware structure
1949 * @data: data to send to the EEPROM
1950 * @count: number of bits to shift out
1952 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1953 u16 count)
1959 DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1961 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1964 * Mask is used to shift "count" bits of "data" out to the EEPROM
1965 * one bit at a time. Determine the starting bit based on count
1967 mask = 0x01 << (count - 1);
1969 for (i = 0; i < count; i++) {
1971 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1972 * "1", and then raising and then lowering the clock (the SK
1973 * bit controls the clock input to the EEPROM). A "0" is
1974 * shifted out to the EEPROM by setting "DI" to "0" and then
1975 * raising and then lowering the clock.
1977 if (data & mask)
1978 eec |= IXGBE_EEC_DI;
1979 else
1980 eec &= ~IXGBE_EEC_DI;
1982 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1983 IXGBE_WRITE_FLUSH(hw);
1987 ixgbe_raise_eeprom_clk(hw, &eec);
1988 ixgbe_lower_eeprom_clk(hw, &eec);
1991 * Shift mask to signify next bit of data to shift in to the
1992 * EEPROM
1995 mask = mask >> 1;
1997 /* We leave the "DI" bit set to "0" when we leave this routine. */
1998 eec &= ~IXGBE_EEC_DI;
1999 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2000 IXGBE_WRITE_FLUSH(hw);
2004 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
2005 * @hw: pointer to hardware structure
2007 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
2013 DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
2016 * In order to read a register from the EEPROM, we need to shift
2017 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
2018 * the clock input to the EEPROM (setting the SK bit), and then reading
2019 * the value of the "DO" bit. During this "shifting in" process the
2020 * "DI" bit should always be clear.
2022 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2024 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
2026 for (i = 0; i < count; i++) {
2027 data = data << 1;
2028 ixgbe_raise_eeprom_clk(hw, &eec);
2030 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2032 eec &= ~(IXGBE_EEC_DI);
2033 if (eec & IXGBE_EEC_DO)
2034 data |= 1;
2036 ixgbe_lower_eeprom_clk(hw, &eec);
2043 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2044 * @hw: pointer to hardware structure
2045 * @eec: EEC register's current value
2047 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2049 DEBUGFUNC("ixgbe_raise_eeprom_clk");
2052 * Raise the clock input to the EEPROM
2053 * (setting the SK bit), then delay
2055 *eec = *eec | IXGBE_EEC_SK;
2056 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2057 IXGBE_WRITE_FLUSH(hw);
2062 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2063 * @hw: pointer to hardware structure
2064 * @eec: EEC register's current value
2066 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2068 DEBUGFUNC("ixgbe_lower_eeprom_clk");
2071 * Lower the clock input to the EEPROM (clearing the SK bit), then
2074 *eec = *eec & ~IXGBE_EEC_SK;
2075 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2076 IXGBE_WRITE_FLUSH(hw);
2081 * ixgbe_release_eeprom - Release EEPROM, release semaphores
2082 * @hw: pointer to hardware structure
2084 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2088 DEBUGFUNC("ixgbe_release_eeprom");
2090 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2092 eec |= IXGBE_EEC_CS; /* Pull CS high */
2093 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2095 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2096 IXGBE_WRITE_FLUSH(hw);
2100 /* Stop requesting EEPROM access */
2101 eec &= ~IXGBE_EEC_REQ;
2102 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2104 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2106 /* Delay before attempt to obtain semaphore again to allow FW access */
2107 msec_delay(hw->eeprom.semaphore_delay);
2111 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2112 * @hw: pointer to hardware structure
2114 * Returns a negative error code on error, or the 16-bit checksum
2116 s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
2125 DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
2127 /* Include 0x0-0x3F in the checksum */
2128 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
2129 if (hw->eeprom.ops.read(hw, i, &word)) {
2130 DEBUGOUT("EEPROM read failed\n");
2131 return IXGBE_ERR_EEPROM;
2136 /* Include all data from pointers except for the fw pointer */
2137 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
2138 if (hw->eeprom.ops.read(hw, i, &pointer)) {
2139 DEBUGOUT("EEPROM read failed\n");
2140 return IXGBE_ERR_EEPROM;
2143 /* If the pointer seems invalid */
2144 if (pointer == 0xFFFF || pointer == 0)
2145 continue;
2147 if (hw->eeprom.ops.read(hw, pointer, &length)) {
2148 DEBUGOUT("EEPROM read failed\n");
2149 return IXGBE_ERR_EEPROM;
2152 if (length == 0xFFFF || length == 0)
2153 continue;
2155 for (j = pointer + 1; j <= pointer + length; j++) {
2156 if (hw->eeprom.ops.read(hw, j, &word)) {
2157 DEBUGOUT("EEPROM read failed\n");
2158 return IXGBE_ERR_EEPROM;
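/*
 * IXGBE_EEPROM_SUM is the value that the words summed above plus the
 * stored checksum word must add up to, so the checksum is the
 * difference computed below.
 */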
2164 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
2166 return (s32)checksum;
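/*
 * Illustrative use only (not part of the shared code): a driver init
 * path would normally validate the NVM image through the ops table set
 * up in ixgbe_init_ops_generic(), e.g.
 *
 *	u16 checksum;
 *
 *	if (hw->eeprom.ops.validate_checksum(hw, &checksum) != IXGBE_SUCCESS)
 *		return IXGBE_ERR_EEPROM_CHECKSUM;
 */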
2170 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2171 * @hw: pointer to hardware structure
2172 * @checksum_val: calculated checksum
2174 * Performs checksum calculation and validates the EEPROM checksum. If the
2175 * caller does not need checksum_val, the value can be NULL.
2177 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2178 u16 *checksum_val)
2182 u16 read_checksum = 0;
2184 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2186 /* Read the first word from the EEPROM. If this times out or fails, do
2187 * not continue or we could be in for a very long wait while every
2190 status = hw->eeprom.ops.read(hw, 0, &checksum);
2192 DEBUGOUT("EEPROM read failed\n");
2196 status = hw->eeprom.ops.calc_checksum(hw);
2200 checksum = (u16)(status & 0xffff);
2202 status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2204 DEBUGOUT("EEPROM read failed\n");
2208 /* Verify read checksum from EEPROM is the same as
2209 * calculated checksum
2211 if (read_checksum != checksum)
2212 status = IXGBE_ERR_EEPROM_CHECKSUM;
2214 /* If the user cares, return the calculated checksum */
2216 *checksum_val = checksum;
2222 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2223 * @hw: pointer to hardware structure
2225 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2230 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2232 /* Read the first word from the EEPROM. If this times out or fails, do
2233 * not continue or we could be in for a very long wait while every
2236 status = hw->eeprom.ops.read(hw, 0, &checksum);
2238 DEBUGOUT("EEPROM read failed\n");
2242 status = hw->eeprom.ops.calc_checksum(hw);
2246 checksum = (u16)(status & 0xffff);
2248 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
/**
 * ixgbe_validate_mac_addr - Validate MAC address
 * @mac_addr: pointer to MAC address.
 *
 * Tests a MAC address to ensure it is a valid Individual Address.
 **/
s32 ixgbe_validate_mac_addr(u8 *mac_addr)
{
	s32 status = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_validate_mac_addr");

	/* Make sure it is not a multicast address */
	if (IXGBE_IS_MULTICAST(mac_addr)) {
		status = IXGBE_ERR_INVALID_MAC_ADDR;
	/* Not a broadcast address */
	} else if (IXGBE_IS_BROADCAST(mac_addr)) {
		status = IXGBE_ERR_INVALID_MAC_ADDR;
	/* Reject the zero address */
	} else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
		   mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
		status = IXGBE_ERR_INVALID_MAC_ADDR;
	}
	return status;
}
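
/*
 * For example (illustrative): 01:00:5E:00:00:01 is rejected as a multicast
 * address, FF:FF:FF:FF:FF:FF as broadcast, and 00:00:00:00:00:00 as the zero
 * address; an ordinary unicast address passes.
 */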
2280 * ixgbe_set_rar_generic - Set Rx address register
2281 * @hw: pointer to hardware structure
2282 * @index: Receive address register to write
2283 * @addr: Address to put into receive address register
2284 * @vmdq: VMDq "set" or "pool" index
2285 * @enable_addr: set flag that address is active
2287 * Puts an ethernet address into a receive address register.
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
			  u32 enable_addr)
{
	u32 rar_low, rar_high;
2293 u32 rar_entries = hw->mac.num_rar_entries;
2295 DEBUGFUNC("ixgbe_set_rar_generic");
2297 /* Make sure we are using a valid rar index range */
2298 if (index >= rar_entries) {
2299 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2300 "RAR index %d is out of range.\n", index);
2301 return IXGBE_ERR_INVALID_ARGUMENT;
2304 /* setup VMDq pool selection before this RAR gets enabled */
2305 hw->mac.ops.set_vmdq(hw, index, vmdq);
2308 * HW expects these in little endian so we reverse the byte
2309 * order from network order (big endian) to little endian
2311 rar_low = ((u32)addr[0] |
2312 ((u32)addr[1] << 8) |
2313 ((u32)addr[2] << 16) |
2314 ((u32)addr[3] << 24));
2316 * Some parts put the VMDq setting in the extra RAH bits,
2317 * so save everything except the lower 16 bits that hold part
2318 * of the address and the address valid bit.
2320 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2321 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2322 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2324 if (enable_addr != 0)
2325 rar_high |= IXGBE_RAH_AV;
2327 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2328 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
	return IXGBE_SUCCESS;
}
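
/*
 * Byte-order example for the RAR write above (illustrative): for the MAC
 * address 00:1B:21:AA:BB:CC, rar_low is 0xAA211B00 and the lower 16 bits of
 * rar_high are 0xCCBB, i.e. the address is stored little endian across
 * RAL/RAH.
 */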
2334 * ixgbe_clear_rar_generic - Remove Rx address register
2335 * @hw: pointer to hardware structure
2336 * @index: Receive address register to write
2338 * Clears an ethernet address from a receive address register.
2340 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2343 u32 rar_entries = hw->mac.num_rar_entries;
2345 DEBUGFUNC("ixgbe_clear_rar_generic");
2347 /* Make sure we are using a valid rar index range */
2348 if (index >= rar_entries) {
2349 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2350 "RAR index %d is out of range.\n", index);
2351 return IXGBE_ERR_INVALID_ARGUMENT;
2355 * Some parts put the VMDq setting in the extra RAH bits,
2356 * so save everything except the lower 16 bits that hold part
2357 * of the address and the address valid bit.
2359 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2360 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2362 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2363 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2365 /* clear VMDq pool/queue selection for this RAR */
2366 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2368 return IXGBE_SUCCESS;
2372 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2373 * @hw: pointer to hardware structure
2375 * Places the MAC address in receive address register 0 and clears the rest
2376 * of the receive address registers. Clears the multicast table. Assumes
2377 * the receiver is in reset when the routine is called.
2379 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
2382 u32 rar_entries = hw->mac.num_rar_entries;
2384 DEBUGFUNC("ixgbe_init_rx_addrs_generic");
2387 * If the current mac address is valid, assume it is a software override
2388 * to the permanent address.
2389 * Otherwise, use the permanent address from the eeprom.
2391 if (ixgbe_validate_mac_addr(hw->mac.addr) ==
2392 IXGBE_ERR_INVALID_MAC_ADDR) {
2393 /* Get the MAC address from the RAR0 for later reference */
2394 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2396 DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
2397 hw->mac.addr[0], hw->mac.addr[1],
2399 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2400 hw->mac.addr[4], hw->mac.addr[5]);
2402 /* Setup the receive address. */
2403 DEBUGOUT("Overriding MAC Address in RAR[0]\n");
2404 DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
2405 hw->mac.addr[0], hw->mac.addr[1],
2407 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2408 hw->mac.addr[4], hw->mac.addr[5]);
2410 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2413 /* clear VMDq pool/queue selection for RAR 0 */
2414 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2416 hw->addr_ctrl.overflow_promisc = 0;
2418 hw->addr_ctrl.rar_used_count = 1;
2420 /* Zero out the other receive addresses. */
2421 DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2422 for (i = 1; i < rar_entries; i++) {
2423 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2424 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2428 hw->addr_ctrl.mta_in_use = 0;
2429 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2431 DEBUGOUT(" Clearing MTA\n");
2432 for (i = 0; i < hw->mac.mcft_size; i++)
2433 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2435 ixgbe_init_uta_tables(hw);
2437 return IXGBE_SUCCESS;
/**
 * ixgbe_add_uc_addr - Adds a secondary unicast address.
 * @hw: pointer to hardware structure
 * @addr: new address
 * @vmdq: VMDq "set" or "pool" index
 *
 * Adds it to unused receive address register or goes into promiscuous mode.
 **/
2447 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2449 u32 rar_entries = hw->mac.num_rar_entries;
2452 DEBUGFUNC("ixgbe_add_uc_addr");
2454 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2455 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2458 * Place this address in the RAR if there is room,
2459 * else put the controller into promiscuous mode
2461 if (hw->addr_ctrl.rar_used_count < rar_entries) {
2462 rar = hw->addr_ctrl.rar_used_count;
2463 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2464 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2465 hw->addr_ctrl.rar_used_count++;
2467 hw->addr_ctrl.overflow_promisc++;
2470 DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2474 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2475 * @hw: pointer to hardware structure
2476 * @addr_list: the list of new addresses
2477 * @addr_count: number of addresses
2478 * @next: iterator function to walk the address list
2480 * The given list replaces any existing list. Clears the secondary addrs from
2481 * receive address registers. Uses unused receive address registers for the
2482 * first secondary addresses, and falls back to promiscuous mode as needed.
2484 * Drivers using secondary unicast addresses must set user_set_promisc when
2485 * manually putting the device into promiscuous mode.
2487 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2488 u32 addr_count, ixgbe_mc_addr_itr next)
2492 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2497 DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2500 * Clear accounting of old secondary address list,
2501 * don't count RAR[0]
2503 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2504 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2505 hw->addr_ctrl.overflow_promisc = 0;
2507 /* Zero out the other receive addresses */
2508 DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2509 for (i = 0; i < uc_addr_in_use; i++) {
2510 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2511 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2514 /* Add the new addresses */
2515 for (i = 0; i < addr_count; i++) {
2516 DEBUGOUT(" Adding the secondary addresses:\n");
2517 addr = next(hw, &addr_list, &vmdq);
2518 ixgbe_add_uc_addr(hw, addr, vmdq);
2521 if (hw->addr_ctrl.overflow_promisc) {
2522 /* enable promisc if not already in overflow or set by user */
2523 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2524 DEBUGOUT(" Entering address overflow promisc mode\n");
2525 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2526 fctrl |= IXGBE_FCTRL_UPE;
2527 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2530 /* only disable if set by overflow, not by user */
2531 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2532 DEBUGOUT(" Leaving address overflow promisc mode\n");
2533 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2534 fctrl &= ~IXGBE_FCTRL_UPE;
2535 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2539 DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2540 return IXGBE_SUCCESS;
/**
 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
 * @hw: pointer to hardware structure
 * @mc_addr: the multicast address
 *
 * Extracts the 12 bits, from a multicast address, to determine which
 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
 * incoming rx multicast addresses, to determine the bit-vector to check in
 * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
 * by the MO field of the MCSTCTRL. The MO field is set during initialization
 * to mc_filter_type.
 **/
STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector = 0;

	DEBUGFUNC("ixgbe_mta_vector");

	switch (hw->mac.mc_filter_type) {
	case 0:   /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:   /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:   /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:   /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		DEBUGOUT("MC filter type param set incorrectly\n");
		ASSERT(0);
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
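
/*
 * Example of the extraction above (illustrative): with mc_filter_type 0 and
 * mc_addr[4] = 0x12, mc_addr[5] = 0x34, vector = (0x12 >> 4) | (0x34 << 4) =
 * 0x341, i.e. bits [47:36] of the address.
 */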

/**
 * ixgbe_set_mta - Set bit-vector in multicast table
 * @hw: pointer to hardware structure
 * @mc_addr: Multicast address
 *
 * Sets the bit-vector in the multicast table.
 **/
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector;
	u32 vector_bit;
	u32 vector_reg;

	DEBUGFUNC("ixgbe_set_mta");

	hw->addr_ctrl.mta_in_use++;

	vector = ixgbe_mta_vector(hw, mc_addr);
	DEBUGOUT1(" bit-vector = 0x%03X\n", vector);

	/*
	 * The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits. We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value. The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register is determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}
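
/*
 * Continuing the example above (illustrative): vector 0x341 selects
 * mta_shadow register 0x341 >> 5 = 0x1A and bit 0x341 & 0x1F = 0x01, so
 * MTA[26] bit 1 is set when the shadow table is written to hardware.
 */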
2620 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2621 * @hw: pointer to hardware structure
2622 * @mc_addr_list: the list of new multicast addresses
2623 * @mc_addr_count: number of addresses
2624 * @next: iterator function to walk the multicast address list
2625 * @clear: flag, when set clears the table beforehand
2627 * When the clear flag is set, the given list replaces any existing list.
2628 * Hashes the given addresses into the multicast table.
2630 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2631 u32 mc_addr_count, ixgbe_mc_addr_itr next,
2637 DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2640 * Set the new number of MC addresses that we are being requested to
2643 hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2644 hw->addr_ctrl.mta_in_use = 0;
2646 /* Clear mta_shadow */
2648 DEBUGOUT(" Clearing MTA\n");
2649 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2652 /* Update mta_shadow */
2653 for (i = 0; i < mc_addr_count; i++) {
2654 DEBUGOUT(" Adding the multicast addresses:\n");
2655 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2659 for (i = 0; i < hw->mac.mcft_size; i++)
2660 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2661 hw->mac.mta_shadow[i]);
2663 if (hw->addr_ctrl.mta_in_use > 0)
2664 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2665 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2667 DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
	return IXGBE_SUCCESS;
}
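
/*
 * Illustrative caller sketch (not part of the driver): an ixgbe_mc_addr_itr
 * iterator returns one address per call and advances the list pointer. The
 * helper name and the flat 6-byte list layout are assumptions of this
 * example.
 *
 *	static u8 *example_mc_itr(struct ixgbe_hw *hw, u8 **list, u32 *vmdq)
 *	{
 *		u8 *addr = *list;
 *
 *		*list += 6;
 *		*vmdq = 0;
 *		return addr;
 *	}
 *
 *	ixgbe_update_mc_addr_list_generic(hw, mc_list, count,
 *					  example_mc_itr, true);
 */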
2672 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2673 * @hw: pointer to hardware structure
2675 * Enables multicast address in RAR and the use of the multicast hash table.
2677 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2679 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2681 DEBUGFUNC("ixgbe_enable_mc_generic");
2683 if (a->mta_in_use > 0)
2684 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2685 hw->mac.mc_filter_type);
2687 return IXGBE_SUCCESS;
2691 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2692 * @hw: pointer to hardware structure
2694 * Disables multicast address in RAR and the use of the multicast hash table.
2696 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2698 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2700 DEBUGFUNC("ixgbe_disable_mc_generic");
2702 if (a->mta_in_use > 0)
2703 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2705 return IXGBE_SUCCESS;
2709 * ixgbe_fc_enable_generic - Enable flow control
2710 * @hw: pointer to hardware structure
2712 * Enable flow control according to the current settings.
2714 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2716 s32 ret_val = IXGBE_SUCCESS;
2717 u32 mflcn_reg, fccfg_reg;
2722 DEBUGFUNC("ixgbe_fc_enable_generic");
2724 /* Validate the water mark configuration */
2725 if (!hw->fc.pause_time) {
2726 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2730 /* Low water mark of zero causes XOFF floods */
2731 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2732 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2733 hw->fc.high_water[i]) {
2734 if (!hw->fc.low_water[i] ||
2735 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2736 DEBUGOUT("Invalid water mark configuration\n");
2737 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2743 /* Negotiate the fc mode to use */
2744 hw->mac.ops.fc_autoneg(hw);
2746 /* Disable any previous flow control settings */
2747 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2748 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2750 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2751 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2754 * The possible values of fc.current_mode are:
2755 * 0: Flow control is completely disabled
2756 * 1: Rx flow control is enabled (we can receive pause frames,
2757 * but not send pause frames).
2758 * 2: Tx flow control is enabled (we can send pause frames but
2759 * we do not support receiving pause frames).
2760 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2763 switch (hw->fc.current_mode) {
2766 * Flow control is disabled by software override or autoneg.
2767 * The code below will actually disable it in the HW.
2770 case ixgbe_fc_rx_pause:
2772 * Rx Flow control is enabled and Tx Flow control is
2773 * disabled by software override. Since there really
2774 * isn't a way to advertise that we are capable of RX
2775 * Pause ONLY, we will advertise that we support both
2776 * symmetric and asymmetric Rx PAUSE. Later, we will
2777 * disable the adapter's ability to send PAUSE frames.
2779 mflcn_reg |= IXGBE_MFLCN_RFCE;
2781 case ixgbe_fc_tx_pause:
2783 * Tx Flow control is enabled, and Rx Flow control is
2784 * disabled by software override.
2786 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2789 /* Flow control (both Rx and Tx) is enabled by SW override. */
2790 mflcn_reg |= IXGBE_MFLCN_RFCE;
2791 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2794 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
2795 "Flow control param set incorrectly\n");
2796 ret_val = IXGBE_ERR_CONFIG;
2801 /* Set 802.3x based flow control settings. */
2802 mflcn_reg |= IXGBE_MFLCN_DPF;
2803 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2804 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2807 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2808 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2809 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2810 hw->fc.high_water[i]) {
2811 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2812 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2813 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2815 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2817 * In order to prevent Tx hangs when the internal Tx
2818 * switch is enabled we must set the high water mark
2819 * to the Rx packet buffer size - 24KB. This allows
2820 * the Tx switch to function even under heavy Rx
2823 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
2826 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2829 /* Configure pause time (2 TCs per register) */
2830 reg = hw->fc.pause_time * 0x00010001;
2831 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2832 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2834 /* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
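
/*
 * Example of the pause time programming above (illustrative): with
 * hw->fc.pause_time = 0xFFFF, each FCTTV register gets 0xFFFF * 0x00010001 =
 * 0xFFFFFFFF (two TCs per register) and FCRTV gets 0xFFFF / 2 = 0x7FFF as
 * the refresh threshold.
 */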
2842 * ixgbe_negotiate_fc - Negotiate flow control
2843 * @hw: pointer to hardware structure
2844 * @adv_reg: flow control advertised settings
2845 * @lp_reg: link partner's flow control settings
2846 * @adv_sym: symmetric pause bit in advertisement
2847 * @adv_asm: asymmetric pause bit in advertisement
2848 * @lp_sym: symmetric pause bit in link partner advertisement
2849 * @lp_asm: asymmetric pause bit in link partner advertisement
2851 * Find the intersection between advertised settings and link partner's
2852 * advertised settings
2854 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2855 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2857 if ((!(adv_reg)) || (!(lp_reg))) {
2858 ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2859 "Local or link partner's advertised flow control "
2860 "settings are NULL. Local: %x, link partner: %x\n",
2862 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2865 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2867 * Now we need to check if the user selected Rx ONLY
2868 * of pause frames. In this case, we had to advertise
2869 * FULL flow control because we could not advertise RX
2870 * ONLY. Hence, we must now check to see if we need to
2871 * turn OFF the TRANSMISSION of PAUSE frames.
2873 if (hw->fc.requested_mode == ixgbe_fc_full) {
2874 hw->fc.current_mode = ixgbe_fc_full;
2875 DEBUGOUT("Flow Control = FULL.\n");
2877 hw->fc.current_mode = ixgbe_fc_rx_pause;
2878 DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2880 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2881 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2882 hw->fc.current_mode = ixgbe_fc_tx_pause;
2883 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2884 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2885 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2886 hw->fc.current_mode = ixgbe_fc_rx_pause;
2887 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2889 hw->fc.current_mode = ixgbe_fc_none;
2890 DEBUGOUT("Flow Control = NONE.\n");
	return IXGBE_SUCCESS;
}
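
/*
 * Pause resolution implemented above (restating the code, for reference):
 *  - local SYM and partner SYM          -> full, unless the caller requested
 *                                          only rx_pause, then rx_pause
 *  - local ASM only, partner SYM + ASM  -> tx_pause
 *  - local SYM + ASM, partner ASM only  -> rx_pause
 *  - anything else                      -> none
 */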

/**
 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37 on 1 gig fiber.
 **/
2901 STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2903 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2904 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2907 * On multispeed fiber at 1g, bail out if
2908 * - link is up but AN did not complete, or if
2909 * - link is up and AN completed but timed out
2912 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2913 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2914 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
2915 DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
2919 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2920 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2922 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2923 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2924 IXGBE_PCS1GANA_ASM_PAUSE,
2925 IXGBE_PCS1GANA_SYM_PAUSE,
2926 IXGBE_PCS1GANA_ASM_PAUSE);
2933 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2934 * @hw: pointer to hardware structure
2936 * Enable flow control according to IEEE clause 37.
2938 STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2940 u32 links2, anlp1_reg, autoc_reg, links;
2941 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2944 * On backplane, bail out if
2945 * - backplane autoneg was not completed, or if
2946 * - we are 82599 and link partner is not AN enabled
2948 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2949 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
2950 DEBUGOUT("Auto-Negotiation did not complete\n");
2954 if (hw->mac.type == ixgbe_mac_82599EB) {
2955 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2956 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2957 DEBUGOUT("Link partner is not AN enabled\n");
2962 * Read the 10g AN autoc and LP ability registers and resolve
2963 * local flow control settings accordingly
2965 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2966 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2968 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2969 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2970 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2977 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2978 * @hw: pointer to hardware structure
2980 * Enable flow control according to IEEE clause 37.
2982 STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2984 u16 technology_ability_reg = 0;
2985 u16 lp_technology_ability_reg = 0;
2987 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2988 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2989 &technology_ability_reg);
2990 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2991 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2992 &lp_technology_ability_reg);
2994 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2995 (u32)lp_technology_ability_reg,
2996 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2997 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
3001 * ixgbe_fc_autoneg - Configure flow control
3002 * @hw: pointer to hardware structure
3004 * Compares our advertised flow control capabilities to those advertised by
3005 * our link partner, and determines the proper flow control mode to use.
3007 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
3009 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3010 ixgbe_link_speed speed;
3013 DEBUGFUNC("ixgbe_fc_autoneg");
	/*
	 * AN should have completed when the cable was plugged in.
	 * Look for reasons to bail out.  Bail out if:
	 * - FC autoneg is disabled, or if
	 * - link is not up.
	 */
	if (hw->fc.disable_fc_autoneg) {
		ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
			      "Flow control autoneg is disabled");
		goto out;
	}

	hw->mac.ops.check_link(hw, &speed, &link_up, false);
	if (!link_up) {
		ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
		goto out;
	}
3033 switch (hw->phy.media_type) {
3034 /* Autoneg flow control on fiber adapters */
3035 case ixgbe_media_type_fiber_qsfp:
3036 case ixgbe_media_type_fiber:
3037 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
3038 ret_val = ixgbe_fc_autoneg_fiber(hw);
3041 /* Autoneg flow control on backplane adapters */
3042 case ixgbe_media_type_backplane:
3043 ret_val = ixgbe_fc_autoneg_backplane(hw);
3046 /* Autoneg flow control on copper adapters */
3047 case ixgbe_media_type_copper:
3048 if (ixgbe_device_supports_autoneg_fc(hw))
3049 ret_val = ixgbe_fc_autoneg_copper(hw);
out:
	if (ret_val == IXGBE_SUCCESS) {
		hw->fc.fc_was_autonegged = true;
	} else {
		hw->fc.fc_was_autonegged = false;
		hw->fc.current_mode = hw->fc.requested_mode;
	}
}
3066 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3067 * @hw: pointer to hardware structure
3069 * System-wide timeout range is encoded in PCIe Device Control2 register.
3071 * Add 10% to specified maximum and return the number of times to poll for
3072 * completion timeout, in units of 100 microsec. Never return less than
3073 * 800 = 80 millisec.
3075 STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3080 devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3081 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3084 case IXGBE_PCIDEVCTRL2_65_130ms:
3085 pollcnt = 1300; /* 130 millisec */
3087 case IXGBE_PCIDEVCTRL2_260_520ms:
3088 pollcnt = 5200; /* 520 millisec */
3090 case IXGBE_PCIDEVCTRL2_1_2s:
3091 pollcnt = 20000; /* 2 sec */
3093 case IXGBE_PCIDEVCTRL2_4_8s:
3094 pollcnt = 80000; /* 8 sec */
3096 case IXGBE_PCIDEVCTRL2_17_34s:
3097 pollcnt = 34000; /* 34 sec */
3099 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
3100 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
3101 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
3102 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
3104 pollcnt = 800; /* 80 millisec minimum */
3108 /* add 10% to spec maximum */
	return (pollcnt * 11) / 10;
}
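
/*
 * Example (illustrative): for the default 16-32 ms completion timeout range
 * the switch above picks pollcnt = 800, so the function returns 880 polls of
 * 100 usec each, roughly 88 ms of total wait.
 */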
3113 * ixgbe_disable_pcie_master - Disable PCI-express master access
3114 * @hw: pointer to hardware structure
3116 * Disables PCI-Express master access and verifies there are no pending
3117 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
3118 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
3119 * is returned signifying master requests disabled.
3121 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
3123 s32 status = IXGBE_SUCCESS;
3127 DEBUGFUNC("ixgbe_disable_pcie_master");
3129 /* Always set this bit to ensure any future transactions are blocked */
3130 IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
3132 /* Exit if master requests are blocked */
3133 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
3134 IXGBE_REMOVED(hw->hw_addr))
3137 /* Poll for master request bit to clear */
3138 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
3140 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
3145 * Two consecutive resets are required via CTRL.RST per datasheet
3146 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
3147 * of this need. The first reset prevents new master requests from
3148 * being issued by our device. We then must wait 1usec or more for any
3149 * remaining completions from the PCIe bus to trickle in, and then reset
3150 * again to clear out any effects they may have had on our device.
3152 DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
3153 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
3155 if (hw->mac.type >= ixgbe_mac_X550)
3159 * Before proceeding, make sure that the PCIe block does not have
3160 * transactions pending.
3162 poll = ixgbe_pcie_timeout_poll(hw);
3163 for (i = 0; i < poll; i++) {
3165 value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
3166 if (IXGBE_REMOVED(hw->hw_addr))
3168 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
3172 ERROR_REPORT1(IXGBE_ERROR_POLLING,
3173 "PCIe transaction pending bit also did not clear.\n");
3174 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
3181 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
3182 * @hw: pointer to hardware structure
3183 * @mask: Mask to specify which semaphore to acquire
3185 * Acquires the SWFW semaphore through the GSSR register for the specified
3186 * function (CSR, PHY0, PHY1, EEPROM, Flash)
3188 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3192 u32 fwmask = mask << 5;
3196 DEBUGFUNC("ixgbe_acquire_swfw_sync");
3198 for (i = 0; i < timeout; i++) {
3200 * SW NVM semaphore bit is used for access to all
3201 * SW_FW_SYNC bits (not just NVM)
3203 if (ixgbe_get_eeprom_semaphore(hw))
3204 return IXGBE_ERR_SWFW_SYNC;
3206 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3207 if (!(gssr & (fwmask | swmask))) {
3209 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3210 ixgbe_release_eeprom_semaphore(hw);
3211 return IXGBE_SUCCESS;
3213 /* Resource is currently in use by FW or SW */
3214 ixgbe_release_eeprom_semaphore(hw);
3219 /* If time expired clear the bits holding the lock and retry */
3220 if (gssr & (fwmask | swmask))
3221 ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
3224 return IXGBE_ERR_SWFW_SYNC;
3228 * ixgbe_release_swfw_sync - Release SWFW semaphore
3229 * @hw: pointer to hardware structure
3230 * @mask: Mask to specify which semaphore to release
3232 * Releases the SWFW semaphore through the GSSR register for the specified
3233 * function (CSR, PHY0, PHY1, EEPROM, Flash)
3235 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3240 DEBUGFUNC("ixgbe_release_swfw_sync");
3242 ixgbe_get_eeprom_semaphore(hw);
3244 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3246 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3248 ixgbe_release_eeprom_semaphore(hw);
3252 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3253 * @hw: pointer to hardware structure
3255 * Stops the receive data path and waits for the HW to internally empty
3256 * the Rx security block
3258 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3260 #define IXGBE_MAX_SECRX_POLL 40
3265 DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3268 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3269 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3270 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3271 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3272 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3273 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3276 /* Use interrupt-safe sleep just in case */
3280 /* For informational purposes only */
3281 if (i >= IXGBE_MAX_SECRX_POLL)
3282 DEBUGOUT("Rx unit being enabled before security "
3283 "path fully disabled. Continuing with init.\n");
3285 return IXGBE_SUCCESS;
/**
 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
 * @hw: pointer to hardware structure
 * @locked: set to false; no SW/FW lock is needed for the generic read
 * @reg_val: value read from AUTOC
 *
 * The default case requires no protection, so just do the register read.
 **/
s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
{
	*locked = false;
	*reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	return IXGBE_SUCCESS;
}

/**
 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
 * @hw: pointer to hardware structure
 * @reg_val: value to write to AUTOC
 * @locked: bool to indicate whether the SW/FW lock was already taken by
 *	    a previous prot_autoc_read_generic
 *
 * The default case requires no protection, so just do the register write.
 **/
s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
{
	UNREFERENCED_1PARAMETER(locked);

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
	return IXGBE_SUCCESS;
}
3320 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3321 * @hw: pointer to hardware structure
3323 * Enables the receive data path.
3325 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3329 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3331 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3332 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3333 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3334 IXGBE_WRITE_FLUSH(hw);
3336 return IXGBE_SUCCESS;
3340 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3341 * @hw: pointer to hardware structure
3342 * @regval: register value to write to RXCTRL
3344 * Enables the Rx DMA unit
3346 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3348 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3350 if (regval & IXGBE_RXCTRL_RXEN)
3351 ixgbe_enable_rx(hw);
3353 ixgbe_disable_rx(hw);
3355 return IXGBE_SUCCESS;
3359 * ixgbe_blink_led_start_generic - Blink LED based on index.
3360 * @hw: pointer to hardware structure
3361 * @index: led number to blink
3363 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
3365 ixgbe_link_speed speed = 0;
3368 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3369 s32 ret_val = IXGBE_SUCCESS;
3370 bool locked = false;
3372 DEBUGFUNC("ixgbe_blink_led_start_generic");
3375 * Link must be up to auto-blink the LEDs;
3376 * Force it if link is down.
3378 hw->mac.ops.check_link(hw, &speed, &link_up, false);
3381 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3382 if (ret_val != IXGBE_SUCCESS)
3385 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3386 autoc_reg |= IXGBE_AUTOC_FLU;
3388 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3389 if (ret_val != IXGBE_SUCCESS)
3392 IXGBE_WRITE_FLUSH(hw);
3396 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3397 led_reg |= IXGBE_LED_BLINK(index);
3398 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3399 IXGBE_WRITE_FLUSH(hw);
3406 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3407 * @hw: pointer to hardware structure
3408 * @index: led number to stop blinking
3410 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
3413 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3414 s32 ret_val = IXGBE_SUCCESS;
3415 bool locked = false;
3417 DEBUGFUNC("ixgbe_blink_led_stop_generic");
3419 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3420 if (ret_val != IXGBE_SUCCESS)
3423 autoc_reg &= ~IXGBE_AUTOC_FLU;
3424 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3426 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3427 if (ret_val != IXGBE_SUCCESS)
3430 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3431 led_reg &= ~IXGBE_LED_BLINK(index);
3432 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3433 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3434 IXGBE_WRITE_FLUSH(hw);
3441 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3442 * @hw: pointer to hardware structure
3443 * @san_mac_offset: SAN MAC address offset
3445 * This function will read the EEPROM location for the SAN MAC address
3446 * pointer, and returns the value at that location. This is used in both
3447 * get and set mac_addr routines.
3449 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3450 u16 *san_mac_offset)
3454 DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3457 * First read the EEPROM pointer to see if the MAC addresses are
3460 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3463 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3464 "eeprom at offset %d failed",
3465 IXGBE_SAN_MAC_ADDR_PTR);
3472 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3473 * @hw: pointer to hardware structure
3474 * @san_mac_addr: SAN MAC address
3476 * Reads the SAN MAC address from the EEPROM, if it's available. This is
3477 * per-port, so set_lan_id() must be called before reading the addresses.
3478 * set_lan_id() is called by identify_sfp(), but this cannot be relied
3479 * upon for non-SFP connections, so we must call it here.
3481 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3483 u16 san_mac_data, san_mac_offset;
3487 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3490 * First read the EEPROM pointer to see if the MAC addresses are
3491 * available. If they're not, no point in calling set_lan_id() here.
3493 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3494 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3495 goto san_mac_addr_out;
3497 /* make sure we know which port we need to program */
3498 hw->mac.ops.set_lan_id(hw);
3499 /* apply the port offset to the address offset */
3500 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3501 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3502 for (i = 0; i < 3; i++) {
3503 ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3506 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3507 "eeprom read at offset %d failed",
3509 goto san_mac_addr_out;
3511 san_mac_addr[i * 2] = (u8)(san_mac_data);
3512 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3515 return IXGBE_SUCCESS;
3519 * No addresses available in this EEPROM. It's not an
3520 * error though, so just wipe the local address and return.
3522 for (i = 0; i < 6; i++)
3523 san_mac_addr[i] = 0xFF;
3524 return IXGBE_SUCCESS;
3528 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3529 * @hw: pointer to hardware structure
3530 * @san_mac_addr: SAN MAC address
3532 * Write a SAN MAC address to the EEPROM.
3534 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3537 u16 san_mac_data, san_mac_offset;
3540 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3542 /* Look for SAN mac address pointer. If not defined, return */
3543 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3544 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3545 return IXGBE_ERR_NO_SAN_ADDR_PTR;
3547 /* Make sure we know which port we need to write */
3548 hw->mac.ops.set_lan_id(hw);
3549 /* Apply the port offset to the address offset */
3550 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3551 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3553 for (i = 0; i < 3; i++) {
3554 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3555 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3556 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3560 return IXGBE_SUCCESS;
3564 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3565 * @hw: pointer to hardware structure
3567 * Read PCIe configuration space, and get the MSI-X vector count from
3568 * the capabilities table.
3570 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3576 switch (hw->mac.type) {
3577 case ixgbe_mac_82598EB:
3578 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3579 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3581 case ixgbe_mac_82599EB:
3582 case ixgbe_mac_X540:
3583 case ixgbe_mac_X550:
3584 case ixgbe_mac_X550EM_x:
3585 case ixgbe_mac_X550EM_a:
3586 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3587 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
	DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
	msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
	if (IXGBE_REMOVED(hw->hw_addr))
		msix_count = 0;
	msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;

	/* MSI-X count is zero-based in HW */
	msix_count++;

	if (msix_count > max_msix_count)
		msix_count = max_msix_count;

	return msix_count;
}

/**
 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
 * @hw: pointer to hardware structure
 * @addr: Address to put into receive address register
 * @vmdq: VMDq pool to assign
 *
 * Puts an ethernet address into a receive address register, or
 * finds the rar that it is already in; adds to the pool list
 **/
3617 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3619 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3620 u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3622 u32 rar_low, rar_high;
3623 u32 addr_low, addr_high;
3625 DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3627 /* swap bytes for HW little endian */
3628 addr_low = addr[0] | (addr[1] << 8)
3631 addr_high = addr[4] | (addr[5] << 8);
3634 * Either find the mac_id in rar or find the first empty space.
3635 * rar_highwater points to just after the highest currently used
3636 * rar in order to shorten the search. It grows when we add a new
3639 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3640 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3642 if (((IXGBE_RAH_AV & rar_high) == 0)
3643 && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3644 first_empty_rar = rar;
3645 } else if ((rar_high & 0xFFFF) == addr_high) {
3646 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3647 if (rar_low == addr_low)
3648 break; /* found it already in the rars */
3652 if (rar < hw->mac.rar_highwater) {
3653 /* already there so just add to the pool bits */
3654 ixgbe_set_vmdq(hw, rar, vmdq);
3655 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3656 /* stick it into first empty RAR slot we found */
3657 rar = first_empty_rar;
3658 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3659 } else if (rar == hw->mac.rar_highwater) {
3660 /* add it to the top of the list and inc the highwater mark */
3661 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3662 hw->mac.rar_highwater++;
3663 } else if (rar >= hw->mac.num_rar_entries) {
3664 return IXGBE_ERR_INVALID_MAC_ADDR;
3668 * If we found rar[0], make sure the default pool bit (we use pool 0)
3669 * remains cleared to be sure default pool packets will get delivered
3672 ixgbe_clear_vmdq(hw, rar, 0);
3678 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3679 * @hw: pointer to hardware struct
3680 * @rar: receive address register index to disassociate
3681 * @vmdq: VMDq pool index to remove from the rar
3683 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3685 u32 mpsar_lo, mpsar_hi;
3686 u32 rar_entries = hw->mac.num_rar_entries;
3688 DEBUGFUNC("ixgbe_clear_vmdq_generic");
3690 /* Make sure we are using a valid rar index range */
3691 if (rar >= rar_entries) {
3692 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3693 "RAR index %d is out of range.\n", rar);
3694 return IXGBE_ERR_INVALID_ARGUMENT;
3697 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3698 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3700 if (IXGBE_REMOVED(hw->hw_addr))
3703 if (!mpsar_lo && !mpsar_hi)
3706 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3708 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3712 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3715 } else if (vmdq < 32) {
3716 mpsar_lo &= ~(1 << vmdq);
3717 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3719 mpsar_hi &= ~(1 << (vmdq - 32));
3720 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3723 /* was that the last pool using this rar? */
3724 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
3725 hw->mac.ops.clear_rar(hw, rar);
3727 return IXGBE_SUCCESS;
3731 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3732 * @hw: pointer to hardware struct
3733 * @rar: receive address register index to associate with a VMDq index
3734 * @vmdq: VMDq pool index
3736 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3739 u32 rar_entries = hw->mac.num_rar_entries;
3741 DEBUGFUNC("ixgbe_set_vmdq_generic");
3743 /* Make sure we are using a valid rar index range */
3744 if (rar >= rar_entries) {
3745 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3746 "RAR index %d is out of range.\n", rar);
3747 return IXGBE_ERR_INVALID_ARGUMENT;
3751 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3753 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3755 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3756 mpsar |= 1 << (vmdq - 32);
3757 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3759 return IXGBE_SUCCESS;
/**
 * ixgbe_set_vmdq_san_mac_generic - Associate default VMDq pool index with a
 * rx address
 * @hw: pointer to hardware struct
 * @vmdq: VMDq pool index
 *
 * This function should only be used in IOV mode. In IOV mode, the default
 * pool is the next pool after the number of VFs advertised and not 0.
 * The MPSAR table needs to be updated for the SAN_MAC RAR
 * [hw->mac.san_mac_rar_index].
 **/
3772 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3774 u32 rar = hw->mac.san_mac_rar_index;
3776 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3779 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3780 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3782 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3783 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3786 return IXGBE_SUCCESS;
3790 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3791 * @hw: pointer to hardware structure
3793 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3797 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3798 DEBUGOUT(" Clearing UTA\n");
3800 for (i = 0; i < 128; i++)
3801 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3803 return IXGBE_SUCCESS;
3807 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3808 * @hw: pointer to hardware structure
3809 * @vlan: VLAN id to write to VLAN filter
3811 * return the VLVF index where this VLAN id should be placed
3814 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
3816 s32 regindex, first_empty_slot;
3819 /* short cut the special case */
3823 /* if vlvf_bypass is set we don't want to use an empty slot, we
3824 * will simply bypass the VLVF if there are no entries present in the
3825 * VLVF that contain our VLAN
3827 first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
3829 /* add VLAN enable bit for comparison */
3830 vlan |= IXGBE_VLVF_VIEN;
3832 /* Search for the vlan id in the VLVF entries. Save off the first empty
3833 * slot found along the way.
3835 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
3837 for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
3838 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3841 if (!first_empty_slot && !bits)
3842 first_empty_slot = regindex;
3845 /* If we are here then we didn't find the VLAN. Return first empty
3846 * slot we found during our search, else error.
3848 if (!first_empty_slot)
3849 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");
3851 return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
3855 * ixgbe_set_vfta_generic - Set VLAN filter table
3856 * @hw: pointer to hardware structure
3857 * @vlan: VLAN id to write to VLAN filter
3858 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
3859 * @vlan_on: boolean flag to turn on/off VLAN
3860 * @vlvf_bypass: boolean flag indicating updating default pool is okay
3862 * Turn on/off specified VLAN in the VLAN filter table.
3864 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3865 bool vlan_on, bool vlvf_bypass)
3867 u32 regidx, vfta_delta, vfta;
3870 DEBUGFUNC("ixgbe_set_vfta_generic");
3872 if (vlan > 4095 || vind > 63)
3873 return IXGBE_ERR_PARAM;
3876 * this is a 2 part operation - first the VFTA, then the
3877 * VLVF and VLVFB if VT Mode is set
3878 * We don't write the VFTA until we know the VLVF part succeeded.
3882 * The VFTA is a bitstring made up of 128 32-bit registers
3883 * that enable the particular VLAN id, much like the MTA:
	 * bits[11-5]: which register
	 * bits[4-0]:  which bit in the register
	 */
	regidx = vlan / 32;
	vfta_delta = 1 << (vlan % 32);
3889 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
3891 /* vfta_delta represents the difference between the current value
3892 * of vfta and the value we want in the register. Since the diff
3893 * is an XOR mask we can just update the vfta using an XOR
3895 vfta_delta &= vlan_on ? ~vfta : vfta;
3899 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3901 ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
3903 if (ret_val != IXGBE_SUCCESS) {
3910 /* Update VFTA now that we are ready for traffic */
3912 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
	return IXGBE_SUCCESS;
}
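
/*
 * VFTA indexing example (illustrative): vlan 1234 maps to regidx = 1234 / 32
 * = 38 and bit 1234 % 32 = 18, so vfta_delta = 1 << 18 toggles that VLAN in
 * IXGBE_VFTA(38).
 */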
3918 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3919 * @hw: pointer to hardware structure
3920 * @vlan: VLAN id to write to VLAN filter
3921 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
3922 * @vlan_on: boolean flag to turn on/off VLAN in VLVF
3923 * @vfta_delta: pointer to the difference between the current value of VFTA
3924 * and the desired value
3925 * @vfta: the desired value of the VFTA
3926 * @vlvf_bypass: boolean flag indicating updating default pool is okay
3928 * Turn on/off specified bit in VLVF table.
3930 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3931 bool vlan_on, u32 *vfta_delta, u32 vfta,
3937 DEBUGFUNC("ixgbe_set_vlvf_generic");
3939 if (vlan > 4095 || vind > 63)
3940 return IXGBE_ERR_PARAM;
3942 /* If VT Mode is set
3944 * make sure the vlan is in VLVF
3945 * set the vind bit in the matching VLVFB
3947 * clear the pool bit and possibly the vind
3949 if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
3950 return IXGBE_SUCCESS;
3951 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
3955 bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
3957 /* set the pool bit */
3958 bits |= 1 << (vind % 32);
3962 /* clear the pool bit */
3963 bits ^= 1 << (vind % 32);
3966 !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
3967 /* Clear VFTA first, then disable VLVF. Otherwise
3968 * we run the risk of stray packets leaking into
3969 * the PF via the default pool
3972 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
3974 /* disable VLVF and clear remaining bit from pool */
3975 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3976 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
3978 return IXGBE_SUCCESS;
3980 /* If there are still bits set in the VLVFB registers
3981 * for the VLAN ID indicated we need to see if the
3982 * caller is requesting that we clear the VFTA entry bit.
3983 * If the caller has requested that we clear the VFTA
3984 * entry bit but there are still pools/VFs using this VLAN
3985 * ID entry then ignore the request. We're not worried
3986 * about the case where we're turning the VFTA VLAN ID
3987 * entry bit on, only when requested to turn it off as
3988 * there may be multiple pools and/or VFs using the
3989 * VLAN ID entry. In that case we cannot clear the
3990 * VFTA bit until all pools/VFs using that VLAN ID have also
3991 * been cleared. This will be indicated by "bits" being
3997 /* record pool change and enable VLAN ID if not already enabled */
3998 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
3999 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
4001 return IXGBE_SUCCESS;
/**
 * ixgbe_clear_vfta_generic - Clear VLAN filter table
 * @hw: pointer to hardware structure
 *
 * Clears the VLAN filter table, and the VMDq index associated with the filter
 **/
4010 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
4014 DEBUGFUNC("ixgbe_clear_vfta_generic");
4016 for (offset = 0; offset < hw->mac.vft_size; offset++)
4017 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
4019 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
4020 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
4021 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4022 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
4025 return IXGBE_SUCCESS;
4029 * ixgbe_check_mac_link_generic - Determine link and speed status
4030 * @hw: pointer to hardware structure
4031 * @speed: pointer to link speed
4032 * @link_up: true when link is up
4033 * @link_up_wait_to_complete: bool used to wait for link up or not
4035 * Reads the links register to determine if link is up and the current speed
4037 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4038 bool *link_up, bool link_up_wait_to_complete)
4040 u32 links_reg, links_orig;
4043 DEBUGFUNC("ixgbe_check_mac_link_generic");
4045 /* clear the old state */
4046 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
4048 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4050 if (links_orig != links_reg) {
4051 DEBUGOUT2("LINKS changed from %08X to %08X\n",
4052 links_orig, links_reg);
4055 if (link_up_wait_to_complete) {
4056 for (i = 0; i < hw->mac.max_link_up_time; i++) {
4057 if (links_reg & IXGBE_LINKS_UP) {
4064 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4067 if (links_reg & IXGBE_LINKS_UP)
4073 switch (links_reg & IXGBE_LINKS_SPEED_82599) {
4074 case IXGBE_LINKS_SPEED_10G_82599:
4075 *speed = IXGBE_LINK_SPEED_10GB_FULL;
4076 if (hw->mac.type >= ixgbe_mac_X550) {
4077 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4078 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
4081 case IXGBE_LINKS_SPEED_1G_82599:
4082 *speed = IXGBE_LINK_SPEED_1GB_FULL;
4084 case IXGBE_LINKS_SPEED_100_82599:
4085 *speed = IXGBE_LINK_SPEED_100_FULL;
4086 if (hw->mac.type >= ixgbe_mac_X550) {
4087 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4088 *speed = IXGBE_LINK_SPEED_5GB_FULL;
4092 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4095 return IXGBE_SUCCESS;
 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
 * the EEPROM
 * @hw: pointer to hardware structure
4102 * @wwnn_prefix: the alternative WWNN prefix
4103 * @wwpn_prefix: the alternative WWPN prefix
4105 * This function will read the EEPROM from the alternative SAN MAC address
4106 * block to check the support for the alternative WWNN/WWPN prefix support.
4108 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
4112 u16 alt_san_mac_blk_offset;
4114 DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
4116 /* clear output first */
4117 *wwnn_prefix = 0xFFFF;
4118 *wwpn_prefix = 0xFFFF;
4120 /* check if alternative SAN MAC is supported */
4121 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
4122 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
4123 goto wwn_prefix_err;
4125 if ((alt_san_mac_blk_offset == 0) ||
4126 (alt_san_mac_blk_offset == 0xFFFF))
4127 goto wwn_prefix_out;
4129 /* check capability in alternative san mac address block */
4130 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
4131 if (hw->eeprom.ops.read(hw, offset, &caps))
4132 goto wwn_prefix_err;
4133 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
4134 goto wwn_prefix_out;
4136 /* get the corresponding prefix for WWNN/WWPN */
4137 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
4138 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
4139 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4140 "eeprom read at offset %d failed", offset);
4143 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
4144 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
4145 goto wwn_prefix_err;
4148 return IXGBE_SUCCESS;
4151 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4152 "eeprom read at offset %d failed", offset);
4153 return IXGBE_SUCCESS;
4157 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4158 * @hw: pointer to hardware structure
4159 * @bs: the fcoe boot status
4161 * This function will read the FCOE boot status from the iSCSI FCOE block
4163 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4165 u16 offset, caps, flags;
4168 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4170 /* clear output first */
4171 *bs = ixgbe_fcoe_bootstatus_unavailable;
4173 /* check if FCOE IBA block is present */
4174 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4175 status = hw->eeprom.ops.read(hw, offset, &caps);
4176 if (status != IXGBE_SUCCESS)
4179 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4182 /* check if iSCSI FCOE block is populated */
4183 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4184 if (status != IXGBE_SUCCESS)
4187 if ((offset == 0) || (offset == 0xFFFF))
4190 /* read fcoe flags in iSCSI FCOE block */
4191 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4192 status = hw->eeprom.ops.read(hw, offset, &flags);
4193 if (status != IXGBE_SUCCESS)
4196 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4197 *bs = ixgbe_fcoe_bootstatus_enabled;
4199 *bs = ixgbe_fcoe_bootstatus_disabled;
4206 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4207 * @hw: pointer to hardware structure
4208 * @enable: enable or disable switch for MAC anti-spoofing
4209 * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
4212 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4214 int vf_target_reg = vf >> 3;
4215 int vf_target_shift = vf % 8;
4218 if (hw->mac.type == ixgbe_mac_82598EB)
4221 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4223 pfvfspoof |= (1 << vf_target_shift);
4225 pfvfspoof &= ~(1 << vf_target_shift);
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
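
/*
 * Example of the register/bit selection above (illustrative): vf = 9 gives
 * vf_target_reg = 9 >> 3 = 1 and vf_target_shift = 9 % 8 = 1, so bit 1 of
 * IXGBE_PFVFSPOOF(1) controls MAC anti-spoofing for that VF.
 */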
/**
 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
 * @hw: pointer to hardware structure
 * @enable: enable or disable switch for VLAN anti-spoofing
 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
 **/
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
	int vf_target_reg = vf >> 3;
	int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
	u32 pfvfspoof;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
	if (enable)
		pfvfspoof |= (1 << vf_target_shift);
	else
		pfvfspoof &= ~(1 << vf_target_shift);
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
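
/*
 * Illustrative note (not part of the original driver sources): each
 * PFVFSPOOF register carries the anti-spoofing enable bits for a group of
 * eight VF pools, which is why both helpers above compute the register
 * index as vf >> 3 and the bit position as vf % 8.  As a worked example,
 * for VF pool 21:
 *
 *   vf_target_reg = 21 >> 3 = 2  ->  IXGBE_PFVFSPOOF(2)
 *   MAC bit       = 21 % 8  = 5
 *   VLAN bit      = (21 % 8) + IXGBE_SPOOF_VLANAS_SHIFT
 *
 * The exact bit-field layout is defined by the PFVFSPOOF register and the
 * IXGBE_SPOOF_* definitions in ixgbe_type.h; the numbers above only
 * illustrate the index arithmetic used here.
 */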
/**
 * ixgbe_get_device_caps_generic - Get additional device capabilities
 * @hw: pointer to hardware structure
 * @device_caps: the EEPROM word with the extra device capabilities
 *
 * This function will read the EEPROM location for the device capabilities,
 * and return the word through device_caps.
 **/
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
{
	DEBUGFUNC("ixgbe_get_device_caps_generic");

	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
 * @hw: pointer to hardware structure
 **/
void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
{
	u32 regval;
	u32 i;

	DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");

	/* Enable relaxed ordering */
	for (i = 0; i < hw->mac.max_tx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
		regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
	}

	for (i = 0; i < hw->mac.max_rx_queues; i++) {
		regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
		regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
			  IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
		IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
	}
}

/**
 * ixgbe_calculate_checksum - Calculate checksum for buffer
 * @buffer: pointer to the buffer to checksum
 * @length: size of the buffer to calculate a checksum for
 *
 * Calculates the checksum of the buffer over the specified length.  The
 * checksum calculated is returned.
 **/
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
{
	u32 i;
	u8 sum = 0;

	DEBUGFUNC("ixgbe_calculate_checksum");

	if (!buffer)
		return 0;

	for (i = 0; i < length; i++)
		sum += buffer[i];

	return (u8) (0 - sum);
}
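
/*
 * Illustrative worked example (not part of the original driver sources):
 * the value returned above is the two's complement of the byte sum, so
 * adding the checksum byte back into the buffer makes the low byte of the
 * total sum zero.  For a hypothetical 3-byte buffer {0x10, 0x20, 0x30}:
 *
 *   sum      = 0x10 + 0x20 + 0x30        = 0x60
 *   checksum = (u8)(0 - 0x60)            = 0xA0
 *   verify   = 0x10 + 0x20 + 0x30 + 0xA0 = 0x100 -> low byte 0x00
 *
 * This is the property the manageability firmware relies on when it
 * validates host interface commands such as the one built in
 * ixgbe_set_fw_drv_ver_generic() below.
 */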
/**
 * ixgbe_host_interface_command - Issue command to manageability block
 * @hw: pointer to the HW structure
 * @buffer: contains the command to write and where the return status will
 *  be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 * @return_data: read and return data from the buffer (true) or not (false)
 *  Needed because FW structures are big endian and decoding of
 *  these fields can be 8 bit or 16 bit based on command. Decoding
 *  is not easily understood without making a table of commands.
 *  So we will leave this up to the caller to read back the data
 *  in these cases.
 *
 * Communicates with the manageability block.  On success returns
 * IXGBE_SUCCESS, otherwise returns a semaphore error when an error is
 * encountered acquiring the semaphore, or IXGBE_ERR_HOST_INTERFACE_COMMAND
 * when the command fails.
 **/
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
				 u32 length, u32 timeout, bool return_data)
{
	u32 hicr, i, bi, fwsts;
	u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
	u16 buf_len;
	s32 status;
	u32 dword_len;

	DEBUGFUNC("ixgbe_host_interface_command");

	if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
		DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
		return IXGBE_ERR_HOST_INTERFACE_COMMAND;
	}

	/* Take management host interface semaphore */
	status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
	if (status)
		return status;

	/* Set bit 9 of FWSTS clearing FW reset indication */
	fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
	IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);

	/* Check that the host interface is enabled. */
	hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
	if ((hicr & IXGBE_HICR_EN) == 0) {
		DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto rel_out;
	}

	/* Calculate length in DWORDs. We must be DWORD aligned */
	if ((length % (sizeof(u32))) != 0) {
		DEBUGOUT("Buffer length failure, not aligned to dword");
		status = IXGBE_ERR_INVALID_ARGUMENT;
		goto rel_out;
	}

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++)
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
				      i, IXGBE_CPU_TO_LE32(buffer[i]));

	/* Setting this bit tells the ARC that a new command is pending. */
	IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);

	for (i = 0; i < timeout; i++) {
		hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
		if (!(hicr & IXGBE_HICR_C))
			break;
		msec_delay(1);
	}

	/* Check command completion */
	if ((timeout != 0 && i == timeout) ||
	    !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
		ERROR_REPORT1(IXGBE_ERROR_CAUTION,
			      "Command has failed with no status valid.\n");
		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto rel_out;
	}

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

	/* If there is anything in the data position, pull it in */
	buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
	if (buf_len == 0)
		goto rel_out;

	if (length < buf_len + hdr_size) {
		DEBUGOUT("Buffer not large enough for reply message.\n");
		status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off) */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
		IXGBE_LE32_TO_CPUS(&buffer[bi]);
	}

rel_out:
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);

	return status;
}
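
/*
 * Illustrative sketch (not part of the original driver sources): as the
 * comment on ixgbe_host_interface_command() notes, the caller owns both the
 * command layout and the decoding of the response.  The disabled fragment
 * below only restates the calling pattern used by
 * ixgbe_set_fw_drv_ver_generic() further down in this file; the helper name
 * ixgbe_hic_example() is hypothetical.
 */
#if 0
STATIC s32 ixgbe_hic_example(struct ixgbe_hw *hw)
{
	/* any command struct that starts with struct ixgbe_hic_hdr */
	struct ixgbe_hic_drv_info cmd;
	s32 ret;

	cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
	cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
	cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	cmd.hdr.checksum = 0;
	cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&cmd,
				FW_CEM_HDR_LEN + cmd.hdr.buf_len);
	/* remaining command-specific fields omitted for brevity */

	/* Write the command, poll for completion and read back the reply */
	ret = ixgbe_host_interface_command(hw, (u32 *)&cmd, sizeof(cmd),
					   IXGBE_HI_COMMAND_TIMEOUT, true);
	if (ret != IXGBE_SUCCESS)
		return ret;

	/* Decoding the response status is left to the caller */
	if (cmd.hdr.cmd_or_resp.ret_status != FW_CEM_RESP_STATUS_SUCCESS)
		ret = IXGBE_ERR_HOST_INTERFACE_COMMAND;

	return ret;
}
#endif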
/**
 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
 * @hw: pointer to the HW structure
 * @maj: driver version major number
 * @min: driver version minor number
 * @build: driver version build number
 * @sub: driver version sub build number
 *
 * Sends the driver version number to the firmware through the manageability
 * block.  On success returns IXGBE_SUCCESS, otherwise returns
 * IXGBE_ERR_SWFW_SYNC when an error is encountered acquiring the semaphore,
 * or IXGBE_ERR_HOST_INTERFACE_COMMAND when the command fails.
 **/
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
				 u8 build, u8 sub)
{
	struct ixgbe_hic_drv_info fw_cmd;
	int i;
	s32 ret_val = IXGBE_SUCCESS;

	DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");

	fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
	fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
	fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	fw_cmd.port_num = (u8)hw->bus.func;
	fw_cmd.ver_maj = maj;
	fw_cmd.ver_min = min;
	fw_cmd.ver_build = build;
	fw_cmd.ver_sub = sub;
	fw_cmd.hdr.checksum = 0;
	fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
				(FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
	fw_cmd.pad = 0;
	fw_cmd.pad2 = 0;

	for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
		ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
						       sizeof(fw_cmd),
						       IXGBE_HI_COMMAND_TIMEOUT,
						       true);
		if (ret_val != IXGBE_SUCCESS)
			continue;

		if (fw_cmd.hdr.cmd_or_resp.ret_status ==
		    FW_CEM_RESP_STATUS_SUCCESS)
			ret_val = IXGBE_SUCCESS;
		else
			ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;

		break;
	}

	return ret_val;
}
/**
 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
 * @hw: pointer to hardware structure
 * @num_pb: number of packet buffers to allocate
 * @headroom: reserve n KB of headroom
 * @strategy: packet buffer allocation strategy
 **/
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
			     int strategy)
{
	u32 pbsize = hw->mac.rx_pb_size;
	int i = 0;
	u32 rxpktsize, txpktsize, txpbthresh;

	/* Reserve headroom */
	pbsize -= headroom;

	if (!num_pb)
		num_pb = 1;

	/* Divide remaining packet buffer space amongst the number of packet
	 * buffers requested using supplied strategy.
	 */
	switch (strategy) {
	case PBA_STRATEGY_WEIGHTED:
		/* ixgbe_dcb_pba_80_48 strategy weights the first half of the
		 * packet buffers with 5/8 of the packet buffer space.
		 */
		rxpktsize = (pbsize * 5) / (num_pb * 4);
		pbsize -= rxpktsize * (num_pb / 2);
		rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
		for (; i < (num_pb / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		/* Fall through to configure remaining packet buffers */
	case PBA_STRATEGY_EQUAL:
		rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
		for (; i < num_pb; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
		break;
	default:
		break;
	}

	/* Only support an equally distributed Tx packet buffer strategy. */
	txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
	txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
	for (i = 0; i < num_pb; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
	}

	/* Clear unused TCs, if any, to zero buffer size */
	for (; i < IXGBE_MAX_PB; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
		IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
	}
}
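
/*
 * Illustrative worked example (not part of the original driver sources),
 * assuming for illustration a 512 KB Rx packet buffer (hw->mac.rx_pb_size),
 * num_pb = 8 and no headroom reserved, under PBA_STRATEGY_WEIGHTED:
 *
 *   rxpktsize = (512 * 5) / (8 * 4) = 80 KB for each of the first 4 TCs
 *   pbsize    = 512 - 4 * 80        = 192 KB left for the second half
 *   equal     = 192 / 4             = 48 KB for each of the last 4 TCs
 *
 * which is the 80/48 split referred to by the "ixgbe_dcb_pba_80_48" comment
 * above.  The actual packet buffer size comes from hw->mac.rx_pb_size.
 */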
/**
 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
 * @hw: pointer to the hardware structure
 *
 * The 82599 and x540 MACs can experience issues if TX work is still pending
 * when a reset occurs.  This function prevents this by flushing the PCIe
 * buffers on the system.
 **/
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
{
	u32 gcr_ext, hlreg0, i, poll;
	u16 value;

	/*
	 * If double reset is not requested then all transactions should
	 * already be clear and as such there is no work to do
	 */
	if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
		return;

	/*
	 * Set loopback enable to prevent any transmits from being sent
	 * should the link come up. This assumes that the RXCTRL.RXEN bit
	 * has already been cleared.
	 */
	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);

	/* Wait for a last completion before clearing buffers */
	IXGBE_WRITE_FLUSH(hw);
	msec_delay(3);

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

out:
	/* initiate cleaning flow for buffers in the PCIe transaction layer */
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
			gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);

	/* Flush all writes and allow 20usec for all transactions to clear */
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(20);

	/* restore previous register values */
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
}
STATIC const u8 ixgbe_emc_temp_data[4] = {
	IXGBE_EMC_INTERNAL_DATA,
	IXGBE_EMC_DIODE1_DATA,
	IXGBE_EMC_DIODE2_DATA,
	IXGBE_EMC_DIODE3_DATA
};
STATIC const u8 ixgbe_emc_therm_limit[4] = {
	IXGBE_EMC_INTERNAL_THERM_LIMIT,
	IXGBE_EMC_DIODE1_THERM_LIMIT,
	IXGBE_EMC_DIODE2_THERM_LIMIT,
	IXGBE_EMC_DIODE3_THERM_LIMIT
};
/**
 * ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
 * @hw: pointer to hardware structure
 *
 * Reads the thermal sensors and stores the results in the
 * hw->mac.thermal_sensor_data structure.
 **/
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u16 ets_offset, ets_cfg, ets_sensor;
	u8 num_sensors, sensor_index, sensor_location, i;
	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic");

	/* Only support thermal sensors attached to 82599 physical port 0 */
	if ((hw->mac.type != ixgbe_mac_82599EB) ||
	    (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
		status = IXGBE_NOT_IMPLEMENTED;
		goto out;
	}

	status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
	if (status)
		goto out;

	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) {
		status = IXGBE_NOT_IMPLEMENTED;
		goto out;
	}

	status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
	if (status)
		goto out;

	if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
	    != IXGBE_ETS_TYPE_EMC) {
		status = IXGBE_NOT_IMPLEMENTED;
		goto out;
	}

	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
	if (num_sensors > IXGBE_MAX_SENSORS)
		num_sensors = IXGBE_MAX_SENSORS;

	for (i = 0; i < num_sensors; i++) {
		status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
					     &ets_sensor);
		if (status)
			goto out;

		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
				IXGBE_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
				   IXGBE_ETS_DATA_LOC_SHIFT);

		if (sensor_location != 0) {
			status = hw->phy.ops.read_i2c_byte(hw,
					ixgbe_emc_temp_data[sensor_index],
					IXGBE_I2C_THERMAL_SENSOR_ADDR,
					&data->sensor[i].temp);
			if (status)
				goto out;
		}
	}
out:
	return status;
}
/**
 * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
 * @hw: pointer to hardware structure
 *
 * Inits the thermal sensor thresholds according to the NVM map
 * and saves off the threshold and location values into mac.thermal_sensor_data
 **/
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u16 offset;
	u16 ets_offset;
	u16 ets_cfg;
	u16 ets_sensor;
	u8 low_thresh_delta;
	u8 num_sensors;
	u8 sensor_index;
	u8 sensor_location;
	u8 therm_limit;
	u8 i;
	struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic");

	memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));

	/* Only support thermal sensors attached to 82599 physical port 0 */
	if ((hw->mac.type != ixgbe_mac_82599EB) ||
	    (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
		return IXGBE_NOT_IMPLEMENTED;

	offset = IXGBE_ETS_CFG;
	if (hw->eeprom.ops.read(hw, offset, &ets_offset))
		goto eeprom_err;
	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
		return IXGBE_NOT_IMPLEMENTED;

	offset = ets_offset;
	if (hw->eeprom.ops.read(hw, offset, &ets_cfg))
		goto eeprom_err;
	if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
	    != IXGBE_ETS_TYPE_EMC)
		return IXGBE_NOT_IMPLEMENTED;

	low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
			    IXGBE_ETS_LTHRES_DELTA_SHIFT);
	num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);

	for (i = 0; i < num_sensors; i++) {
		offset = ets_offset + 1 + i;
		if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) {
			ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
				      "eeprom read at offset %d failed",
				      offset);
			continue;
		}
		sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
				IXGBE_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
				   IXGBE_ETS_DATA_LOC_SHIFT);
		therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;

		hw->phy.ops.write_i2c_byte(hw,
			ixgbe_emc_therm_limit[sensor_index],
			IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);

		if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) {
			data->sensor[i].location = sensor_location;
			data->sensor[i].caution_thresh = therm_limit;
			data->sensor[i].max_op_thresh = therm_limit -
							low_thresh_delta;
		}
	}
	return status;

eeprom_err:
	ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
		      "eeprom read at offset %d failed", offset);
	return IXGBE_NOT_IMPLEMENTED;
}
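
/*
 * Illustrative note (not part of the original driver sources): each ETS
 * sensor word read above packs three fields that the loop decodes with the
 * IXGBE_ETS_DATA_* masks and shifts: a sensor/diode index (used to pick the
 * EMC register from the tables above), a location code and the high
 * (caution) threshold.  The maximum operating threshold is then derived as
 *
 *   max_op_thresh = caution_thresh - low_thresh_delta
 *
 * where low_thresh_delta comes from the ETS configuration word.  The exact
 * field widths are defined by the IXGBE_ETS_* macros in ixgbe_type.h.
 */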
/**
 * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
 * @hw: pointer to hardware structure
 * @map: pointer to u8 arr for returning map
 *
 * Read the rtrup2tc HW register and resolve its content into map
 **/
void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
{
	u32 reg, i;

	reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
		map[i] = IXGBE_RTRUP2TC_UP_MASK &
			 (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
}
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
{
	u32 pfdtxgswc;
	u32 rxctrl;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (rxctrl & IXGBE_RXCTRL_RXEN) {
		if (hw->mac.type != ixgbe_mac_82598EB) {
			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
			if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
				pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
				IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
				hw->mac.set_lben = true;
			} else {
				hw->mac.set_lben = false;
			}
		}
		rxctrl &= ~IXGBE_RXCTRL_RXEN;
		IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
	}
}
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
{
	u32 pfdtxgswc;
	u32 rxctrl;

	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));

	if (hw->mac.type != ixgbe_mac_82598EB) {
		if (hw->mac.set_lben) {
			pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
			pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
			hw->mac.set_lben = false;
		}
	}
}
/**
 * ixgbe_mng_present - returns true when management capability is present
 * @hw: pointer to hardware structure
 **/
bool ixgbe_mng_present(struct ixgbe_hw *hw)
{
	u32 fwsm;

	if (hw->mac.type < ixgbe_mac_82599EB)
		return false;

	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
	fwsm &= IXGBE_FWSM_MODE_MASK;
	return fwsm == IXGBE_FWSM_FW_MODE_PT;
}
/**
 * ixgbe_mng_enabled - Is the manageability engine enabled?
 * @hw: pointer to hardware structure
 *
 * Returns true if the manageability engine is enabled.
 **/
bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
{
	u32 fwsm, manc, factps;

	fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
	if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
		return false;

	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
	if (!(manc & IXGBE_MANC_RCV_TCO_EN))
		return false;

	if (hw->mac.type <= ixgbe_mac_X540) {
		factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
		if (factps & IXGBE_FACTPS_MNGCG)
			return false;
	}

	return true;
}
/**
 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Set the link speed in the MAC and/or PHY register and restarts link.
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed,
					  bool autoneg_wait_to_complete)
{
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 status = IXGBE_SUCCESS;
	u32 speedcnt = 0;
	u32 i = 0;
	bool autoneg, link_up = false;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/* Try each speed one by one, highest priority first.  We do this in
	 * software because 10Gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
			goto out;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_10GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects MAC link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_10GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the controller to acquire link.  Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted.  82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, false);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
			goto out;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_1GB_FULL);
			break;
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects link speed */
			break;
		default:
			DEBUGOUT("Unexpected media type.\n");
			break;
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_1GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/* We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
						       highest_link_speed,
						       autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}
/**
 * ixgbe_set_soft_rate_select_speed - Set module link speed
 * @hw: pointer to hardware structure
 * @speed: link speed to set
 *
 * Set module link speed via the soft rate select.
 **/
void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed)
{
	s32 status;
	u8 rs, eeprom_data;

	switch (speed) {
	case IXGBE_LINK_SPEED_10GB_FULL:
		/* one bit mask same as setting on */
		rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
		break;
	case IXGBE_LINK_SPEED_1GB_FULL:
		rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
		break;
	default:
		DEBUGOUT("Invalid fixed module speed\n");
		return;
	}

	/* Set RS0 */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		DEBUGOUT("Failed to read Rx Rate Select RS0\n");
		goto out;
	}

	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
					    eeprom_data);
	if (status) {
		DEBUGOUT("Failed to write Rx Rate Select RS0\n");
		goto out;
	}

	/* Set RS1 */
	status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
					   IXGBE_I2C_EEPROM_DEV_ADDR2,
					   &eeprom_data);
	if (status) {
		DEBUGOUT("Failed to read Rx Rate Select RS1\n");
		goto out;
	}

	eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;

	status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
					    IXGBE_I2C_EEPROM_DEV_ADDR2,
					    eeprom_data);
	if (status) {
		DEBUGOUT("Failed to write Rx Rate Select RS1\n");