/*******************************************************************************

  Copyright (c) 2001-2012, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/
34 #include "ixgbe_common.h"
35 #include "ixgbe_phy.h"
36 #include "ixgbe_api.h"
37 #ident "$Id: ixgbe_common.c,v 1.382 2013/11/22 01:02:01 jtkirshe Exp $"
39 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
40 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
41 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
42 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
43 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
44 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
46 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
47 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
48 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
49 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw);
51 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
52 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
54 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
55 u16 words, u16 *data);
56 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
57 u16 words, u16 *data);
58 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
62 * ixgbe_init_ops_generic - Inits function ptrs
63 * @hw: pointer to the hardware structure
65 * Initialize the function pointers.
67 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
69 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
70 struct ixgbe_mac_info *mac = &hw->mac;
71 u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
73 DEBUGFUNC("ixgbe_init_ops_generic");
76 eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
77 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
78 if (eec & IXGBE_EEC_PRES) {
79 eeprom->ops.read = &ixgbe_read_eerd_generic;
80 eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
82 eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
83 eeprom->ops.read_buffer =
84 &ixgbe_read_eeprom_buffer_bit_bang_generic;
86 eeprom->ops.write = &ixgbe_write_eeprom_generic;
87 eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
88 eeprom->ops.validate_checksum =
89 &ixgbe_validate_eeprom_checksum_generic;
90 eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
91 eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
94 mac->ops.init_hw = &ixgbe_init_hw_generic;
95 mac->ops.reset_hw = NULL;
96 mac->ops.start_hw = &ixgbe_start_hw_generic;
97 mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
98 mac->ops.get_media_type = NULL;
99 mac->ops.get_supported_physical_layer = NULL;
100 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
101 mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
102 mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
103 mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
104 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
105 mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
106 mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
109 mac->ops.led_on = &ixgbe_led_on_generic;
110 mac->ops.led_off = &ixgbe_led_off_generic;
111 mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
112 mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
114 /* RAR, Multicast, VLAN */
115 mac->ops.set_rar = &ixgbe_set_rar_generic;
116 mac->ops.clear_rar = &ixgbe_clear_rar_generic;
117 mac->ops.insert_mac_addr = NULL;
118 mac->ops.set_vmdq = NULL;
119 mac->ops.clear_vmdq = NULL;
120 mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
121 mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
122 mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
123 mac->ops.enable_mc = &ixgbe_enable_mc_generic;
124 mac->ops.disable_mc = &ixgbe_disable_mc_generic;
125 mac->ops.clear_vfta = NULL;
126 mac->ops.set_vfta = NULL;
127 mac->ops.set_vlvf = NULL;
128 mac->ops.init_uta_tables = NULL;
131 mac->ops.fc_enable = &ixgbe_fc_enable_generic;
134 mac->ops.get_link_capabilities = NULL;
135 mac->ops.setup_link = NULL;
136 mac->ops.check_link = NULL;
138 return IXGBE_SUCCESS;
142 * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
144 * @hw: pointer to hardware structure
146 * This function returns true if the device supports flow control
147 * autonegotiation, and false if it does not.
150 s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
153 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
155 switch (hw->device_id) {
156 case IXGBE_DEV_ID_X540T:
157 case IXGBE_DEV_ID_X540T1:
158 case IXGBE_DEV_ID_82599_T3_LOM:
159 return IXGBE_SUCCESS;
161 return IXGBE_ERR_FC_NOT_SUPPORTED;
166 * ixgbe_setup_fc - Set up flow control
167 * @hw: pointer to hardware structure
169 * Called at init time to set up flow control.
171 STATIC s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
173 s32 ret_val = IXGBE_SUCCESS;
174 u32 reg = 0, reg_bp = 0;
176 bool got_lock = false;
178 DEBUGFUNC("ixgbe_setup_fc");
181 * Validate the requested mode. Strict IEEE mode does not allow
182 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
184 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
185 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
186 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
187 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
192 * 10gig parts do not have a word in the EEPROM to determine the
193 * default flow control setting, so we explicitly set it to full.
195 if (hw->fc.requested_mode == ixgbe_fc_default)
196 hw->fc.requested_mode = ixgbe_fc_full;
199 * Set up the 1G and 10G flow control advertisement registers so the
200 * HW will be able to do fc autoneg once the cable is plugged in. If
201 * we link at 10G, the 1G advertisement is harmless and vice versa.
203 switch (hw->phy.media_type) {
204 case ixgbe_media_type_fiber:
205 case ixgbe_media_type_backplane:
206 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
207 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
209 case ixgbe_media_type_copper:
210 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
211 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu);
218 * The possible values of fc.requested_mode are:
219 * 0: Flow control is completely disabled
220 * 1: Rx flow control is enabled (we can receive pause frames,
221 * but not send pause frames).
222 * 2: Tx flow control is enabled (we can send pause frames but
223 * we do not support receiving pause frames).
224 * 3: Both Rx and Tx flow control (symmetric) are enabled.
227 switch (hw->fc.requested_mode) {
229 /* Flow control completely disabled by software override. */
230 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
231 if (hw->phy.media_type == ixgbe_media_type_backplane)
232 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
233 IXGBE_AUTOC_ASM_PAUSE);
234 else if (hw->phy.media_type == ixgbe_media_type_copper)
235 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
237 case ixgbe_fc_tx_pause:
239 * Tx Flow control is enabled, and Rx Flow control is
240 * disabled by software override.
242 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
243 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
244 if (hw->phy.media_type == ixgbe_media_type_backplane) {
245 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
246 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
247 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
248 reg_cu |= IXGBE_TAF_ASM_PAUSE;
249 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
252 case ixgbe_fc_rx_pause:
254 * Rx Flow control is enabled and Tx Flow control is
255 * disabled by software override. Since there really
256 * isn't a way to advertise that we are capable of RX
257 * Pause ONLY, we will advertise that we support both
258 * symmetric and asymmetric Rx PAUSE, as such we fall
259 * through to the fc_full statement. Later, we will
260 * disable the adapter's ability to send PAUSE frames.
263 /* Flow control (both Rx and Tx) is enabled by SW override. */
264 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
265 if (hw->phy.media_type == ixgbe_media_type_backplane)
266 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
267 IXGBE_AUTOC_ASM_PAUSE;
268 else if (hw->phy.media_type == ixgbe_media_type_copper)
269 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
272 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
273 "Flow control param set incorrectly\n");
274 ret_val = IXGBE_ERR_CONFIG;
279 if (hw->mac.type < ixgbe_mac_X540) {
281 * Enable auto-negotiation between the MAC & PHY;
282 * the MAC will advertise clause 37 flow control.
284 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
285 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
287 /* Disable AN timeout */
288 if (hw->fc.strict_ieee)
289 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
291 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
292 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
296 * AUTOC restart handles negotiation of 1G and 10G on backplane
297 * and copper. There is no need to set the PCS1GCTL register.
300 if (hw->phy.media_type == ixgbe_media_type_backplane) {
301 reg_bp |= IXGBE_AUTOC_AN_RESTART;
302 /* Need the SW/FW semaphore around AUTOC writes if 82599 and
303 * LESM is on, likewise reset_pipeline requries the lock as
304 * it also writes AUTOC.
306 if ((hw->mac.type == ixgbe_mac_82599EB) &&
307 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
308 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
309 IXGBE_GSSR_MAC_CSR_SM);
310 if (ret_val != IXGBE_SUCCESS) {
311 ret_val = IXGBE_ERR_SWFW_SYNC;
317 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
318 if (hw->mac.type == ixgbe_mac_82599EB)
319 ixgbe_reset_pipeline_82599(hw);
322 hw->mac.ops.release_swfw_sync(hw,
323 IXGBE_GSSR_MAC_CSR_SM);
324 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
325 (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
326 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
327 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
330 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
336 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
337 * @hw: pointer to hardware structure
339 * Starts the hardware by filling the bus info structure and media type, clears
340 * all on chip counters, initializes receive address registers, multicast
341 * table, VLAN filter table, calls routine to set up link and flow control
342 * settings, and leaves transmit and receive units disabled and uninitialized
344 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
349 DEBUGFUNC("ixgbe_start_hw_generic");
351 /* Set the media type */
352 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
354 /* PHY ops initialization must be done in reset_hw() */
356 /* Clear the VLAN filter table */
357 hw->mac.ops.clear_vfta(hw);
359 /* Clear statistics registers */
360 hw->mac.ops.clear_hw_cntrs(hw);
362 /* Set No Snoop Disable */
363 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
364 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
365 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
366 IXGBE_WRITE_FLUSH(hw);
368 /* Setup flow control */
369 ret_val = ixgbe_setup_fc(hw);
370 if (ret_val != IXGBE_SUCCESS)
373 /* Clear adapter stopped flag */
374 hw->adapter_stopped = false;
381 * ixgbe_start_hw_gen2 - Init sequence for common device family
382 * @hw: pointer to hw structure
384 * Performs the init sequence common to the second generation
386 * Devices in the second generation:
390 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
395 /* Clear the rate limiters */
396 for (i = 0; i < hw->mac.max_tx_queues; i++) {
397 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
398 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
400 IXGBE_WRITE_FLUSH(hw);
402 /* Disable relaxed ordering */
403 for (i = 0; i < hw->mac.max_tx_queues; i++) {
404 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
405 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
406 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
409 for (i = 0; i < hw->mac.max_rx_queues; i++) {
410 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
411 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
412 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
413 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
416 return IXGBE_SUCCESS;
420 * ixgbe_init_hw_generic - Generic hardware initialization
421 * @hw: pointer to hardware structure
423 * Initialize the hardware by resetting the hardware, filling the bus info
424 * structure and media type, clears all on chip counters, initializes receive
425 * address registers, multicast table, VLAN filter table, calls routine to set
426 * up link and flow control settings, and leaves transmit and receive units
427 * disabled and uninitialized
429 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
433 DEBUGFUNC("ixgbe_init_hw_generic");
435 /* Reset the hardware */
436 status = hw->mac.ops.reset_hw(hw);
438 if (status == IXGBE_SUCCESS) {
440 status = hw->mac.ops.start_hw(hw);
447 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
448 * @hw: pointer to hardware structure
450 * Clears all hardware statistics counters by reading them from the hardware
451 * Statistics counters are clear on read.
453 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
457 DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
459 IXGBE_READ_REG(hw, IXGBE_CRCERRS);
460 IXGBE_READ_REG(hw, IXGBE_ILLERRC);
461 IXGBE_READ_REG(hw, IXGBE_ERRBC);
462 IXGBE_READ_REG(hw, IXGBE_MSPDC);
463 for (i = 0; i < 8; i++)
464 IXGBE_READ_REG(hw, IXGBE_MPC(i));
466 IXGBE_READ_REG(hw, IXGBE_MLFC);
467 IXGBE_READ_REG(hw, IXGBE_MRFC);
468 IXGBE_READ_REG(hw, IXGBE_RLEC);
469 IXGBE_READ_REG(hw, IXGBE_LXONTXC);
470 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
471 if (hw->mac.type >= ixgbe_mac_82599EB) {
472 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
473 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
475 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
476 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
479 for (i = 0; i < 8; i++) {
480 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
481 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
482 if (hw->mac.type >= ixgbe_mac_82599EB) {
483 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
484 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
486 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
487 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
490 if (hw->mac.type >= ixgbe_mac_82599EB)
491 for (i = 0; i < 8; i++)
492 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
493 IXGBE_READ_REG(hw, IXGBE_PRC64);
494 IXGBE_READ_REG(hw, IXGBE_PRC127);
495 IXGBE_READ_REG(hw, IXGBE_PRC255);
496 IXGBE_READ_REG(hw, IXGBE_PRC511);
497 IXGBE_READ_REG(hw, IXGBE_PRC1023);
498 IXGBE_READ_REG(hw, IXGBE_PRC1522);
499 IXGBE_READ_REG(hw, IXGBE_GPRC);
500 IXGBE_READ_REG(hw, IXGBE_BPRC);
501 IXGBE_READ_REG(hw, IXGBE_MPRC);
502 IXGBE_READ_REG(hw, IXGBE_GPTC);
503 IXGBE_READ_REG(hw, IXGBE_GORCL);
504 IXGBE_READ_REG(hw, IXGBE_GORCH);
505 IXGBE_READ_REG(hw, IXGBE_GOTCL);
506 IXGBE_READ_REG(hw, IXGBE_GOTCH);
507 if (hw->mac.type == ixgbe_mac_82598EB)
508 for (i = 0; i < 8; i++)
509 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
510 IXGBE_READ_REG(hw, IXGBE_RUC);
511 IXGBE_READ_REG(hw, IXGBE_RFC);
512 IXGBE_READ_REG(hw, IXGBE_ROC);
513 IXGBE_READ_REG(hw, IXGBE_RJC);
514 IXGBE_READ_REG(hw, IXGBE_MNGPRC);
515 IXGBE_READ_REG(hw, IXGBE_MNGPDC);
516 IXGBE_READ_REG(hw, IXGBE_MNGPTC);
517 IXGBE_READ_REG(hw, IXGBE_TORL);
518 IXGBE_READ_REG(hw, IXGBE_TORH);
519 IXGBE_READ_REG(hw, IXGBE_TPR);
520 IXGBE_READ_REG(hw, IXGBE_TPT);
521 IXGBE_READ_REG(hw, IXGBE_PTC64);
522 IXGBE_READ_REG(hw, IXGBE_PTC127);
523 IXGBE_READ_REG(hw, IXGBE_PTC255);
524 IXGBE_READ_REG(hw, IXGBE_PTC511);
525 IXGBE_READ_REG(hw, IXGBE_PTC1023);
526 IXGBE_READ_REG(hw, IXGBE_PTC1522);
527 IXGBE_READ_REG(hw, IXGBE_MPTC);
528 IXGBE_READ_REG(hw, IXGBE_BPTC);
529 for (i = 0; i < 16; i++) {
530 IXGBE_READ_REG(hw, IXGBE_QPRC(i));
531 IXGBE_READ_REG(hw, IXGBE_QPTC(i));
532 if (hw->mac.type >= ixgbe_mac_82599EB) {
533 IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
534 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
535 IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
536 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
537 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
539 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
540 IXGBE_READ_REG(hw, IXGBE_QBTC(i));
544 if (hw->mac.type == ixgbe_mac_X540) {
546 ixgbe_identify_phy(hw);
547 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
548 IXGBE_MDIO_PCS_DEV_TYPE, &i);
549 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
550 IXGBE_MDIO_PCS_DEV_TYPE, &i);
551 hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
552 IXGBE_MDIO_PCS_DEV_TYPE, &i);
553 hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
554 IXGBE_MDIO_PCS_DEV_TYPE, &i);
557 return IXGBE_SUCCESS;
561 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
562 * @hw: pointer to hardware structure
563 * @pba_num: stores the part number string from the EEPROM
564 * @pba_num_size: part number string buffer length
566 * Reads the part number string from the EEPROM.
568 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
577 DEBUGFUNC("ixgbe_read_pba_string_generic");
579 if (pba_num == NULL) {
580 DEBUGOUT("PBA string buffer was null\n");
581 return IXGBE_ERR_INVALID_ARGUMENT;
584 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
586 DEBUGOUT("NVM Read Error\n");
590 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
592 DEBUGOUT("NVM Read Error\n");
597 * if data is not ptr guard the PBA must be in legacy format which
598 * means pba_ptr is actually our second data word for the PBA number
599 * and we can decode it into an ascii string
601 if (data != IXGBE_PBANUM_PTR_GUARD) {
602 DEBUGOUT("NVM PBA number is not stored as string\n");
604 /* we will need 11 characters to store the PBA */
605 if (pba_num_size < 11) {
606 DEBUGOUT("PBA string buffer too small\n");
607 return IXGBE_ERR_NO_SPACE;
610 /* extract hex string from data and pba_ptr */
611 pba_num[0] = (data >> 12) & 0xF;
612 pba_num[1] = (data >> 8) & 0xF;
613 pba_num[2] = (data >> 4) & 0xF;
614 pba_num[3] = data & 0xF;
615 pba_num[4] = (pba_ptr >> 12) & 0xF;
616 pba_num[5] = (pba_ptr >> 8) & 0xF;
619 pba_num[8] = (pba_ptr >> 4) & 0xF;
620 pba_num[9] = pba_ptr & 0xF;
622 /* put a null character on the end of our string */
625 /* switch all the data but the '-' to hex char */
626 for (offset = 0; offset < 10; offset++) {
627 if (pba_num[offset] < 0xA)
628 pba_num[offset] += '0';
629 else if (pba_num[offset] < 0x10)
630 pba_num[offset] += 'A' - 0xA;
633 return IXGBE_SUCCESS;
636 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
638 DEBUGOUT("NVM Read Error\n");
642 if (length == 0xFFFF || length == 0) {
643 DEBUGOUT("NVM PBA number section invalid length\n");
644 return IXGBE_ERR_PBA_SECTION;
647 /* check if pba_num buffer is big enough */
648 if (pba_num_size < (((u32)length * 2) - 1)) {
649 DEBUGOUT("PBA string buffer too small\n");
650 return IXGBE_ERR_NO_SPACE;
653 /* trim pba length from start of string */
657 for (offset = 0; offset < length; offset++) {
658 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
660 DEBUGOUT("NVM Read Error\n");
663 pba_num[offset * 2] = (u8)(data >> 8);
664 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
666 pba_num[offset * 2] = '\0';
668 return IXGBE_SUCCESS;
672 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
673 * @hw: pointer to hardware structure
674 * @pba_num: stores the part number from the EEPROM
676 * Reads the part number from the EEPROM.
678 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
683 DEBUGFUNC("ixgbe_read_pba_num_generic");
685 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
687 DEBUGOUT("NVM Read Error\n");
689 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
690 DEBUGOUT("NVM Not supported\n");
691 return IXGBE_NOT_IMPLEMENTED;
693 *pba_num = (u32)(data << 16);
695 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
697 DEBUGOUT("NVM Read Error\n");
702 return IXGBE_SUCCESS;
707 * @hw: pointer to the HW structure
708 * @eeprom_buf: optional pointer to EEPROM image
709 * @eeprom_buf_size: size of EEPROM image in words
710 * @max_pba_block_size: PBA block size limit
711 * @pba: pointer to output PBA structure
713 * Reads PBA from EEPROM image when eeprom_buf is not NULL.
714 * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
717 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
718 u32 eeprom_buf_size, u16 max_pba_block_size,
719 struct ixgbe_pba *pba)
725 return IXGBE_ERR_PARAM;
727 if (eeprom_buf == NULL) {
728 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
733 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
734 pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
735 pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
737 return IXGBE_ERR_PARAM;
741 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
742 if (pba->pba_block == NULL)
743 return IXGBE_ERR_PARAM;
745 ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
751 if (pba_block_size > max_pba_block_size)
752 return IXGBE_ERR_PARAM;
754 if (eeprom_buf == NULL) {
755 ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
761 if (eeprom_buf_size > (u32)(pba->word[1] +
762 pba->pba_block[0])) {
763 memcpy(pba->pba_block,
764 &eeprom_buf[pba->word[1]],
765 pba_block_size * sizeof(u16));
767 return IXGBE_ERR_PARAM;
772 return IXGBE_SUCCESS;
776 * ixgbe_write_pba_raw
777 * @hw: pointer to the HW structure
778 * @eeprom_buf: optional pointer to EEPROM image
779 * @eeprom_buf_size: size of EEPROM image in words
780 * @pba: pointer to PBA structure
782 * Writes PBA to EEPROM image when eeprom_buf is not NULL.
783 * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
786 s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
787 u32 eeprom_buf_size, struct ixgbe_pba *pba)
792 return IXGBE_ERR_PARAM;
794 if (eeprom_buf == NULL) {
795 ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
800 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
801 eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
802 eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
804 return IXGBE_ERR_PARAM;
808 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
809 if (pba->pba_block == NULL)
810 return IXGBE_ERR_PARAM;
812 if (eeprom_buf == NULL) {
813 ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
819 if (eeprom_buf_size > (u32)(pba->word[1] +
820 pba->pba_block[0])) {
821 memcpy(&eeprom_buf[pba->word[1]],
823 pba->pba_block[0] * sizeof(u16));
825 return IXGBE_ERR_PARAM;
830 return IXGBE_SUCCESS;
834 * ixgbe_get_pba_block_size
835 * @hw: pointer to the HW structure
836 * @eeprom_buf: optional pointer to EEPROM image
837 * @eeprom_buf_size: size of EEPROM image in words
838 * @pba_data_size: pointer to output variable
840 * Returns the size of the PBA block in words. Function operates on EEPROM
841 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
845 s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
846 u32 eeprom_buf_size, u16 *pba_block_size)
852 DEBUGFUNC("ixgbe_get_pba_block_size");
854 if (eeprom_buf == NULL) {
855 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
860 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
861 pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
862 pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
864 return IXGBE_ERR_PARAM;
868 if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
869 if (eeprom_buf == NULL) {
870 ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
875 if (eeprom_buf_size > pba_word[1])
876 length = eeprom_buf[pba_word[1] + 0];
878 return IXGBE_ERR_PARAM;
881 if (length == 0xFFFF || length == 0)
882 return IXGBE_ERR_PBA_SECTION;
884 /* PBA number in legacy format, there is no PBA Block. */
888 if (pba_block_size != NULL)
889 *pba_block_size = length;
891 return IXGBE_SUCCESS;
895 * ixgbe_get_mac_addr_generic - Generic get MAC address
896 * @hw: pointer to hardware structure
897 * @mac_addr: Adapter MAC address
899 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
900 * A reset of the adapter must be performed prior to calling this function
901 * in order for the MAC address to have been loaded from the EEPROM into RAR0
903 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
909 DEBUGFUNC("ixgbe_get_mac_addr_generic");
911 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
912 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
914 for (i = 0; i < 4; i++)
915 mac_addr[i] = (u8)(rar_low >> (i*8));
917 for (i = 0; i < 2; i++)
918 mac_addr[i+4] = (u8)(rar_high >> (i*8));
920 return IXGBE_SUCCESS;
924 * ixgbe_get_bus_info_generic - Generic set PCI bus info
925 * @hw: pointer to hardware structure
927 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
929 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
931 struct ixgbe_mac_info *mac = &hw->mac;
934 DEBUGFUNC("ixgbe_get_bus_info_generic");
936 hw->bus.type = ixgbe_bus_type_pci_express;
938 /* Get the negotiated link width and speed from PCI config space */
939 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
941 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
942 case IXGBE_PCI_LINK_WIDTH_1:
943 hw->bus.width = ixgbe_bus_width_pcie_x1;
945 case IXGBE_PCI_LINK_WIDTH_2:
946 hw->bus.width = ixgbe_bus_width_pcie_x2;
948 case IXGBE_PCI_LINK_WIDTH_4:
949 hw->bus.width = ixgbe_bus_width_pcie_x4;
951 case IXGBE_PCI_LINK_WIDTH_8:
952 hw->bus.width = ixgbe_bus_width_pcie_x8;
955 hw->bus.width = ixgbe_bus_width_unknown;
959 switch (link_status & IXGBE_PCI_LINK_SPEED) {
960 case IXGBE_PCI_LINK_SPEED_2500:
961 hw->bus.speed = ixgbe_bus_speed_2500;
963 case IXGBE_PCI_LINK_SPEED_5000:
964 hw->bus.speed = ixgbe_bus_speed_5000;
966 case IXGBE_PCI_LINK_SPEED_8000:
967 hw->bus.speed = ixgbe_bus_speed_8000;
970 hw->bus.speed = ixgbe_bus_speed_unknown;
974 mac->ops.set_lan_id(hw);
976 return IXGBE_SUCCESS;
980 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
981 * @hw: pointer to the HW structure
983 * Determines the LAN function id by reading memory-mapped registers
984 * and swaps the port value if requested.
986 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
988 struct ixgbe_bus_info *bus = &hw->bus;
991 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
993 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
994 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
995 bus->lan_id = bus->func;
997 /* check for a port swap */
998 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
999 if (reg & IXGBE_FACTPS_LFS)
1004 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1005 * @hw: pointer to hardware structure
1007 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1008 * disables transmit and receive units. The adapter_stopped flag is used by
1009 * the shared code and drivers to determine if the adapter is in a stopped
1010 * state and should not touch the hardware.
1012 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
1017 DEBUGFUNC("ixgbe_stop_adapter_generic");
1020 * Set the adapter_stopped flag so other driver functions stop touching
1023 hw->adapter_stopped = true;
1025 /* Disable the receive unit */
1026 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
1028 /* Clear interrupt mask to stop interrupts from being generated */
1029 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1031 /* Clear any pending interrupts, flush previous writes */
1032 IXGBE_READ_REG(hw, IXGBE_EICR);
1034 /* Disable the transmit unit. Each queue must be disabled. */
1035 for (i = 0; i < hw->mac.max_tx_queues; i++)
1036 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
1038 /* Disable the receive unit by stopping each queue */
1039 for (i = 0; i < hw->mac.max_rx_queues; i++) {
1040 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1041 reg_val &= ~IXGBE_RXDCTL_ENABLE;
1042 reg_val |= IXGBE_RXDCTL_SWFLSH;
1043 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
1046 /* flush all queues disables */
1047 IXGBE_WRITE_FLUSH(hw);
1051 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
1052 * access and verify no pending requests
1054 return ixgbe_disable_pcie_master(hw);
1058 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1059 * @hw: pointer to hardware structure
1060 * @index: led number to turn on
1062 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1064 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1066 DEBUGFUNC("ixgbe_led_on_generic");
1068 /* To turn on the LED, set mode to ON. */
1069 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1070 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1071 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1072 IXGBE_WRITE_FLUSH(hw);
1074 return IXGBE_SUCCESS;
1078 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
1079 * @hw: pointer to hardware structure
1080 * @index: led number to turn off
1082 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1084 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1086 DEBUGFUNC("ixgbe_led_off_generic");
1088 /* To turn off the LED, set mode to OFF. */
1089 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1090 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1091 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1092 IXGBE_WRITE_FLUSH(hw);
1094 return IXGBE_SUCCESS;
1098 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1099 * @hw: pointer to hardware structure
1101 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1102 * ixgbe_hw struct in order to set up EEPROM access.
1104 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
1106 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1110 DEBUGFUNC("ixgbe_init_eeprom_params_generic");
1112 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1113 eeprom->type = ixgbe_eeprom_none;
1114 /* Set default semaphore delay to 10ms which is a well
1116 eeprom->semaphore_delay = 10;
1117 /* Clear EEPROM page size, it will be initialized as needed */
1118 eeprom->word_page_size = 0;
1121 * Check for EEPROM present first.
1122 * If not present leave as none
1124 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1125 if (eec & IXGBE_EEC_PRES) {
1126 eeprom->type = ixgbe_eeprom_spi;
1129 * SPI EEPROM is assumed here. This code would need to
1130 * change if a future EEPROM is not SPI.
1132 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1133 IXGBE_EEC_SIZE_SHIFT);
1134 eeprom->word_size = 1 << (eeprom_size +
1135 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1138 if (eec & IXGBE_EEC_ADDR_SIZE)
1139 eeprom->address_bits = 16;
1141 eeprom->address_bits = 8;
1142 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
1143 "%d\n", eeprom->type, eeprom->word_size,
1144 eeprom->address_bits);
1147 return IXGBE_SUCCESS;
1151 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1152 * @hw: pointer to hardware structure
1153 * @offset: offset within the EEPROM to write
1154 * @words: number of word(s)
1155 * @data: 16 bit word(s) to write to EEPROM
1157 * Reads 16 bit word(s) from EEPROM through bit-bang method
1159 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1160 u16 words, u16 *data)
1162 s32 status = IXGBE_SUCCESS;
1165 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1167 hw->eeprom.ops.init_params(hw);
1170 status = IXGBE_ERR_INVALID_ARGUMENT;
1174 if (offset + words > hw->eeprom.word_size) {
1175 status = IXGBE_ERR_EEPROM;
1180 * The EEPROM page size cannot be queried from the chip. We do lazy
1181 * initialization. It is worth to do that when we write large buffer.
1183 if ((hw->eeprom.word_page_size == 0) &&
1184 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1185 ixgbe_detect_eeprom_page_size_generic(hw, offset);
1188 * We cannot hold synchronization semaphores for too long
1189 * to avoid other entity starvation. However it is more efficient
1190 * to read in bursts than synchronizing access for each word.
1192 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1193 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1194 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1195 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1198 if (status != IXGBE_SUCCESS)
1207 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1208 * @hw: pointer to hardware structure
1209 * @offset: offset within the EEPROM to be written to
1210 * @words: number of word(s)
1211 * @data: 16 bit word(s) to be written to the EEPROM
1213 * If ixgbe_eeprom_update_checksum is not called after this function, the
1214 * EEPROM will most likely contain an invalid checksum.
1216 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1217 u16 words, u16 *data)
1223 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
1225 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
1227 /* Prepare the EEPROM for writing */
1228 status = ixgbe_acquire_eeprom(hw);
1230 if (status == IXGBE_SUCCESS) {
1231 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1232 ixgbe_release_eeprom(hw);
1233 status = IXGBE_ERR_EEPROM;
1237 if (status == IXGBE_SUCCESS) {
1238 for (i = 0; i < words; i++) {
1239 ixgbe_standby_eeprom(hw);
1241 /* Send the WRITE ENABLE command (8 bit opcode ) */
1242 ixgbe_shift_out_eeprom_bits(hw,
1243 IXGBE_EEPROM_WREN_OPCODE_SPI,
1244 IXGBE_EEPROM_OPCODE_BITS);
1246 ixgbe_standby_eeprom(hw);
1249 * Some SPI eeproms use the 8th address bit embedded
1252 if ((hw->eeprom.address_bits == 8) &&
1253 ((offset + i) >= 128))
1254 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1256 /* Send the Write command (8-bit opcode + addr) */
1257 ixgbe_shift_out_eeprom_bits(hw, write_opcode,
1258 IXGBE_EEPROM_OPCODE_BITS);
1259 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1260 hw->eeprom.address_bits);
1262 page_size = hw->eeprom.word_page_size;
1264 /* Send the data in burst via SPI*/
1267 word = (word >> 8) | (word << 8);
1268 ixgbe_shift_out_eeprom_bits(hw, word, 16);
1273 /* do not wrap around page */
1274 if (((offset + i) & (page_size - 1)) ==
1277 } while (++i < words);
1279 ixgbe_standby_eeprom(hw);
1282 /* Done with writing - release the EEPROM */
1283 ixgbe_release_eeprom(hw);
1290 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1291 * @hw: pointer to hardware structure
1292 * @offset: offset within the EEPROM to be written to
1293 * @data: 16 bit word to be written to the EEPROM
1295 * If ixgbe_eeprom_update_checksum is not called after this function, the
1296 * EEPROM will most likely contain an invalid checksum.
1298 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1302 DEBUGFUNC("ixgbe_write_eeprom_generic");
1304 hw->eeprom.ops.init_params(hw);
1306 if (offset >= hw->eeprom.word_size) {
1307 status = IXGBE_ERR_EEPROM;
1311 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1318 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1319 * @hw: pointer to hardware structure
1320 * @offset: offset within the EEPROM to be read
1321 * @data: read 16 bit words(s) from EEPROM
1322 * @words: number of word(s)
1324 * Reads 16 bit word(s) from EEPROM through bit-bang method
1326 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1327 u16 words, u16 *data)
1329 s32 status = IXGBE_SUCCESS;
1332 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1334 hw->eeprom.ops.init_params(hw);
1337 status = IXGBE_ERR_INVALID_ARGUMENT;
1341 if (offset + words > hw->eeprom.word_size) {
1342 status = IXGBE_ERR_EEPROM;
1347 * We cannot hold synchronization semaphores for too long
1348 * to avoid other entity starvation. However it is more efficient
1349 * to read in bursts than synchronizing access for each word.
1351 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1352 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1353 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1355 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1358 if (status != IXGBE_SUCCESS)
1367 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1368 * @hw: pointer to hardware structure
1369 * @offset: offset within the EEPROM to be read
1370 * @words: number of word(s)
1371 * @data: read 16 bit word(s) from EEPROM
1373 * Reads 16 bit word(s) from EEPROM through bit-bang method
1375 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1376 u16 words, u16 *data)
1380 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1383 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1385 /* Prepare the EEPROM for reading */
1386 status = ixgbe_acquire_eeprom(hw);
1388 if (status == IXGBE_SUCCESS) {
1389 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1390 ixgbe_release_eeprom(hw);
1391 status = IXGBE_ERR_EEPROM;
1395 if (status == IXGBE_SUCCESS) {
1396 for (i = 0; i < words; i++) {
1397 ixgbe_standby_eeprom(hw);
1399 * Some SPI eeproms use the 8th address bit embedded
1402 if ((hw->eeprom.address_bits == 8) &&
1403 ((offset + i) >= 128))
1404 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1406 /* Send the READ command (opcode + addr) */
1407 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1408 IXGBE_EEPROM_OPCODE_BITS);
1409 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1410 hw->eeprom.address_bits);
1412 /* Read the data. */
1413 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1414 data[i] = (word_in >> 8) | (word_in << 8);
1417 /* End this read operation */
1418 ixgbe_release_eeprom(hw);
1425 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1426 * @hw: pointer to hardware structure
1427 * @offset: offset within the EEPROM to be read
1428 * @data: read 16 bit value from EEPROM
1430 * Reads 16 bit value from EEPROM through bit-bang method
1432 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1437 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1439 hw->eeprom.ops.init_params(hw);
1441 if (offset >= hw->eeprom.word_size) {
1442 status = IXGBE_ERR_EEPROM;
1446 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1453 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1454 * @hw: pointer to hardware structure
1455 * @offset: offset of word in the EEPROM to read
1456 * @words: number of word(s)
1457 * @data: 16 bit word(s) from the EEPROM
1459 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1461 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1462 u16 words, u16 *data)
1465 s32 status = IXGBE_SUCCESS;
1468 DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1470 hw->eeprom.ops.init_params(hw);
1473 status = IXGBE_ERR_INVALID_ARGUMENT;
1474 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1478 if (offset >= hw->eeprom.word_size) {
1479 status = IXGBE_ERR_EEPROM;
1480 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1484 for (i = 0; i < words; i++) {
1485 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1486 IXGBE_EEPROM_RW_REG_START;
1488 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1489 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1491 if (status == IXGBE_SUCCESS) {
1492 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1493 IXGBE_EEPROM_RW_REG_DATA);
1495 DEBUGOUT("Eeprom read timed out\n");
1504 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1505 * @hw: pointer to hardware structure
1506 * @offset: offset within the EEPROM to be used as a scratch pad
1508 * Discover EEPROM page size by writing marching data at given offset.
1509 * This function is called only when we are writing a new large buffer
1510 * at given offset so the data would be overwritten anyway.
1512 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1515 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1516 s32 status = IXGBE_SUCCESS;
1519 DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1521 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1524 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1525 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1526 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1527 hw->eeprom.word_page_size = 0;
1528 if (status != IXGBE_SUCCESS)
1531 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1532 if (status != IXGBE_SUCCESS)
1536 * When writing in burst more than the actual page size
1537 * EEPROM address wraps around current page.
1539 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1541 DEBUGOUT1("Detected EEPROM page size = %d words.",
1542 hw->eeprom.word_page_size);
1548 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1549 * @hw: pointer to hardware structure
1550 * @offset: offset of word in the EEPROM to read
1551 * @data: word read from the EEPROM
1553 * Reads a 16 bit word from the EEPROM using the EERD register.
1555 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1557 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1561 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1562 * @hw: pointer to hardware structure
1563 * @offset: offset of word in the EEPROM to write
1564 * @words: number of word(s)
1565 * @data: word(s) write to the EEPROM
1567 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1569 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1570 u16 words, u16 *data)
1573 s32 status = IXGBE_SUCCESS;
1576 DEBUGFUNC("ixgbe_write_eewr_generic");
1578 hw->eeprom.ops.init_params(hw);
1581 status = IXGBE_ERR_INVALID_ARGUMENT;
1582 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1586 if (offset >= hw->eeprom.word_size) {
1587 status = IXGBE_ERR_EEPROM;
1588 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1592 for (i = 0; i < words; i++) {
1593 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1594 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1595 IXGBE_EEPROM_RW_REG_START;
1597 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1598 if (status != IXGBE_SUCCESS) {
1599 DEBUGOUT("Eeprom write EEWR timed out\n");
1603 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1605 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1606 if (status != IXGBE_SUCCESS) {
1607 DEBUGOUT("Eeprom write EEWR timed out\n");
1617 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1618 * @hw: pointer to hardware structure
1619 * @offset: offset of word in the EEPROM to write
1620 * @data: word write to the EEPROM
1622 * Write a 16 bit word to the EEPROM using the EEWR register.
1624 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1626 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1630 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1631 * @hw: pointer to hardware structure
1632 * @ee_reg: EEPROM flag for polling
1634 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1635 * read or write is done respectively.
1637 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1641 s32 status = IXGBE_ERR_EEPROM;
1643 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1645 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1646 if (ee_reg == IXGBE_NVM_POLL_READ)
1647 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1649 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1651 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1652 status = IXGBE_SUCCESS;
1658 if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1659 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1660 "EEPROM read/write done polling timed out");
1666 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1667 * @hw: pointer to hardware structure
1669 * Prepares EEPROM for access using bit-bang method. This function should
1670 * be called before issuing a command to the EEPROM.
1672 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1674 s32 status = IXGBE_SUCCESS;
1678 DEBUGFUNC("ixgbe_acquire_eeprom");
1680 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1682 status = IXGBE_ERR_SWFW_SYNC;
1684 if (status == IXGBE_SUCCESS) {
1685 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1687 /* Request EEPROM Access */
1688 eec |= IXGBE_EEC_REQ;
1689 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1691 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1692 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1693 if (eec & IXGBE_EEC_GNT)
1698 /* Release if grant not acquired */
1699 if (!(eec & IXGBE_EEC_GNT)) {
1700 eec &= ~IXGBE_EEC_REQ;
1701 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1702 DEBUGOUT("Could not acquire EEPROM grant\n");
1704 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1705 status = IXGBE_ERR_EEPROM;
1708 /* Setup EEPROM for Read/Write */
1709 if (status == IXGBE_SUCCESS) {
1710 /* Clear CS and SK */
1711 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1712 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1713 IXGBE_WRITE_FLUSH(hw);
1721 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1722 * @hw: pointer to hardware structure
1724 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1726 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1728 s32 status = IXGBE_ERR_EEPROM;
1733 DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1736 /* Get SMBI software semaphore between device drivers first */
1737 for (i = 0; i < timeout; i++) {
1739 * If the SMBI bit is 0 when we read it, then the bit will be
1740 * set and we have the semaphore
1742 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1743 if (!(swsm & IXGBE_SWSM_SMBI)) {
1744 status = IXGBE_SUCCESS;
1751 DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1754 * this release is particularly important because our attempts
1755 * above to get the semaphore may have succeeded, and if there
1756 * was a timeout, we should unconditionally clear the semaphore
1757 * bits to free the driver to make progress
1759 ixgbe_release_eeprom_semaphore(hw);
1764 * If the SMBI bit is 0 when we read it, then the bit will be
1765 * set and we have the semaphore
1767 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1768 if (!(swsm & IXGBE_SWSM_SMBI))
1769 status = IXGBE_SUCCESS;
1772 /* Now get the semaphore between SW/FW through the SWESMBI bit */
1773 if (status == IXGBE_SUCCESS) {
1774 for (i = 0; i < timeout; i++) {
1775 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1777 /* Set the SW EEPROM semaphore bit to request access */
1778 swsm |= IXGBE_SWSM_SWESMBI;
1779 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1782 * If we set the bit successfully then we got the
1785 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1786 if (swsm & IXGBE_SWSM_SWESMBI)
1793 * Release semaphores and return error if SW EEPROM semaphore
1794 * was not granted because we don't have access to the EEPROM
1797 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1798 "SWESMBI Software EEPROM semaphore not granted.\n");
1799 ixgbe_release_eeprom_semaphore(hw);
1800 status = IXGBE_ERR_EEPROM;
1803 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1804 "Software semaphore SMBI between device drivers "
1812 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1813 * @hw: pointer to hardware structure
1815 * This function clears hardware semaphore bits.
1817 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1821 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1823 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1825 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1826 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1827 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1828 IXGBE_WRITE_FLUSH(hw);
1832 * ixgbe_ready_eeprom - Polls for EEPROM ready
1833 * @hw: pointer to hardware structure
1835 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1837 s32 status = IXGBE_SUCCESS;
1841 DEBUGFUNC("ixgbe_ready_eeprom");
1844 * Read "Status Register" repeatedly until the LSB is cleared. The
1845 * EEPROM will signal that the command has been completed by clearing
1846 * bit 0 of the internal status register. If it's not cleared within
1847 * 5 milliseconds, then error out.
1849 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1850 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1851 IXGBE_EEPROM_OPCODE_BITS);
1852 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1853 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1857 ixgbe_standby_eeprom(hw);
1861 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1862 * devices (and only 0-5mSec on 5V devices)
1864 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1865 DEBUGOUT("SPI EEPROM Status error\n");
1866 status = IXGBE_ERR_EEPROM;
1873 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1874 * @hw: pointer to hardware structure
1876 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1880 DEBUGFUNC("ixgbe_standby_eeprom");
1882 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1884 /* Toggle CS to flush commands */
1885 eec |= IXGBE_EEC_CS;
1886 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1887 IXGBE_WRITE_FLUSH(hw);
1889 eec &= ~IXGBE_EEC_CS;
1890 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1891 IXGBE_WRITE_FLUSH(hw);
1896 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1897 * @hw: pointer to hardware structure
1898 * @data: data to send to the EEPROM
1899 * @count: number of bits to shift out
1901 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1908 DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1910 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1913 * Mask is used to shift "count" bits of "data" out to the EEPROM
1914 * one bit at a time. Determine the starting bit based on count
1916 mask = 0x01 << (count - 1);
1918 for (i = 0; i < count; i++) {
1920 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1921 * "1", and then raising and then lowering the clock (the SK
1922 * bit controls the clock input to the EEPROM). A "0" is
1923 * shifted out to the EEPROM by setting "DI" to "0" and then
1924 * raising and then lowering the clock.
1927 eec |= IXGBE_EEC_DI;
1929 eec &= ~IXGBE_EEC_DI;
1931 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1932 IXGBE_WRITE_FLUSH(hw);
1936 ixgbe_raise_eeprom_clk(hw, &eec);
1937 ixgbe_lower_eeprom_clk(hw, &eec);
1940 * Shift mask to signify next bit of data to shift in to the
1946 /* We leave the "DI" bit set to "0" when we leave this routine. */
1947 eec &= ~IXGBE_EEC_DI;
1948 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1949 IXGBE_WRITE_FLUSH(hw);
1953 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1954 * @hw: pointer to hardware structure
1956 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
1962 DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
1965 * In order to read a register from the EEPROM, we need to shift
1966 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
1967 * the clock input to the EEPROM (setting the SK bit), and then reading
1968 * the value of the "DO" bit. During this "shifting in" process the
1969 * "DI" bit should always be clear.
1971 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1973 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
1975 for (i = 0; i < count; i++) {
1977 ixgbe_raise_eeprom_clk(hw, &eec);
1979 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1981 eec &= ~(IXGBE_EEC_DI);
1982 if (eec & IXGBE_EEC_DO)
1985 ixgbe_lower_eeprom_clk(hw, &eec);
1992 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1993 * @hw: pointer to hardware structure
1994 * @eec: EEC register's current value
1996 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
1998 DEBUGFUNC("ixgbe_raise_eeprom_clk");
2001 * Raise the clock input to the EEPROM
2002 * (setting the SK bit), then delay
2004 *eec = *eec | IXGBE_EEC_SK;
2005 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
2006 IXGBE_WRITE_FLUSH(hw);
2011 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2012 * @hw: pointer to hardware structure
2013 * @eecd: EECD's current value
2015 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2017 DEBUGFUNC("ixgbe_lower_eeprom_clk");
2020 * Lower the clock input to the EEPROM (clearing the SK bit), then
2023 *eec = *eec & ~IXGBE_EEC_SK;
2024 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
2025 IXGBE_WRITE_FLUSH(hw);
2030 * ixgbe_release_eeprom - Release EEPROM, release semaphores
2031 * @hw: pointer to hardware structure
2033 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2037 DEBUGFUNC("ixgbe_release_eeprom");
2039 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
2041 eec |= IXGBE_EEC_CS; /* Pull CS high */
2042 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2044 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
2045 IXGBE_WRITE_FLUSH(hw);
2049 /* Stop requesting EEPROM access */
2050 eec &= ~IXGBE_EEC_REQ;
2051 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
2053 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2055 /* Delay before attempt to obtain semaphore again to allow FW access */
2056 msec_delay(hw->eeprom.semaphore_delay);
2060 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2061 * @hw: pointer to hardware structure
2063 u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
2072 DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
2074 /* Include 0x0-0x3F in the checksum */
2075 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
2076 if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
2077 DEBUGOUT("EEPROM read failed\n");
2083 /* Include all data from pointers except for the fw pointer */
2084 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
2085 hw->eeprom.ops.read(hw, i, &pointer);
2087 /* Make sure the pointer seems valid */
2088 if (pointer != 0xFFFF && pointer != 0) {
2089 hw->eeprom.ops.read(hw, pointer, &length);
2091 if (length != 0xFFFF && length != 0) {
2092 for (j = pointer+1; j <= pointer+length; j++) {
2093 hw->eeprom.ops.read(hw, j, &word);
2100 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
2106 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2107 * @hw: pointer to hardware structure
2108 * @checksum_val: calculated checksum
2110 * Performs checksum calculation and validates the EEPROM checksum. If the
2111 * caller does not need checksum_val, the value can be NULL.
2113 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2118 u16 read_checksum = 0;
2120 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2123 * Read the first word from the EEPROM. If this times out or fails, do
2124 * not continue or we could be in for a very long wait while every
2127 status = hw->eeprom.ops.read(hw, 0, &checksum);
2129 if (status == IXGBE_SUCCESS) {
2130 checksum = hw->eeprom.ops.calc_checksum(hw);
2132 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2135 * Verify read checksum from EEPROM is the same as
2136 * calculated checksum
2138 if (read_checksum != checksum)
2139 status = IXGBE_ERR_EEPROM_CHECKSUM;
2141 /* If the user cares, return the calculated checksum */
2143 *checksum_val = checksum;
2145 DEBUGOUT("EEPROM read failed\n");
2152 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2153 * @hw: pointer to hardware structure
2155 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2160 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2163 * Read the first word from the EEPROM. If this times out or fails, do
2164 * not continue or we could be in for a very long wait while every
2167 status = hw->eeprom.ops.read(hw, 0, &checksum);
2169 if (status == IXGBE_SUCCESS) {
2170 checksum = hw->eeprom.ops.calc_checksum(hw);
2171 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
2174 DEBUGOUT("EEPROM read failed\n");
2181 * ixgbe_validate_mac_addr - Validate MAC address
2182 * @mac_addr: pointer to MAC address.
2184 * Tests a MAC address to ensure it is a valid Individual Address
2186 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2188 s32 status = IXGBE_SUCCESS;
2190 DEBUGFUNC("ixgbe_validate_mac_addr");
2192 /* Make sure it is not a multicast address */
2193 if (IXGBE_IS_MULTICAST(mac_addr)) {
2194 DEBUGOUT("MAC address is multicast\n");
2195 status = IXGBE_ERR_INVALID_MAC_ADDR;
2196 /* Not a broadcast address */
2197 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
2198 DEBUGOUT("MAC address is broadcast\n");
2199 status = IXGBE_ERR_INVALID_MAC_ADDR;
2200 /* Reject the zero address */
2201 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2202 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2203 DEBUGOUT("MAC address is all zeros\n");
2204 status = IXGBE_ERR_INVALID_MAC_ADDR;
2210 * ixgbe_set_rar_generic - Set Rx address register
2211 * @hw: pointer to hardware structure
2212 * @index: Receive address register to write
2213 * @addr: Address to put into receive address register
2214 * @vmdq: VMDq "set" or "pool" index
2215 * @enable_addr: set flag that address is active
2217 * Puts an ethernet address into a receive address register.
2219 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2222 u32 rar_low, rar_high;
2223 u32 rar_entries = hw->mac.num_rar_entries;
2225 DEBUGFUNC("ixgbe_set_rar_generic");
2227 /* Make sure we are using a valid rar index range */
2228 if (index >= rar_entries) {
2229 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2230 "RAR index %d is out of range.\n", index);
2231 return IXGBE_ERR_INVALID_ARGUMENT;
2234 /* setup VMDq pool selection before this RAR gets enabled */
2235 hw->mac.ops.set_vmdq(hw, index, vmdq);
2238 * HW expects these in little endian so we reverse the byte
2239 * order from network order (big endian) to little endian
2241 rar_low = ((u32)addr[0] |
2242 ((u32)addr[1] << 8) |
2243 ((u32)addr[2] << 16) |
2244 ((u32)addr[3] << 24));
2246 * Some parts put the VMDq setting in the extra RAH bits,
2247 * so save everything except the lower 16 bits that hold part
2248 * of the address and the address valid bit.
2250 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2251 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2252 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2254 if (enable_addr != 0)
2255 rar_high |= IXGBE_RAH_AV;
2257 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2258 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2260 return IXGBE_SUCCESS;
2264 * ixgbe_clear_rar_generic - Remove Rx address register
2265 * @hw: pointer to hardware structure
2266 * @index: Receive address register to write
2268 * Clears an ethernet address from a receive address register.
2270 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2273 u32 rar_entries = hw->mac.num_rar_entries;
2275 DEBUGFUNC("ixgbe_clear_rar_generic");
2277 /* Make sure we are using a valid rar index range */
2278 if (index >= rar_entries) {
2279 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2280 "RAR index %d is out of range.\n", index);
2281 return IXGBE_ERR_INVALID_ARGUMENT;
2285 * Some parts put the VMDq setting in the extra RAH bits,
2286 * so save everything except the lower 16 bits that hold part
2287 * of the address and the address valid bit.
2289 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2290 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2292 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2293 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2295 /* clear VMDq pool/queue selection for this RAR */
2296 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2298 return IXGBE_SUCCESS;
2302 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2303 * @hw: pointer to hardware structure
2305 * Places the MAC address in receive address register 0 and clears the rest
2306 * of the receive address registers. Clears the multicast table. Assumes
2307 * the receiver is in reset when the routine is called.
2309 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
2312 u32 rar_entries = hw->mac.num_rar_entries;
2314 DEBUGFUNC("ixgbe_init_rx_addrs_generic");
2317 * If the current mac address is valid, assume it is a software override
2318 * to the permanent address.
2319 * Otherwise, use the permanent address from the eeprom.
2321 if (ixgbe_validate_mac_addr(hw->mac.addr) ==
2322 IXGBE_ERR_INVALID_MAC_ADDR) {
2323 /* Get the MAC address from the RAR0 for later reference */
2324 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2326 DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
2327 hw->mac.addr[0], hw->mac.addr[1],
2329 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2330 hw->mac.addr[4], hw->mac.addr[5]);
2332 /* Setup the receive address. */
2333 DEBUGOUT("Overriding MAC Address in RAR[0]\n");
2334 DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
2335 hw->mac.addr[0], hw->mac.addr[1],
2337 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2338 hw->mac.addr[4], hw->mac.addr[5]);
2340 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2342 /* clear VMDq pool/queue selection for RAR 0 */
2343 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2345 hw->addr_ctrl.overflow_promisc = 0;
2347 hw->addr_ctrl.rar_used_count = 1;
2349 /* Zero out the other receive addresses. */
2350 DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2351 for (i = 1; i < rar_entries; i++) {
2352 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2353 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2357 hw->addr_ctrl.mta_in_use = 0;
2358 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2360 DEBUGOUT(" Clearing MTA\n");
2361 for (i = 0; i < hw->mac.mcft_size; i++)
2362 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2364 ixgbe_init_uta_tables(hw);
2366 return IXGBE_SUCCESS;
2370 * ixgbe_add_uc_addr - Adds a secondary unicast address.
2371 * @hw: pointer to hardware structure
2372 * @addr: new address
2374 * Adds it to unused receive address register or goes into promiscuous mode.
2376 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2378 u32 rar_entries = hw->mac.num_rar_entries;
2381 DEBUGFUNC("ixgbe_add_uc_addr");
2383 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2384 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2387 * Place this address in the RAR if there is room,
2388 * else put the controller into promiscuous mode
2390 if (hw->addr_ctrl.rar_used_count < rar_entries) {
2391 rar = hw->addr_ctrl.rar_used_count;
2392 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2393 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2394 hw->addr_ctrl.rar_used_count++;
2396 hw->addr_ctrl.overflow_promisc++;
2399 DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2403 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2404 * @hw: pointer to hardware structure
2405 * @addr_list: the list of new addresses
2406 * @addr_count: number of addresses
2407 * @next: iterator function to walk the address list
2409 * The given list replaces any existing list. Clears the secondary addrs from
2410 * receive address registers. Uses unused receive address registers for the
2411 * first secondary addresses, and falls back to promiscuous mode as needed.
2413 * Drivers using secondary unicast addresses must set user_set_promisc when
2414 * manually putting the device into promiscuous mode.
2416 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2417 u32 addr_count, ixgbe_mc_addr_itr next)
2421 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2426 DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2429 * Clear accounting of old secondary address list,
2430 * don't count RAR[0]
 * (rar_used_count includes RAR[0], the primary MAC, which must stay
 * programmed — hence the "- 1" below.)
2432 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2433 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2434 hw->addr_ctrl.overflow_promisc = 0;
2436 /* Zero out the other receive addresses */
2437 DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2438 for (i = 0; i < uc_addr_in_use; i++) {
2439 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2440 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2443 /* Add the new addresses */
2444 for (i = 0; i < addr_count; i++) {
2445 DEBUGOUT(" Adding the secondary addresses:\n");
2446 addr = next(hw, &addr_list, &vmdq);
2447 ixgbe_add_uc_addr(hw, addr, vmdq);
2450 if (hw->addr_ctrl.overflow_promisc) {
2451 /* enable promisc if not already in overflow or set by user */
2452 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2453 DEBUGOUT(" Entering address overflow promisc mode\n");
2454 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2455 fctrl |= IXGBE_FCTRL_UPE;
2456 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2459 /* only disable if set by overflow, not by user */
2460 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2461 DEBUGOUT(" Leaving address overflow promisc mode\n");
2462 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2463 fctrl &= ~IXGBE_FCTRL_UPE;
2464 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2468 DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2469 return IXGBE_SUCCESS;
2473 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2474 * @hw: pointer to hardware structure
2475 * @mc_addr: the multicast address
2477 * Extracts the 12 bits, from a multicast address, to determine which
2478 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
2479 * incoming rx multicast addresses, to determine the bit-vector to check in
2480 * the MTA. Which of the 4 combinations, of 12-bits, the hardware uses is set
2481 * by the MO field of the MCSTCTRL. The MO field is set during initialization
2482 * to mc_filter_type.
2484 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2488 DEBUGFUNC("ixgbe_mta_vector");
 /* Each case selects a different 12-bit window from the top two
  * address bytes (mc_addr[4] low, mc_addr[5] high). */
2490 switch (hw->mac.mc_filter_type) {
2491 case 0: /* use bits [47:36] of the address */
2492 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2494 case 1: /* use bits [46:35] of the address */
2495 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2497 case 2: /* use bits [45:34] of the address */
2498 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2500 case 3: /* use bits [43:32] of the address */
2501 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2503 default: /* Invalid mc_filter_type */
2504 DEBUGOUT("MC filter type param set incorrectly\n");
2509 /* vector can only be 12-bits or boundary will be exceeded */
2515 * ixgbe_set_mta - Set bit-vector in multicast table
2516 * @hw: pointer to hardware structure
2517 * @mc_addr: multicast address to hash into the table
2519 * Sets the bit-vector in the multicast table.
 * Updates only the driver's mta_shadow copy; the caller is responsible
 * for flushing the shadow to the hardware MTA registers.
2521 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2527 DEBUGFUNC("ixgbe_set_mta");
2529 hw->addr_ctrl.mta_in_use++;
2531 vector = ixgbe_mta_vector(hw, mc_addr);
2532 DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2535 * The MTA is a register array of 128 32-bit registers. It is treated
2536 * like an array of 4096 bits. We want to set bit
2537 * BitArray[vector_value]. So we figure out what register the bit is
2538 * in, read it, OR in the new bit, then write back the new value. The
2539 * register is determined by the upper 7 bits of the vector value and
2540 * the bit within that register are determined by the lower 5 bits of
2543 vector_reg = (vector >> 5) & 0x7F;
2544 vector_bit = vector & 0x1F;
2545 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2549 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2550 * @hw: pointer to hardware structure
2551 * @mc_addr_list: the list of new multicast addresses
2552 * @mc_addr_count: number of addresses
2553 * @next: iterator function to walk the multicast address list
2554 * @clear: flag, when set clears the table beforehand
2556 * When the clear flag is set, the given list replaces any existing list.
2557 * Hashes the given addresses into the multicast table.
2559 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2560 u32 mc_addr_count, ixgbe_mc_addr_itr next,
2566 DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2569 * Set the new number of MC addresses that we are being requested to
2572 hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2573 hw->addr_ctrl.mta_in_use = 0;
2575 /* Clear mta_shadow */
2577 DEBUGOUT(" Clearing MTA\n");
2578 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2581 /* Update mta_shadow */
2582 for (i = 0; i < mc_addr_count; i++) {
2583 DEBUGOUT(" Adding the multicast addresses:\n");
2584 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
 /* Flush the shadow table out to the hardware MTA registers */
2588 for (i = 0; i < hw->mac.mcft_size; i++)
2589 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2590 hw->mac.mta_shadow[i]);
 /* Only enable the multicast filter if at least one address is hashed in */
2592 if (hw->addr_ctrl.mta_in_use > 0)
2593 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2594 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2596 DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2597 return IXGBE_SUCCESS;
2601 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2602 * @hw: pointer to hardware structure
2604 * Enables multicast address in RAR and the use of the multicast hash table.
 * Re-asserts the MCSTCTRL filter-enable bit only if any MTA bits are in use.
2606 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2608 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2610 DEBUGFUNC("ixgbe_enable_mc_generic");
2612 if (a->mta_in_use > 0)
2613 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2614 hw->mac.mc_filter_type);
2616 return IXGBE_SUCCESS;
2620 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2621 * @hw: pointer to hardware structure
2623 * Disables multicast address in RAR and the use of the multicast hash table.
 * Writes MCSTCTRL without the MFE bit, keeping only the filter-type field.
2625 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2627 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2629 DEBUGFUNC("ixgbe_disable_mc_generic");
2631 if (a->mta_in_use > 0)
2632 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2634 return IXGBE_SUCCESS;
2638 * ixgbe_fc_enable_generic - Enable flow control
2639 * @hw: pointer to hardware structure
2641 * Enable flow control according to the current settings.
 * Returns IXGBE_ERR_INVALID_LINK_SETTINGS on bad pause-time/water-mark
 * configuration, IXGBE_ERR_CONFIG on an unknown fc mode.
2643 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2645 s32 ret_val = IXGBE_SUCCESS;
2646 u32 mflcn_reg, fccfg_reg;
2651 DEBUGFUNC("ixgbe_fc_enable_generic");
2653 /* Validate the water mark configuration */
2654 if (!hw->fc.pause_time) {
2655 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2659 /* Low water mark of zero causes XOFF floods */
 /* Note: fc modes are bit flags here — the "& ixgbe_fc_tx_pause" test
  * matches both tx_pause and full. */
2660 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2661 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2662 hw->fc.high_water[i]) {
2663 if (!hw->fc.low_water[i] ||
2664 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2665 DEBUGOUT("Invalid water mark configuration\n");
2666 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2672 /* Negotiate the fc mode to use */
2673 ixgbe_fc_autoneg(hw);
2675 /* Disable any previous flow control settings */
2676 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2677 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2679 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2680 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2683 * The possible values of fc.current_mode are:
2684 * 0: Flow control is completely disabled
2685 * 1: Rx flow control is enabled (we can receive pause frames,
2686 * but not send pause frames).
2687 * 2: Tx flow control is enabled (we can send pause frames but
2688 * we do not support receiving pause frames).
2689 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2692 switch (hw->fc.current_mode) {
2695 * Flow control is disabled by software override or autoneg.
2696 * The code below will actually disable it in the HW.
2699 case ixgbe_fc_rx_pause:
2701 * Rx Flow control is enabled and Tx Flow control is
2702 * disabled by software override. Since there really
2703 * isn't a way to advertise that we are capable of RX
2704 * Pause ONLY, we will advertise that we support both
2705 * symmetric and asymmetric Rx PAUSE. Later, we will
2706 * disable the adapter's ability to send PAUSE frames.
2708 mflcn_reg |= IXGBE_MFLCN_RFCE;
2710 case ixgbe_fc_tx_pause:
2712 * Tx Flow control is enabled, and Rx Flow control is
2713 * disabled by software override.
2715 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2718 /* Flow control (both Rx and Tx) is enabled by SW override. */
2719 mflcn_reg |= IXGBE_MFLCN_RFCE;
2720 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2723 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
2724 "Flow control param set incorrectly\n");
2725 ret_val = IXGBE_ERR_CONFIG;
2730 /* Set 802.3x based flow control settings. */
2731 mflcn_reg |= IXGBE_MFLCN_DPF;
2732 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2733 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2736 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2737 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2738 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2739 hw->fc.high_water[i]) {
 /* Water marks are stored in KB units; << 10 converts to bytes */
2740 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2741 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2742 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2744 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2746 * In order to prevent Tx hangs when the internal Tx
2747 * switch is enabled we must set the high water mark
2748 * to the maximum FCRTH value. This allows the Tx
2749 * switch to function even under heavy Rx workloads.
2751 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
2754 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2757 /* Configure pause time (2 TCs per register) */
2758 reg = hw->fc.pause_time * 0x00010001;
2759 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2760 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2762 /* Configure flow control refresh threshold value */
2763 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2770 * ixgbe_negotiate_fc - Negotiate flow control
2771 * @hw: pointer to hardware structure
2772 * @adv_reg: flow control advertised settings
2773 * @lp_reg: link partner's flow control settings
2774 * @adv_sym: symmetric pause bit in advertisement
2775 * @adv_asm: asymmetric pause bit in advertisement
2776 * @lp_sym: symmetric pause bit in link partner advertisement
2777 * @lp_asm: asymmetric pause bit in link partner advertisement
2779 * Find the intersection between advertised settings and link partner's
2780 * advertised settings
 * Resolves hw->fc.current_mode per the IEEE 802.3 pause resolution table.
2782 STATIC s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2783 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
 /* Both advertisement words must be non-zero or negotiation never ran */
2785 if ((!(adv_reg)) || (!(lp_reg))) {
2786 ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2787 "Local or link partner's advertised flow control "
2788 "settings are NULL. Local: %x, link partner: %x\n",
2790 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2793 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2795 * Now we need to check if the user selected Rx ONLY
2796 * of pause frames. In this case, we had to advertise
2797 * FULL flow control because we could not advertise RX
2798 * ONLY. Hence, we must now check to see if we need to
2799 * turn OFF the TRANSMISSION of PAUSE frames.
2801 if (hw->fc.requested_mode == ixgbe_fc_full) {
2802 hw->fc.current_mode = ixgbe_fc_full;
2803 DEBUGOUT("Flow Control = FULL.\n");
2805 hw->fc.current_mode = ixgbe_fc_rx_pause;
2806 DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2808 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2809 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2810 hw->fc.current_mode = ixgbe_fc_tx_pause;
2811 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2812 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2813 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2814 hw->fc.current_mode = ixgbe_fc_rx_pause;
2815 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2817 hw->fc.current_mode = ixgbe_fc_none;
2818 DEBUGOUT("Flow Control = NONE.\n");
2820 return IXGBE_SUCCESS;
2824 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2825 * @hw: pointer to hardware structure
2827 * Enable flow control according to 1 gig fiber negotiation results.
2829 STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2831 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2832 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2835 * On multispeed fiber at 1g, bail out if
2836 * - link is up but AN did not complete, or if
2837 * - link is up and AN completed but timed out
2840 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2841 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2842 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
2843 ERROR_REPORT1(IXGBE_ERROR_POLLING,
2844 "Auto-Negotiation did not complete or timed out");
 /* Read our advertisement and the link partner's ability, then
  * resolve using the same sym/asm bit layout for both registers. */
2848 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2849 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2851 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2852 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2853 IXGBE_PCS1GANA_ASM_PAUSE,
2854 IXGBE_PCS1GANA_SYM_PAUSE,
2855 IXGBE_PCS1GANA_ASM_PAUSE);
2862 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2863 * @hw: pointer to hardware structure
2865 * Enable flow control according to IEEE clause 37.
2867 STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2869 u32 links2, anlp1_reg, autoc_reg, links;
2870 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2873 * On backplane, bail out if
2874 * - backplane autoneg was not completed, or if
2875 * - we are 82599 and link partner is not AN enabled
2877 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2878 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
2879 ERROR_REPORT1(IXGBE_ERROR_POLLING,
2880 "Auto-Negotiation did not complete");
 /* 82599 additionally reports LP AN support in LINKS2 */
2884 if (hw->mac.type == ixgbe_mac_82599EB) {
2885 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2886 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2887 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
2888 "Link partner is not AN enabled");
2893 * Read the 10g AN autoc and LP ability registers and resolve
2894 * local flow control settings accordingly
2896 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2897 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2899 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2900 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2901 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2908 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2909 * @hw: pointer to hardware structure
2911 * Enable flow control according to IEEE clause 37.
2913 STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2915 u16 technology_ability_reg = 0;
2916 u16 lp_technology_ability_reg = 0;
 /* NOTE(review): read_reg return codes are ignored here; both words
  * are pre-initialized to 0 so a failed read falls through to the
  * zero-advertisement guard in ixgbe_negotiate_fc(). */
2918 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2919 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2920 &technology_ability_reg);
2921 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2922 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2923 &lp_technology_ability_reg);
2925 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2926 (u32)lp_technology_ability_reg,
2927 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2928 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2932 * ixgbe_fc_autoneg - Configure flow control
2933 * @hw: pointer to hardware structure
2935 * Compares our advertised flow control capabilities to those advertised by
2936 * our link partner, and determines the proper flow control mode to use.
 * On any failure (autoneg disabled, link down, negotiation error) the
 * requested mode is applied verbatim.
2938 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2940 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2941 ixgbe_link_speed speed;
2944 DEBUGFUNC("ixgbe_fc_autoneg");
2947 * AN should have completed when the cable was plugged in.
2948 * Look for reasons to bail out. Bail out if:
2949 * - FC autoneg is disabled, or if
2952 if (hw->fc.disable_fc_autoneg) {
2953 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
2954 "Flow control autoneg is disabled");
2958 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2960 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
 /* Dispatch to the media-specific negotiation helper */
2964 switch (hw->phy.media_type) {
2965 /* Autoneg flow control on fiber adapters */
2966 case ixgbe_media_type_fiber:
2967 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2968 ret_val = ixgbe_fc_autoneg_fiber(hw);
2971 /* Autoneg flow control on backplane adapters */
2972 case ixgbe_media_type_backplane:
2973 ret_val = ixgbe_fc_autoneg_backplane(hw);
2976 /* Autoneg flow control on copper adapters */
2977 case ixgbe_media_type_copper:
2978 if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
2979 ret_val = ixgbe_fc_autoneg_copper(hw);
2987 if (ret_val == IXGBE_SUCCESS) {
2988 hw->fc.fc_was_autonegged = true;
2990 hw->fc.fc_was_autonegged = false;
2991 hw->fc.current_mode = hw->fc.requested_mode;
2996 * ixgbe_disable_pcie_master - Disable PCI-express master access
2997 * @hw: pointer to hardware structure
2999 * Disables PCI-Express master access and verifies there are no pending
3000 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
3001 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
3002 * is returned signifying master requests disabled.
3004 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
3006 s32 status = IXGBE_SUCCESS;
3009 DEBUGFUNC("ixgbe_disable_pcie_master");
3011 /* Always set this bit to ensure any future transactions are blocked */
3012 IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
3014 /* Exit if master requests are blocked */
3015 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
3018 /* Poll for master request bit to clear */
3019 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
3021 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
3026 * Two consecutive resets are required via CTRL.RST per datasheet
3027 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
3028 * of this need. The first reset prevents new master requests from
3029 * being issued by our device. We then must wait 1usec or more for any
3030 * remaining completions from the PCIe bus to trickle in, and then reset
3031 * again to clear out any effects they may have had on our device.
3033 DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
3034 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
3037 * Before proceeding, make sure that the PCIe block does not have
3038 * transactions pending.
3040 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
3042 if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
3043 IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
 /* Both polls exhausted their timeout: report the failure */
3047 ERROR_REPORT1(IXGBE_ERROR_POLLING,
3048 "PCIe transaction pending bit also did not clear.\n");
3049 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
3056 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
3057 * @hw: pointer to hardware structure
3058 * @mask: Mask to specify which semaphore to acquire
3060 * Acquires the SWFW semaphore through the GSSR register for the specified
3061 * function (CSR, PHY0, PHY1, EEPROM, Flash)
3063 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
 /* FW-owned bits sit 5 positions above the corresponding SW bits in GSSR */
3067 u32 fwmask = mask << 5;
3070 DEBUGFUNC("ixgbe_acquire_swfw_sync");
3074 * SW EEPROM semaphore bit is used for access to all
3075 * SW_FW_SYNC/GSSR bits (not just EEPROM)
3077 if (ixgbe_get_eeprom_semaphore(hw))
3078 return IXGBE_ERR_SWFW_SYNC;
3080 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3081 if (!(gssr & (fwmask | swmask)))
3085 * Firmware currently using resource (fwmask) or other software
3086 * thread currently using resource (swmask)
3088 ixgbe_release_eeprom_semaphore(hw);
3094 DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
3095 return IXGBE_ERR_SWFW_SYNC;
 /* Resource free: claim it by setting our SW bit, then drop the
  * EEPROM semaphore that guarded the read-modify-write. */
3099 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3101 ixgbe_release_eeprom_semaphore(hw);
3102 return IXGBE_SUCCESS;
3106 * ixgbe_release_swfw_sync - Release SWFW semaphore
3107 * @hw: pointer to hardware structure
3108 * @mask: Mask to specify which semaphore to release
3110 * Releases the SWFW semaphore through the GSSR register for the specified
3111 * function (CSR, PHY0, PHY1, EEPROM, Flash)
3113 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
3118 DEBUGFUNC("ixgbe_release_swfw_sync");
 /* The EEPROM semaphore serializes all GSSR read-modify-writes */
3120 ixgbe_get_eeprom_semaphore(hw);
3122 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3124 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3126 ixgbe_release_eeprom_semaphore(hw);
3130 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3131 * @hw: pointer to hardware structure
3133 * Stops the receive data path and waits for the HW to internally empty
3134 * the Rx security block
3136 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3138 #define IXGBE_MAX_SECRX_POLL 40
3143 DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3146 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3147 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3148 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
 /* Poll until the security block reports it has drained */
3149 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3150 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3151 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3154 /* Use interrupt-safe sleep just in case */
3158 /* For informational purposes only */
3159 if (i >= IXGBE_MAX_SECRX_POLL)
3160 DEBUGOUT("Rx unit being enabled before security "
3161 "path fully disabled. Continuing with init.\n");
3163 return IXGBE_SUCCESS;
3167 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3168 * @hw: pointer to hardware structure
3170 * Enables the receive data path.
 * Clears the RX_DIS bit set by ixgbe_disable_sec_rx_path_generic().
3172 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3176 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3178 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3179 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3180 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3181 IXGBE_WRITE_FLUSH(hw);
3183 return IXGBE_SUCCESS;
3187 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3188 * @hw: pointer to hardware structure
3189 * @regval: register value to write to RXCTRL
3191 * Enables the Rx DMA unit
 * The caller-supplied value is written verbatim; no bits are modified here.
3193 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3195 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3197 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
3199 return IXGBE_SUCCESS;
3203 * ixgbe_blink_led_start_generic - Blink LED based on index.
3204 * @hw: pointer to hardware structure
3205 * @index: led number to blink
 * Returns IXGBE_ERR_SWFW_SYNC if the SW/FW semaphore cannot be taken
 * when required for AUTOC access on LESM-enabled 82599 parts.
3207 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
3209 ixgbe_link_speed speed = 0;
3211 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3212 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3213 s32 ret_val = IXGBE_SUCCESS;
3215 DEBUGFUNC("ixgbe_blink_led_start_generic");
3218 * Link must be up to auto-blink the LEDs;
3219 * Force it if link is down.
3221 hw->mac.ops.check_link(hw, &speed, &link_up, false);
3224 /* Need the SW/FW semaphore around AUTOC writes if 82599 and
3227 bool got_lock = false;
3228 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3229 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
3230 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
3231 IXGBE_GSSR_MAC_CSR_SM);
3232 if (ret_val != IXGBE_SUCCESS) {
3233 ret_val = IXGBE_ERR_SWFW_SYNC;
 /* Force link up (FLU) and restart AN so the LED has link to blink on */
3239 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3240 autoc_reg |= IXGBE_AUTOC_FLU;
3241 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
3242 IXGBE_WRITE_FLUSH(hw);
3245 hw->mac.ops.release_swfw_sync(hw,
3246 IXGBE_GSSR_MAC_CSR_SM);
3250 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3251 led_reg |= IXGBE_LED_BLINK(index);
3252 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3253 IXGBE_WRITE_FLUSH(hw);
3260 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3261 * @hw: pointer to hardware structure
3262 * @index: led number to stop blinking
 * Restores the LED to link/activity mode and clears any FLU forcing
 * applied by ixgbe_blink_led_start_generic().
3264 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
3266 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3267 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3268 s32 ret_val = IXGBE_SUCCESS;
3269 bool got_lock = false;
3271 DEBUGFUNC("ixgbe_blink_led_stop_generic");
3272 /* Need the SW/FW semaphore around AUTOC writes if 82599 and
3275 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3276 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
3277 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
3278 IXGBE_GSSR_MAC_CSR_SM);
3279 if (ret_val != IXGBE_SUCCESS) {
3280 ret_val = IXGBE_ERR_SWFW_SYNC;
3287 autoc_reg &= ~IXGBE_AUTOC_FLU;
3288 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3289 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
3291 if (hw->mac.type == ixgbe_mac_82599EB)
3292 ixgbe_reset_pipeline_82599(hw);
3295 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
3297 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3298 led_reg &= ~IXGBE_LED_BLINK(index);
3299 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3300 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3301 IXGBE_WRITE_FLUSH(hw);
3308 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3309 * @hw: pointer to hardware structure
3310 * @san_mac_offset: SAN MAC address offset
3312 * This function will read the EEPROM location for the SAN MAC address
3313 * pointer, and returns the value at that location. This is used in both
3314 * get and set mac_addr routines.
3316 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3317 u16 *san_mac_offset)
3321 DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3324 * First read the EEPROM pointer to see if the MAC addresses are
 * available; a failed read is reported but the read status is
 * still propagated to the caller.
3327 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3330 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3331 "eeprom at offset %d failed",
3332 IXGBE_SAN_MAC_ADDR_PTR);
3339 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3340 * @hw: pointer to hardware structure
3341 * @san_mac_addr: SAN MAC address
3343 * Reads the SAN MAC address from the EEPROM, if it's available. This is
3344 * per-port, so set_lan_id() must be called before reading the addresses.
3345 * set_lan_id() is called by identify_sfp(), but this cannot be relied
3346 * upon for non-SFP connections, so we must call it here.
3348 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3350 u16 san_mac_data, san_mac_offset;
3354 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3357 * First read the EEPROM pointer to see if the MAC addresses are
3358 * available. If they're not, no point in calling set_lan_id() here.
3360 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3361 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3362 goto san_mac_addr_out;
3364 /* make sure we know which port we need to program */
3365 hw->mac.ops.set_lan_id(hw);
3366 /* apply the port offset to the address offset */
3367 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3368 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
 /* The 6-byte MAC is stored as three little-endian 16-bit EEPROM words */
3369 for (i = 0; i < 3; i++) {
3370 ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3373 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3374 "eeprom read at offset %d failed",
3376 goto san_mac_addr_out;
3378 san_mac_addr[i * 2] = (u8)(san_mac_data);
3379 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3382 return IXGBE_SUCCESS;
3386 * No addresses available in this EEPROM. It's not an
3387 * error though, so just wipe the local address and return.
3389 for (i = 0; i < 6; i++)
3390 san_mac_addr[i] = 0xFF;
3391 return IXGBE_SUCCESS;
3395 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3396 * @hw: pointer to hardware structure
3397 * @san_mac_addr: SAN MAC address
3399 * Write a SAN MAC address to the EEPROM.
 * Returns IXGBE_ERR_NO_SAN_ADDR_PTR when the EEPROM has no SAN MAC pointer.
3401 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3404 u16 san_mac_data, san_mac_offset;
3407 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3409 /* Look for SAN mac address pointer. If not defined, return */
3410 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3411 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3412 return IXGBE_ERR_NO_SAN_ADDR_PTR;
3414 /* Make sure we know which port we need to write */
3415 hw->mac.ops.set_lan_id(hw);
3416 /* Apply the port offset to the address offset */
3417 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3418 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
 /* Pack byte pairs into little-endian 16-bit words for the EEPROM */
3420 for (i = 0; i < 3; i++) {
3421 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3422 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3423 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3427 return IXGBE_SUCCESS;
3431 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3432 * @hw: pointer to hardware structure
3434 * Read PCIe configuration space, and get the MSI-X vector count from
3435 * the capabilities table.
3437 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
 /* Capability offset and vector ceiling differ per MAC generation */
3443 switch (hw->mac.type) {
3444 case ixgbe_mac_82598EB:
3445 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3446 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3448 case ixgbe_mac_82599EB:
3449 case ixgbe_mac_X540:
3450 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3451 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3457 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3458 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3459 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3461 /* MSI-X count is zero-based in HW */
 /* Clamp to the per-device maximum in case the config word is bogus */
3464 if (msix_count > max_msix_count)
3465 msix_count = max_msix_count;
3471 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3472 * @hw: pointer to hardware structure
3473 * @addr: Address to put into receive address register
3474 * @vmdq: VMDq pool to assign
3476 * Puts an ethernet address into a receive address register, or
3477 * finds the rar that it is already in; adds to the pool list
3479 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3481 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3482 u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3484 u32 rar_low, rar_high;
3485 u32 addr_low, addr_high;
3487 DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3489 /* swap bytes for HW little endian */
3490 addr_low = addr[0] | (addr[1] << 8)
3493 addr_high = addr[4] | (addr[5] << 8);
3496 * Either find the mac_id in rar or find the first empty space.
3497 * rar_highwater points to just after the highest currently used
3498 * rar in order to shorten the search. It grows when we add a new
3501 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3502 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
 /* An entry without IXGBE_RAH_AV set is free for reuse */
3504 if (((IXGBE_RAH_AV & rar_high) == 0)
3505 && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3506 first_empty_rar = rar;
3507 } else if ((rar_high & 0xFFFF) == addr_high) {
3508 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3509 if (rar_low == addr_low)
3510 break; /* found it already in the rars */
3514 if (rar < hw->mac.rar_highwater) {
3515 /* already there so just add to the pool bits */
3516 ixgbe_set_vmdq(hw, rar, vmdq);
3517 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3518 /* stick it into first empty RAR slot we found */
3519 rar = first_empty_rar;
3520 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3521 } else if (rar == hw->mac.rar_highwater) {
3522 /* add it to the top of the list and inc the highwater mark */
3523 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3524 hw->mac.rar_highwater++;
3525 } else if (rar >= hw->mac.num_rar_entries) {
3526 return IXGBE_ERR_INVALID_MAC_ADDR;
3530 * If we found rar[0], make sure the default pool bit (we use pool 0)
3531 * remains cleared to be sure default pool packets will get delivered
3534 ixgbe_clear_vmdq(hw, rar, 0);
3540 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3541 * @hw: pointer to hardware struct
3542 * @rar: receive address register index to disassociate
3543 * @vmdq: VMDq pool index to remove from the rar
 * Pass IXGBE_CLEAR_VMDQ_ALL as @vmdq to remove every pool association.
3545 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3547 u32 mpsar_lo, mpsar_hi;
3548 u32 rar_entries = hw->mac.num_rar_entries;
3550 DEBUGFUNC("ixgbe_clear_vmdq_generic");
3552 /* Make sure we are using a valid rar index range */
3553 if (rar >= rar_entries) {
3554 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3555 "RAR index %d is out of range.\n", rar);
3556 return IXGBE_ERR_INVALID_ARGUMENT;
 /* Pool bitmap spans two registers: MPSAR_LO covers pools 0-31,
  * MPSAR_HI covers pools 32-63. */
3559 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3560 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3562 if (!mpsar_lo && !mpsar_hi)
3565 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3567 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3571 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3574 } else if (vmdq < 32) {
3575 mpsar_lo &= ~(1 << vmdq);
3576 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3578 mpsar_hi &= ~(1 << (vmdq - 32));
3579 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3582 /* was that the last pool using this rar? */
3583 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
3584 hw->mac.ops.clear_rar(hw, rar);
3586 return IXGBE_SUCCESS;
3590 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3591 * @hw: pointer to hardware struct
3592 * @rar: receive address register index to associate with a VMDq index
3593 * @vmdq: VMDq pool index
3595 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3598 u32 rar_entries = hw->mac.num_rar_entries;
3600 DEBUGFUNC("ixgbe_set_vmdq_generic");
3602 /* Make sure we are using a valid rar index range */
3603 if (rar >= rar_entries) {
3604 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3605 "RAR index %d is out of range.\n", rar);
3606 return IXGBE_ERR_INVALID_ARGUMENT;
3610 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3612 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3614 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3615 mpsar |= 1 << (vmdq - 32);
3616 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3618 return IXGBE_SUCCESS;
3622 * This function should only be involved in the IOV mode.
3623 * In IOV mode, Default pool is next pool after the number of
3624 * VFs advertized and not 0.
3625 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3627 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3628 * @hw: pointer to hardware struct
3629 * @vmdq: VMDq pool index
3631 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3633 u32 rar = hw->mac.san_mac_rar_index;
3635 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3638 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3639 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3641 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3642 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3645 return IXGBE_SUCCESS;
3649 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3650 * @hw: pointer to hardware structure
3652 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3656 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3657 DEBUGOUT(" Clearing UTA\n");
3659 for (i = 0; i < 128; i++)
3660 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3662 return IXGBE_SUCCESS;
3666 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3667 * @hw: pointer to hardware structure
3668 * @vlan: VLAN id to write to VLAN filter
3670 * return the VLVF index where this VLAN id should be placed
3673 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
3676 u32 first_empty_slot = 0;
3679 /* short cut the special case */
3684 * Search for the vlan id in the VLVF entries. Save off the first empty
3685 * slot found along the way
3687 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
3688 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3689 if (!bits && !(first_empty_slot))
3690 first_empty_slot = regindex;
3691 else if ((bits & 0x0FFF) == vlan)
3696 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
3697 * in the VLVF. Else use the first empty VLVF register for this
3700 if (regindex >= IXGBE_VLVF_ENTRIES) {
3701 if (first_empty_slot)
3702 regindex = first_empty_slot;
3704 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
3705 "No space in VLVF.\n");
3706 regindex = IXGBE_ERR_NO_SPACE;
3714 * ixgbe_set_vfta_generic - Set VLAN filter table
3715 * @hw: pointer to hardware structure
3716 * @vlan: VLAN id to write to VLAN filter
3717 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
3718 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
3720 * Turn on/off specified VLAN in the VLAN filter table.
3722 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3729 s32 ret_val = IXGBE_SUCCESS;
3730 bool vfta_changed = false;
3732 DEBUGFUNC("ixgbe_set_vfta_generic");
3735 return IXGBE_ERR_PARAM;
3738 * this is a 2 part operation - first the VFTA, then the
3739 * VLVF and VLVFB if VT Mode is set
3740 * We don't write the VFTA until we know the VLVF part succeeded.
3744 * The VFTA is a bitstring made up of 128 32-bit registers
3745 * that enable the particular VLAN id, much like the MTA:
3746 * bits[11-5]: which register
3747 * bits[4-0]: which bit in the register
3749 regindex = (vlan >> 5) & 0x7F;
3750 bitindex = vlan & 0x1F;
3751 targetbit = (1 << bitindex);
3752 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
3755 if (!(vfta & targetbit)) {
3757 vfta_changed = true;
3760 if ((vfta & targetbit)) {
3762 vfta_changed = true;
3767 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3769 ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
3771 if (ret_val != IXGBE_SUCCESS)
3775 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
3777 return IXGBE_SUCCESS;
3781 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3782 * @hw: pointer to hardware structure
3783 * @vlan: VLAN id to write to VLAN filter
3784 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
3785 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
3786 * @vfta_changed: pointer to boolean flag which indicates whether VFTA
3789 * Turn on/off specified bit in VLVF table.
3791 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3792 bool vlan_on, bool *vfta_changed)
3796 DEBUGFUNC("ixgbe_set_vlvf_generic");
3799 return IXGBE_ERR_PARAM;
3801 /* If VT Mode is set
3803 * make sure the vlan is in VLVF
3804 * set the vind bit in the matching VLVFB
3806 * clear the pool bit and possibly the vind
3808 vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3809 if (vt & IXGBE_VT_CTL_VT_ENABLE) {
3813 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
3818 /* set the pool bit */
3820 bits = IXGBE_READ_REG(hw,
3821 IXGBE_VLVFB(vlvf_index * 2));
3822 bits |= (1 << vind);
3824 IXGBE_VLVFB(vlvf_index * 2),
3827 bits = IXGBE_READ_REG(hw,
3828 IXGBE_VLVFB((vlvf_index * 2) + 1));
3829 bits |= (1 << (vind - 32));
3831 IXGBE_VLVFB((vlvf_index * 2) + 1),
3835 /* clear the pool bit */
3837 bits = IXGBE_READ_REG(hw,
3838 IXGBE_VLVFB(vlvf_index * 2));
3839 bits &= ~(1 << vind);
3841 IXGBE_VLVFB(vlvf_index * 2),
3843 bits |= IXGBE_READ_REG(hw,
3844 IXGBE_VLVFB((vlvf_index * 2) + 1));
3846 bits = IXGBE_READ_REG(hw,
3847 IXGBE_VLVFB((vlvf_index * 2) + 1));
3848 bits &= ~(1 << (vind - 32));
3850 IXGBE_VLVFB((vlvf_index * 2) + 1),
3852 bits |= IXGBE_READ_REG(hw,
3853 IXGBE_VLVFB(vlvf_index * 2));
3858 * If there are still bits set in the VLVFB registers
3859 * for the VLAN ID indicated we need to see if the
3860 * caller is requesting that we clear the VFTA entry bit.
3861 * If the caller has requested that we clear the VFTA
3862 * entry bit but there are still pools/VFs using this VLAN
3863 * ID entry then ignore the request. We're not worried
3864 * about the case where we're turning the VFTA VLAN ID
3865 * entry bit on, only when requested to turn it off as
3866 * there may be multiple pools and/or VFs using the
3867 * VLAN ID entry. In that case we cannot clear the
3868 * VFTA bit until all pools/VFs using that VLAN ID have also
3869 * been cleared. This will be indicated by "bits" being
3873 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
3874 (IXGBE_VLVF_VIEN | vlan));
3875 if ((!vlan_on) && (vfta_changed != NULL)) {
3876 /* someone wants to clear the vfta entry
3877 * but some pools/VFs are still using it.
3879 *vfta_changed = false;
3882 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3885 return IXGBE_SUCCESS;
3889 * ixgbe_clear_vfta_generic - Clear VLAN filter table
3890 * @hw: pointer to hardware structure
3892 * Clears the VLAN filer table, and the VMDq index associated with the filter
3894 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3898 DEBUGFUNC("ixgbe_clear_vfta_generic");
3900 for (offset = 0; offset < hw->mac.vft_size; offset++)
3901 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
3903 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3904 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3905 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
3906 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
3909 return IXGBE_SUCCESS;
3913 * ixgbe_check_mac_link_generic - Determine link and speed status
3914 * @hw: pointer to hardware structure
3915 * @speed: pointer to link speed
3916 * @link_up: true when link is up
3917 * @link_up_wait_to_complete: bool used to wait for link up or not
3919 * Reads the links register to determine if link is up and the current speed
3921 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3922 bool *link_up, bool link_up_wait_to_complete)
3924 u32 links_reg, links_orig;
3927 DEBUGFUNC("ixgbe_check_mac_link_generic");
3929 /* clear the old state */
3930 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
3932 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3934 if (links_orig != links_reg) {
3935 DEBUGOUT2("LINKS changed from %08X to %08X\n",
3936 links_orig, links_reg);
3939 if (link_up_wait_to_complete) {
3940 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
3941 if (links_reg & IXGBE_LINKS_UP) {
3948 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3951 if (links_reg & IXGBE_LINKS_UP)
3957 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3958 IXGBE_LINKS_SPEED_10G_82599)
3959 *speed = IXGBE_LINK_SPEED_10GB_FULL;
3960 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3961 IXGBE_LINKS_SPEED_1G_82599)
3962 *speed = IXGBE_LINK_SPEED_1GB_FULL;
3963 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
3964 IXGBE_LINKS_SPEED_100_82599)
3965 *speed = IXGBE_LINK_SPEED_100_FULL;
3967 *speed = IXGBE_LINK_SPEED_UNKNOWN;
3969 return IXGBE_SUCCESS;
3973 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
3975 * @hw: pointer to hardware structure
3976 * @wwnn_prefix: the alternative WWNN prefix
3977 * @wwpn_prefix: the alternative WWPN prefix
3979 * This function will read the EEPROM from the alternative SAN MAC address
3980 * block to check the support for the alternative WWNN/WWPN prefix support.
3982 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
3986 u16 alt_san_mac_blk_offset;
3988 DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
3990 /* clear output first */
3991 *wwnn_prefix = 0xFFFF;
3992 *wwpn_prefix = 0xFFFF;
3994 /* check if alternative SAN MAC is supported */
3995 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
3996 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
3997 goto wwn_prefix_err;
3999 if ((alt_san_mac_blk_offset == 0) ||
4000 (alt_san_mac_blk_offset == 0xFFFF))
4001 goto wwn_prefix_out;
4003 /* check capability in alternative san mac address block */
4004 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
4005 if (hw->eeprom.ops.read(hw, offset, &caps))
4006 goto wwn_prefix_err;
4007 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
4008 goto wwn_prefix_out;
4010 /* get the corresponding prefix for WWNN/WWPN */
4011 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
4012 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
4013 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4014 "eeprom read at offset %d failed", offset);
4017 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
4018 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
4019 goto wwn_prefix_err;
4022 return IXGBE_SUCCESS;
4025 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4026 "eeprom read at offset %d failed", offset);
4027 return IXGBE_SUCCESS;
4031 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4032 * @hw: pointer to hardware structure
4033 * @bs: the fcoe boot status
4035 * This function will read the FCOE boot status from the iSCSI FCOE block
4037 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4039 u16 offset, caps, flags;
4042 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4044 /* clear output first */
4045 *bs = ixgbe_fcoe_bootstatus_unavailable;
4047 /* check if FCOE IBA block is present */
4048 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4049 status = hw->eeprom.ops.read(hw, offset, &caps);
4050 if (status != IXGBE_SUCCESS)
4053 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4056 /* check if iSCSI FCOE block is populated */
4057 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4058 if (status != IXGBE_SUCCESS)
4061 if ((offset == 0) || (offset == 0xFFFF))
4064 /* read fcoe flags in iSCSI FCOE block */
4065 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4066 status = hw->eeprom.ops.read(hw, offset, &flags);
4067 if (status != IXGBE_SUCCESS)
4070 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4071 *bs = ixgbe_fcoe_bootstatus_enabled;
4073 *bs = ixgbe_fcoe_bootstatus_disabled;
4080 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4081 * @hw: pointer to hardware structure
4082 * @enable: enable or disable switch for anti-spoofing
4083 * @pf: Physical Function pool - do not enable anti-spoofing for the PF
4086 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
4089 int pf_target_reg = pf >> 3;
4090 int pf_target_shift = pf % 8;
4093 if (hw->mac.type == ixgbe_mac_82598EB)
4097 pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
4100 * PFVFSPOOF register array is size 8 with 8 bits assigned to
4101 * MAC anti-spoof enables in each register array element.
4103 for (j = 0; j < pf_target_reg; j++)
4104 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
4107 * The PF should be allowed to spoof so that it can support
4108 * emulation mode NICs. Do not set the bits assigned to the PF
4110 pfvfspoof &= (1 << pf_target_shift) - 1;
4111 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
4114 * Remaining pools belong to the PF so they do not need to have
4115 * anti-spoofing enabled.
4117 for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
4118 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
4122 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4123 * @hw: pointer to hardware structure
4124 * @enable: enable or disable switch for VLAN anti-spoofing
4125 * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4128 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4130 int vf_target_reg = vf >> 3;
4131 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4134 if (hw->mac.type == ixgbe_mac_82598EB)
4137 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4139 pfvfspoof |= (1 << vf_target_shift);
4141 pfvfspoof &= ~(1 << vf_target_shift);
4142 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4146 * ixgbe_get_device_caps_generic - Get additional device capabilities
4147 * @hw: pointer to hardware structure
4148 * @device_caps: the EEPROM word with the extra device capabilities
4150 * This function will read the EEPROM location for the device capabilities,
4151 * and return the word through device_caps.
4153 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4155 DEBUGFUNC("ixgbe_get_device_caps_generic");
4157 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4159 return IXGBE_SUCCESS;
4163 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4164 * @hw: pointer to hardware structure
4167 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4172 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4174 /* Enable relaxed ordering */
4175 for (i = 0; i < hw->mac.max_tx_queues; i++) {
4176 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4177 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4178 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4181 for (i = 0; i < hw->mac.max_rx_queues; i++) {
4182 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4183 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4184 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4185 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4191 * ixgbe_calculate_checksum - Calculate checksum for buffer
4192 * @buffer: pointer to EEPROM
4193 * @length: size of EEPROM to calculate a checksum for
4194 * Calculates the checksum for some buffer on a specified length. The
4195 * checksum calculated is returned.
4197 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4202 DEBUGFUNC("ixgbe_calculate_checksum");
4207 for (i = 0; i < length; i++)
4210 return (u8) (0 - sum);
4214 * ixgbe_host_interface_command - Issue command to manageability block
4215 * @hw: pointer to the HW structure
4216 * @buffer: contains the command to write and where the return status will
4218 * @length: length of buffer, must be multiple of 4 bytes
4220 * Communicates with the manageability block. On success return IXGBE_SUCCESS
4221 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
4223 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
4227 u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
4228 u8 buf_len, dword_len;
4230 s32 ret_val = IXGBE_SUCCESS;
4232 DEBUGFUNC("ixgbe_host_interface_command");
4234 if (length == 0 || length & 0x3 ||
4235 length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4236 DEBUGOUT("Buffer length failure.\n");
4237 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4241 /* Check that the host interface is enabled. */
4242 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4243 if ((hicr & IXGBE_HICR_EN) == 0) {
4244 DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
4245 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4249 /* Calculate length in DWORDs */
4250 dword_len = length >> 2;
4253 * The device driver writes the relevant command block
4254 * into the ram area.
4256 for (i = 0; i < dword_len; i++)
4257 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4258 i, IXGBE_CPU_TO_LE32(buffer[i]));
4260 /* Setting this bit tells the ARC that a new command is pending. */
4261 IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
4263 for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
4264 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4265 if (!(hicr & IXGBE_HICR_C))
4270 /* Check command successful completion. */
4271 if (i == IXGBE_HI_COMMAND_TIMEOUT ||
4272 (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
4273 DEBUGOUT("Command has failed with no status valid.\n");
4274 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4278 /* Calculate length in DWORDs */
4279 dword_len = hdr_size >> 2;
4281 /* first pull in the header so we know the buffer length */
4282 for (bi = 0; bi < dword_len; bi++) {
4283 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4284 IXGBE_LE32_TO_CPUS(&buffer[bi]);
4287 /* If there is any thing in data position pull it in */
4288 buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
4292 if (length < (buf_len + hdr_size)) {
4293 DEBUGOUT("Buffer not large enough for reply message.\n");
4294 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4298 /* Calculate length in DWORDs, add 3 for odd lengths */
4299 dword_len = (buf_len + 3) >> 2;
4301 /* Pull in the rest of the buffer (bi is where we left off)*/
4302 for (; bi <= dword_len; bi++) {
4303 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4304 IXGBE_LE32_TO_CPUS(&buffer[bi]);
4312 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4313 * @hw: pointer to the HW structure
4314 * @maj: driver version major number
4315 * @min: driver version minor number
4316 * @build: driver version build number
4317 * @sub: driver version sub build number
4319 * Sends driver version number to firmware through the manageability
4320 * block. On success return IXGBE_SUCCESS
4321 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4322 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4324 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4327 struct ixgbe_hic_drv_info fw_cmd;
4329 s32 ret_val = IXGBE_SUCCESS;
4331 DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4333 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
4335 ret_val = IXGBE_ERR_SWFW_SYNC;
4339 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4340 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4341 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4342 fw_cmd.port_num = (u8)hw->bus.func;
4343 fw_cmd.ver_maj = maj;
4344 fw_cmd.ver_min = min;
4345 fw_cmd.ver_build = build;
4346 fw_cmd.ver_sub = sub;
4347 fw_cmd.hdr.checksum = 0;
4348 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4349 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4353 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4354 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4356 if (ret_val != IXGBE_SUCCESS)
4359 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4360 FW_CEM_RESP_STATUS_SUCCESS)
4361 ret_val = IXGBE_SUCCESS;
4363 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4368 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4374 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4375 * @hw: pointer to hardware structure
4376 * @num_pb: number of packet buffers to allocate
4377 * @headroom: reserve n KB of headroom
4378 * @strategy: packet buffer allocation strategy
4380 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4383 u32 pbsize = hw->mac.rx_pb_size;
4385 u32 rxpktsize, txpktsize, txpbthresh;
4387 /* Reserve headroom */
4393 /* Divide remaining packet buffer space amongst the number of packet
4394 * buffers requested using supplied strategy.
4397 case PBA_STRATEGY_WEIGHTED:
4398 /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4399 * buffer with 5/8 of the packet buffer space.
4401 rxpktsize = (pbsize * 5) / (num_pb * 4);
4402 pbsize -= rxpktsize * (num_pb / 2);
4403 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4404 for (; i < (num_pb / 2); i++)
4405 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4406 /* Fall through to configure remaining packet buffers */
4407 case PBA_STRATEGY_EQUAL:
4408 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4409 for (; i < num_pb; i++)
4410 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4416 /* Only support an equally distributed Tx packet buffer strategy. */
4417 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4418 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4419 for (i = 0; i < num_pb; i++) {
4420 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4421 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4424 /* Clear unused TCs, if any, to zero buffer size*/
4425 for (; i < IXGBE_MAX_PB; i++) {
4426 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4427 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4428 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4433 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4434 * @hw: pointer to the hardware structure
4436 * The 82599 and x540 MACs can experience issues if TX work is still pending
4437 * when a reset occurs. This function prevents this by flushing the PCIe
4438 * buffers on the system.
4440 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4442 u32 gcr_ext, hlreg0;
4445 * If double reset is not requested then all transactions should
4446 * already be clear and as such there is no work to do
4448 if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4452 * Set loopback enable to prevent any transmits from being sent
4453 * should the link come up. This assumes that the RXCTRL.RXEN bit
4454 * has already been cleared.
4456 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4457 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4459 /* initiate cleaning flow for buffers in the PCIe transaction layer */
4460 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4461 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4462 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4464 /* Flush all writes and allow 20usec for all transactions to clear */
4465 IXGBE_WRITE_FLUSH(hw);
4468 /* restore previous register values */
4469 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4470 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);