1 /*******************************************************************************
3 Copyright (c) 2001-2012, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include "ixgbe_common.h"
35 #include "ixgbe_phy.h"
36 #include "ixgbe_dcb.h"
37 #include "ixgbe_dcb_82599.h"
38 #include "ixgbe_api.h"
39 #ident "$Id: ixgbe_common.c,v 1.382 2013/11/22 01:02:01 jtkirshe Exp $"
41 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
42 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
43 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
44 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
45 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
46 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
48 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
49 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
50 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
51 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw);
53 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
54 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
56 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
57 u16 words, u16 *data);
58 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
59 u16 words, u16 *data);
60 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
64 * ixgbe_init_ops_generic - Inits function ptrs
65 * @hw: pointer to the hardware structure
67 * Initialize the function pointers.
69 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
71 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
72 struct ixgbe_mac_info *mac = &hw->mac;
73 u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
75 DEBUGFUNC("ixgbe_init_ops_generic");
/* EEPROM ops */
78 eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
79 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
80 if (eec & IXGBE_EEC_PRES) {
81 eeprom->ops.read = &ixgbe_read_eerd_generic;
82 eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
/* else branch: no EEPROM detected, fall back to bit-bang access */
84 eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
85 eeprom->ops.read_buffer =
86 &ixgbe_read_eeprom_buffer_bit_bang_generic;
88 eeprom->ops.write = &ixgbe_write_eeprom_generic;
89 eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
90 eeprom->ops.validate_checksum =
91 &ixgbe_validate_eeprom_checksum_generic;
92 eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
93 eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
/* MAC ops; entries set to NULL are device specific and must be filled
 * in by the per-device init code (82598/82599/X540) before use. */
96 mac->ops.init_hw = &ixgbe_init_hw_generic;
97 mac->ops.reset_hw = NULL;
98 mac->ops.start_hw = &ixgbe_start_hw_generic;
99 mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
100 mac->ops.get_media_type = NULL;
101 mac->ops.get_supported_physical_layer = NULL;
102 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
103 mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
104 mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
105 mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
106 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
107 mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
108 mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
/* LED ops */
111 mac->ops.led_on = &ixgbe_led_on_generic;
112 mac->ops.led_off = &ixgbe_led_off_generic;
113 mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
114 mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
116 /* RAR, Multicast, VLAN */
117 mac->ops.set_rar = &ixgbe_set_rar_generic;
118 mac->ops.clear_rar = &ixgbe_clear_rar_generic;
119 mac->ops.insert_mac_addr = NULL;
120 mac->ops.set_vmdq = NULL;
121 mac->ops.clear_vmdq = NULL;
122 mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
123 mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
124 mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
125 mac->ops.enable_mc = &ixgbe_enable_mc_generic;
126 mac->ops.disable_mc = &ixgbe_disable_mc_generic;
127 mac->ops.clear_vfta = NULL;
128 mac->ops.set_vfta = NULL;
129 mac->ops.set_vlvf = NULL;
130 mac->ops.init_uta_tables = NULL;
131 mac->ops.enable_rx = &ixgbe_enable_rx_generic;
132 mac->ops.disable_rx = &ixgbe_disable_rx_generic;
/* Flow control */
135 mac->ops.fc_enable = &ixgbe_fc_enable_generic;
/* Link ops are device specific and filled in later */
138 mac->ops.get_link_capabilities = NULL;
139 mac->ops.setup_link = NULL;
140 mac->ops.check_link = NULL;
142 return IXGBE_SUCCESS;
146 * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
148 * @hw: pointer to hardware structure
150 * This function returns true if the device supports flow control
151 * autonegotiation, and false if it does not.
154 s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
157 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
159 switch (hw->device_id) {
160 case IXGBE_DEV_ID_X540T:
161 case IXGBE_DEV_ID_X540T1:
162 case IXGBE_DEV_ID_82599_T3_LOM:
163 return IXGBE_SUCCESS;
165 return IXGBE_ERR_FC_NOT_SUPPORTED;
170 * ixgbe_setup_fc - Set up flow control
171 * @hw: pointer to hardware structure
173 * Called at init time to set up flow control.
175 STATIC s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
177 s32 ret_val = IXGBE_SUCCESS;
178 u32 reg = 0, reg_bp = 0;
/* got_lock: appears to track SW/FW semaphore ownership for the AUTOC
 * write path further down — confirm against the full source. */
180 bool got_lock = false;
182 DEBUGFUNC("ixgbe_setup_fc");
185 * Validate the requested mode. Strict IEEE mode does not allow
186 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
188 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
189 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
190 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
191 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
196 * 10gig parts do not have a word in the EEPROM to determine the
197 * default flow control setting, so we explicitly set it to full.
199 if (hw->fc.requested_mode == ixgbe_fc_default)
200 hw->fc.requested_mode = ixgbe_fc_full;
203 * Set up the 1G and 10G flow control advertisement registers so the
204 * HW will be able to do fc autoneg once the cable is plugged in. If
205 * we link at 10G, the 1G advertisement is harmless and vice versa.
207 switch (hw->phy.media_type) {
208 case ixgbe_media_type_fiber:
209 case ixgbe_media_type_backplane:
210 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
211 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
213 case ixgbe_media_type_copper:
214 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
215 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu);
/* NOTE(review): "®_cu" above is a mangled "&reg_cu" (HTML-entity
 * corruption of "&reg"); restore before building. */
222 * The possible values of fc.requested_mode are:
223 * 0: Flow control is completely disabled
224 * 1: Rx flow control is enabled (we can receive pause frames,
225 * but not send pause frames).
226 * 2: Tx flow control is enabled (we can send pause frames but
227 * we do not support receiving pause frames).
228 * 3: Both Rx and Tx flow control (symmetric) are enabled.
231 switch (hw->fc.requested_mode) {
233 /* Flow control completely disabled by software override. */
234 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
235 if (hw->phy.media_type == ixgbe_media_type_backplane)
236 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
237 IXGBE_AUTOC_ASM_PAUSE);
238 else if (hw->phy.media_type == ixgbe_media_type_copper)
239 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
241 case ixgbe_fc_tx_pause:
243 * Tx Flow control is enabled, and Rx Flow control is
244 * disabled by software override.
246 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
247 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
248 if (hw->phy.media_type == ixgbe_media_type_backplane) {
249 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
250 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
251 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
252 reg_cu |= IXGBE_TAF_ASM_PAUSE;
253 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
256 case ixgbe_fc_rx_pause:
258 * Rx Flow control is enabled and Tx Flow control is
259 * disabled by software override. Since there really
260 * isn't a way to advertise that we are capable of RX
261 * Pause ONLY, we will advertise that we support both
262 * symmetric and asymmetric Rx PAUSE, as such we fall
263 * through to the fc_full statement. Later, we will
264 * disable the adapter's ability to send PAUSE frames.
267 /* Flow control (both Rx and Tx) is enabled by SW override. */
268 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
269 if (hw->phy.media_type == ixgbe_media_type_backplane)
270 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
271 IXGBE_AUTOC_ASM_PAUSE;
272 else if (hw->phy.media_type == ixgbe_media_type_copper)
273 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
276 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
277 "Flow control param set incorrectly\n");
278 ret_val = IXGBE_ERR_CONFIG;
/* Pre-X540 parts program the clause 37 advertisement directly. */
283 if (hw->mac.type < ixgbe_mac_X540) {
285 * Enable auto-negotiation between the MAC & PHY;
286 * the MAC will advertise clause 37 flow control.
288 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
289 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
291 /* Disable AN timeout */
292 if (hw->fc.strict_ieee)
293 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
295 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
296 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
300 * AUTOC restart handles negotiation of 1G and 10G on backplane
301 * and copper. There is no need to set the PCS1GCTL register.
304 if (hw->phy.media_type == ixgbe_media_type_backplane) {
305 reg_bp |= IXGBE_AUTOC_AN_RESTART;
306 /* Need the SW/FW semaphore around AUTOC writes if 82599 and
307 * LESM is on, likewise reset_pipeline requries the lock as
308 * it also writes AUTOC.
310 if ((hw->mac.type == ixgbe_mac_82599EB) &&
311 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
312 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
313 IXGBE_GSSR_MAC_CSR_SM);
314 if (ret_val != IXGBE_SUCCESS) {
315 ret_val = IXGBE_ERR_SWFW_SYNC;
321 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
322 if (hw->mac.type == ixgbe_mac_82599EB)
323 ixgbe_reset_pipeline_82599(hw);
/* release the semaphore taken above once the AUTOC write is done */
326 hw->mac.ops.release_swfw_sync(hw,
327 IXGBE_GSSR_MAC_CSR_SM);
328 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
329 (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
330 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
331 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
334 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
340 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
341 * @hw: pointer to hardware structure
343 * Starts the hardware by filling the bus info structure and media type, clears
344 * all on chip counters, initializes receive address registers, multicast
345 * table, VLAN filter table, calls routine to set up link and flow control
346 * settings, and leaves transmit and receive units disabled and uninitialized
348 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
353 DEBUGFUNC("ixgbe_start_hw_generic");
355 /* Set the media type */
356 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
358 /* PHY ops initialization must be done in reset_hw() */
360 /* Clear the VLAN filter table */
361 hw->mac.ops.clear_vfta(hw);
363 /* Clear statistics registers */
364 hw->mac.ops.clear_hw_cntrs(hw);
366 /* Set No Snoop Disable */
367 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
368 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
369 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
370 IXGBE_WRITE_FLUSH(hw);
372 /* Setup flow control */
373 ret_val = ixgbe_setup_fc(hw);
374 if (ret_val != IXGBE_SUCCESS)
377 /* Clear adapter stopped flag */
378 hw->adapter_stopped = false;
385 * ixgbe_start_hw_gen2 - Init sequence for common device family
386 * @hw: pointer to hw structure
388 * Performs the init sequence common to the second generation
390 * Devices in the second generation:
394 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
399 /* Clear the rate limiters */
400 for (i = 0; i < hw->mac.max_tx_queues; i++) {
401 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
402 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
404 IXGBE_WRITE_FLUSH(hw);
406 /* Disable relaxed ordering */
407 for (i = 0; i < hw->mac.max_tx_queues; i++) {
408 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
409 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
410 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
413 for (i = 0; i < hw->mac.max_rx_queues; i++) {
414 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
415 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
416 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
417 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
420 return IXGBE_SUCCESS;
424 * ixgbe_init_hw_generic - Generic hardware initialization
425 * @hw: pointer to hardware structure
427 * Initialize the hardware by resetting the hardware, filling the bus info
428 * structure and media type, clears all on chip counters, initializes receive
429 * address registers, multicast table, VLAN filter table, calls routine to set
430 * up link and flow control settings, and leaves transmit and receive units
431 * disabled and uninitialized
433 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
437 DEBUGFUNC("ixgbe_init_hw_generic");
439 /* Reset the hardware */
440 status = hw->mac.ops.reset_hw(hw);
442 if (status == IXGBE_SUCCESS) {
444 status = hw->mac.ops.start_hw(hw);
451 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
452 * @hw: pointer to hardware structure
454 * Clears all hardware statistics counters by reading them from the hardware
455 * Statistics counters are clear on read.
457 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
461 DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
/* Every read below is intentionally discarded: these statistics
 * registers clear on read, so reading is the clearing operation. */
463 IXGBE_READ_REG(hw, IXGBE_CRCERRS);
464 IXGBE_READ_REG(hw, IXGBE_ILLERRC);
465 IXGBE_READ_REG(hw, IXGBE_ERRBC);
466 IXGBE_READ_REG(hw, IXGBE_MSPDC);
467 for (i = 0; i < 8; i++)
468 IXGBE_READ_REG(hw, IXGBE_MPC(i));
470 IXGBE_READ_REG(hw, IXGBE_MLFC);
471 IXGBE_READ_REG(hw, IXGBE_MRFC);
472 IXGBE_READ_REG(hw, IXGBE_RLEC);
473 IXGBE_READ_REG(hw, IXGBE_LXONTXC);
474 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
/* 82599 and newer relocated the link XON/XOFF Rx counters */
475 if (hw->mac.type >= ixgbe_mac_82599EB) {
476 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
477 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
479 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
480 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
/* per-priority pause counters (8 traffic classes) */
483 for (i = 0; i < 8; i++) {
484 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
485 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
486 if (hw->mac.type >= ixgbe_mac_82599EB) {
487 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
488 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
490 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
491 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
494 if (hw->mac.type >= ixgbe_mac_82599EB)
495 for (i = 0; i < 8; i++)
496 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
/* packet-size histogram and general Rx/Tx counters */
497 IXGBE_READ_REG(hw, IXGBE_PRC64);
498 IXGBE_READ_REG(hw, IXGBE_PRC127);
499 IXGBE_READ_REG(hw, IXGBE_PRC255);
500 IXGBE_READ_REG(hw, IXGBE_PRC511);
501 IXGBE_READ_REG(hw, IXGBE_PRC1023);
502 IXGBE_READ_REG(hw, IXGBE_PRC1522);
503 IXGBE_READ_REG(hw, IXGBE_GPRC);
504 IXGBE_READ_REG(hw, IXGBE_BPRC);
505 IXGBE_READ_REG(hw, IXGBE_MPRC);
506 IXGBE_READ_REG(hw, IXGBE_GPTC);
507 IXGBE_READ_REG(hw, IXGBE_GORCL);
508 IXGBE_READ_REG(hw, IXGBE_GORCH);
509 IXGBE_READ_REG(hw, IXGBE_GOTCL);
510 IXGBE_READ_REG(hw, IXGBE_GOTCH);
511 if (hw->mac.type == ixgbe_mac_82598EB)
512 for (i = 0; i < 8; i++)
513 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
514 IXGBE_READ_REG(hw, IXGBE_RUC);
515 IXGBE_READ_REG(hw, IXGBE_RFC);
516 IXGBE_READ_REG(hw, IXGBE_ROC);
517 IXGBE_READ_REG(hw, IXGBE_RJC);
518 IXGBE_READ_REG(hw, IXGBE_MNGPRC);
519 IXGBE_READ_REG(hw, IXGBE_MNGPDC);
520 IXGBE_READ_REG(hw, IXGBE_MNGPTC);
521 IXGBE_READ_REG(hw, IXGBE_TORL);
522 IXGBE_READ_REG(hw, IXGBE_TORH);
523 IXGBE_READ_REG(hw, IXGBE_TPR);
524 IXGBE_READ_REG(hw, IXGBE_TPT);
525 IXGBE_READ_REG(hw, IXGBE_PTC64);
526 IXGBE_READ_REG(hw, IXGBE_PTC127);
527 IXGBE_READ_REG(hw, IXGBE_PTC255);
528 IXGBE_READ_REG(hw, IXGBE_PTC511);
529 IXGBE_READ_REG(hw, IXGBE_PTC1023);
530 IXGBE_READ_REG(hw, IXGBE_PTC1522);
531 IXGBE_READ_REG(hw, IXGBE_MPTC);
532 IXGBE_READ_REG(hw, IXGBE_BPTC);
/* per-queue counters (16 queues) */
533 for (i = 0; i < 16; i++) {
534 IXGBE_READ_REG(hw, IXGBE_QPRC(i));
535 IXGBE_READ_REG(hw, IXGBE_QPTC(i));
536 if (hw->mac.type >= ixgbe_mac_82599EB) {
537 IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
538 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
539 IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
540 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
541 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
543 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
544 IXGBE_READ_REG(hw, IXGBE_QBTC(i));
/* X540 keeps additional error counters behind PHY registers; the
 * values read into &i are discarded just like the register reads. */
548 if (hw->mac.type == ixgbe_mac_X540) {
550 ixgbe_identify_phy(hw);
551 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
552 IXGBE_MDIO_PCS_DEV_TYPE, &i);
553 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
554 IXGBE_MDIO_PCS_DEV_TYPE, &i);
555 hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
556 IXGBE_MDIO_PCS_DEV_TYPE, &i);
557 hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
558 IXGBE_MDIO_PCS_DEV_TYPE, &i);
561 return IXGBE_SUCCESS;
565 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
566 * @hw: pointer to hardware structure
567 * @pba_num: stores the part number string from the EEPROM
568 * @pba_num_size: part number string buffer length
570 * Reads the part number string from the EEPROM.
572 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
581 DEBUGFUNC("ixgbe_read_pba_string_generic");
583 if (pba_num == NULL) {
584 DEBUGOUT("PBA string buffer was null\n");
585 return IXGBE_ERR_INVALID_ARGUMENT;
/* read the two PBA words; in string format the second is a pointer */
588 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
590 DEBUGOUT("NVM Read Error\n");
594 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
596 DEBUGOUT("NVM Read Error\n");
601 * if data is not ptr guard the PBA must be in legacy format which
602 * means pba_ptr is actually our second data word for the PBA number
603 * and we can decode it into an ascii string
605 if (data != IXGBE_PBANUM_PTR_GUARD) {
606 DEBUGOUT("NVM PBA number is not stored as string\n");
608 /* we will need 11 characters to store the PBA */
609 if (pba_num_size < 11) {
610 DEBUGOUT("PBA string buffer too small\n");
611 return IXGBE_ERR_NO_SPACE;
614 /* extract hex string from data and pba_ptr */
615 pba_num[0] = (data >> 12) & 0xF;
616 pba_num[1] = (data >> 8) & 0xF;
617 pba_num[2] = (data >> 4) & 0xF;
618 pba_num[3] = data & 0xF;
619 pba_num[4] = (pba_ptr >> 12) & 0xF;
620 pba_num[5] = (pba_ptr >> 8) & 0xF;
/* NOTE(review): pba_num[6]/pba_num[7] (the '-' separator region) are
 * assigned in lines not present in this excerpt — verify upstream. */
623 pba_num[8] = (pba_ptr >> 4) & 0xF;
624 pba_num[9] = pba_ptr & 0xF;
626 /* put a null character on the end of our string */
629 /* switch all the data but the '-' to hex char */
630 for (offset = 0; offset < 10; offset++) {
631 if (pba_num[offset] < 0xA)
632 pba_num[offset] += '0';
633 else if (pba_num[offset] < 0x10)
634 pba_num[offset] += 'A' - 0xA;
637 return IXGBE_SUCCESS;
/* string format: pba_ptr points at a length-prefixed EEPROM section */
640 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
642 DEBUGOUT("NVM Read Error\n");
646 if (length == 0xFFFF || length == 0) {
647 DEBUGOUT("NVM PBA number section invalid length\n");
648 return IXGBE_ERR_PBA_SECTION;
651 /* check if pba_num buffer is big enough */
652 if (pba_num_size < (((u32)length * 2) - 1)) {
653 DEBUGOUT("PBA string buffer too small\n");
654 return IXGBE_ERR_NO_SPACE;
657 /* trim pba length from start of string */
/* each EEPROM word packs two ASCII characters, high byte first */
661 for (offset = 0; offset < length; offset++) {
662 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
664 DEBUGOUT("NVM Read Error\n");
667 pba_num[offset * 2] = (u8)(data >> 8);
668 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
670 pba_num[offset * 2] = '\0';
672 return IXGBE_SUCCESS;
676 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
677 * @hw: pointer to hardware structure
678 * @pba_num: stores the part number from the EEPROM
680 * Reads the part number from the EEPROM.
682 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
687 DEBUGFUNC("ixgbe_read_pba_num_generic");
689 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
691 DEBUGOUT("NVM Read Error\n");
693 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
694 DEBUGOUT("NVM Not supported\n");
695 return IXGBE_NOT_IMPLEMENTED;
697 *pba_num = (u32)(data << 16);
699 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
701 DEBUGOUT("NVM Read Error\n");
706 return IXGBE_SUCCESS;
/* ixgbe_read_pba_raw - reads the raw PBA words (and PBA block if present) */
711 * @hw: pointer to the HW structure
712 * @eeprom_buf: optional pointer to EEPROM image
713 * @eeprom_buf_size: size of EEPROM image in words
714 * @max_pba_block_size: PBA block size limit
715 * @pba: pointer to output PBA structure
717 * Reads PBA from EEPROM image when eeprom_buf is not NULL.
718 * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
721 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
722 u32 eeprom_buf_size, u16 max_pba_block_size,
723 struct ixgbe_pba *pba)
729 return IXGBE_ERR_PARAM;
/* fetch the two PBA words, from the device or from the image */
731 if (eeprom_buf == NULL) {
732 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
737 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
738 pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
739 pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
741 return IXGBE_ERR_PARAM;
/* ptr-guard in word[0] means word[1] points at a separate PBA block,
 * which must fit within max_pba_block_size and the caller's buffer */
745 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
746 if (pba->pba_block == NULL)
747 return IXGBE_ERR_PARAM;
749 ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
755 if (pba_block_size > max_pba_block_size)
756 return IXGBE_ERR_PARAM;
758 if (eeprom_buf == NULL) {
759 ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
765 if (eeprom_buf_size > (u32)(pba->word[1] +
766 pba->pba_block[0])) {
767 memcpy(pba->pba_block,
768 &eeprom_buf[pba->word[1]],
769 pba_block_size * sizeof(u16));
771 return IXGBE_ERR_PARAM;
776 return IXGBE_SUCCESS;
780 * ixgbe_write_pba_raw
781 * @hw: pointer to the HW structure
782 * @eeprom_buf: optional pointer to EEPROM image
783 * @eeprom_buf_size: size of EEPROM image in words
784 * @pba: pointer to PBA structure
786 * Writes PBA to EEPROM image when eeprom_buf is not NULL.
787 * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
790 s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
791 u32 eeprom_buf_size, struct ixgbe_pba *pba)
796 return IXGBE_ERR_PARAM;
/* store the two PBA words, to the device or into the image */
798 if (eeprom_buf == NULL) {
799 ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
804 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
805 eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
806 eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
808 return IXGBE_ERR_PARAM;
/* ptr-guard in word[0] means a separate PBA block must also be
 * written at the offset held in word[1] */
812 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
813 if (pba->pba_block == NULL)
814 return IXGBE_ERR_PARAM;
816 if (eeprom_buf == NULL) {
817 ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
823 if (eeprom_buf_size > (u32)(pba->word[1] +
824 pba->pba_block[0])) {
/* pba_block[0] is the block length in words */
825 memcpy(&eeprom_buf[pba->word[1]],
827 pba->pba_block[0] * sizeof(u16));
829 return IXGBE_ERR_PARAM;
834 return IXGBE_SUCCESS;
838 * ixgbe_get_pba_block_size
839 * @hw: pointer to the HW structure
840 * @eeprom_buf: optional pointer to EEPROM image
841 * @eeprom_buf_size: size of EEPROM image in words
* @pba_block_size: pointer to output variable
844 * Returns the size of the PBA block in words. Function operates on EEPROM
845 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
849 s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
850 u32 eeprom_buf_size, u16 *pba_block_size)
856 DEBUGFUNC("ixgbe_get_pba_block_size");
/* fetch the two PBA words, from the device or from the image */
858 if (eeprom_buf == NULL) {
859 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
864 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
865 pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
866 pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
868 return IXGBE_ERR_PARAM;
/* ptr-guard present: the first word of the PBA section is its length */
872 if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
873 if (eeprom_buf == NULL) {
874 ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
879 if (eeprom_buf_size > pba_word[1])
880 length = eeprom_buf[pba_word[1] + 0];
882 return IXGBE_ERR_PARAM;
885 if (length == 0xFFFF || length == 0)
886 return IXGBE_ERR_PBA_SECTION;
888 /* PBA number in legacy format, there is no PBA Block. */
892 if (pba_block_size != NULL)
893 *pba_block_size = length;
895 return IXGBE_SUCCESS;
899 * ixgbe_get_mac_addr_generic - Generic get MAC address
900 * @hw: pointer to hardware structure
901 * @mac_addr: Adapter MAC address
903 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
904 * A reset of the adapter must be performed prior to calling this function
905 * in order for the MAC address to have been loaded from the EEPROM into RAR0
907 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
913 DEBUGFUNC("ixgbe_get_mac_addr_generic");
915 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
916 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
918 for (i = 0; i < 4; i++)
919 mac_addr[i] = (u8)(rar_low >> (i*8));
921 for (i = 0; i < 2; i++)
922 mac_addr[i+4] = (u8)(rar_high >> (i*8));
924 return IXGBE_SUCCESS;
928 * ixgbe_get_bus_info_generic - Generic set PCI bus info
929 * @hw: pointer to hardware structure
931 * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
933 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
935 struct ixgbe_mac_info *mac = &hw->mac;
938 DEBUGFUNC("ixgbe_get_bus_info_generic");
940 hw->bus.type = ixgbe_bus_type_pci_express;
942 /* Get the negotiated link width and speed from PCI config space */
943 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
945 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
946 case IXGBE_PCI_LINK_WIDTH_1:
947 hw->bus.width = ixgbe_bus_width_pcie_x1;
949 case IXGBE_PCI_LINK_WIDTH_2:
950 hw->bus.width = ixgbe_bus_width_pcie_x2;
952 case IXGBE_PCI_LINK_WIDTH_4:
953 hw->bus.width = ixgbe_bus_width_pcie_x4;
955 case IXGBE_PCI_LINK_WIDTH_8:
956 hw->bus.width = ixgbe_bus_width_pcie_x8;
959 hw->bus.width = ixgbe_bus_width_unknown;
963 switch (link_status & IXGBE_PCI_LINK_SPEED) {
964 case IXGBE_PCI_LINK_SPEED_2500:
965 hw->bus.speed = ixgbe_bus_speed_2500;
967 case IXGBE_PCI_LINK_SPEED_5000:
968 hw->bus.speed = ixgbe_bus_speed_5000;
970 case IXGBE_PCI_LINK_SPEED_8000:
971 hw->bus.speed = ixgbe_bus_speed_8000;
974 hw->bus.speed = ixgbe_bus_speed_unknown;
978 mac->ops.set_lan_id(hw);
980 return IXGBE_SUCCESS;
984 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
985 * @hw: pointer to the HW structure
987 * Determines the LAN function id by reading memory-mapped registers
988 * and swaps the port value if requested.
990 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
992 struct ixgbe_bus_info *bus = &hw->bus;
995 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
997 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
998 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
999 bus->lan_id = bus->func;
1001 /* check for a port swap */
1002 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
1003 if (reg & IXGBE_FACTPS_LFS)
1008 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1009 * @hw: pointer to hardware structure
1011 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1012 * disables transmit and receive units. The adapter_stopped flag is used by
1013 * the shared code and drivers to determine if the adapter is in a stopped
1014 * state and should not touch the hardware.
1016 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
1021 DEBUGFUNC("ixgbe_stop_adapter_generic");
1024 * Set the adapter_stopped flag so other driver functions stop touching
1027 hw->adapter_stopped = true;
1029 /* Disable the receive unit */
1030 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
1032 /* Clear interrupt mask to stop interrupts from being generated */
1033 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1035 /* Clear any pending interrupts, flush previous writes */
1036 IXGBE_READ_REG(hw, IXGBE_EICR);
1038 /* Disable the transmit unit. Each queue must be disabled. */
1039 for (i = 0; i < hw->mac.max_tx_queues; i++)
1040 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
1042 /* Disable the receive unit by stopping each queue */
1043 for (i = 0; i < hw->mac.max_rx_queues; i++) {
1044 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1045 reg_val &= ~IXGBE_RXDCTL_ENABLE;
1046 reg_val |= IXGBE_RXDCTL_SWFLSH;
1047 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
1050 /* flush all queues disables */
1051 IXGBE_WRITE_FLUSH(hw);
1055 * Prevent the PCI-E bus from from hanging by disabling PCI-E master
1056 * access and verify no pending requests
1058 return ixgbe_disable_pcie_master(hw);
1062 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1063 * @hw: pointer to hardware structure
1064 * @index: led number to turn on
1066 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1068 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1070 DEBUGFUNC("ixgbe_led_on_generic");
1072 /* To turn on the LED, set mode to ON. */
1073 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1074 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1075 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1076 IXGBE_WRITE_FLUSH(hw);
1078 return IXGBE_SUCCESS;
1082 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
1083 * @hw: pointer to hardware structure
1084 * @index: led number to turn off
1086 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1088 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1090 DEBUGFUNC("ixgbe_led_off_generic");
1092 /* To turn off the LED, set mode to OFF. */
1093 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1094 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1095 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1096 IXGBE_WRITE_FLUSH(hw);
1098 return IXGBE_SUCCESS;
1102 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1103 * @hw: pointer to hardware structure
1105 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1106 * ixgbe_hw struct in order to set up EEPROM access.
1108 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
1110 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1114 DEBUGFUNC("ixgbe_init_eeprom_params_generic");
/* Only compute the parameters on the first call (type still
 * uninitialized); subsequent calls keep the cached values. */
1116 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1117 eeprom->type = ixgbe_eeprom_none;
1118 /* Set default semaphore delay to 10ms which is a well
1120 eeprom->semaphore_delay = 10;
1121 /* Clear EEPROM page size, it will be initialized as needed */
1122 eeprom->word_page_size = 0;
1125 * Check for EEPROM present first.
1126 * If not present leave as none
1128 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1129 if (eec & IXGBE_EEC_PRES) {
1130 eeprom->type = ixgbe_eeprom_spi;
1133 * SPI EEPROM is assumed here. This code would need to
1134 * change if a future EEPROM is not SPI.
1136 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1137 IXGBE_EEC_SIZE_SHIFT);
1138 eeprom->word_size = 1 << (eeprom_size +
1139 IXGBE_EEPROM_WORD_SIZE_SHIFT);
/* EEC ADDR_SIZE selects 16-bit vs 8-bit SPI addressing */
1142 if (eec & IXGBE_EEC_ADDR_SIZE)
1143 eeprom->address_bits = 16;
1145 eeprom->address_bits = 8;
1146 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
1147 "%d\n", eeprom->type, eeprom->word_size,
1148 eeprom->address_bits);
1151 return IXGBE_SUCCESS;
1155 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1156 * @hw: pointer to hardware structure
1157 * @offset: offset within the EEPROM to write
1158 * @words: number of word(s)
1159 * @data: 16 bit word(s) to write to EEPROM
1161 * Reads 16 bit word(s) from EEPROM through bit-bang method
1163 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1164 u16 words, u16 *data)
1166 s32 status = IXGBE_SUCCESS;
1169 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1171 hw->eeprom.ops.init_params(hw);
1174 status = IXGBE_ERR_INVALID_ARGUMENT;
1178 if (offset + words > hw->eeprom.word_size) {
1179 status = IXGBE_ERR_EEPROM;
1184 * The EEPROM page size cannot be queried from the chip. We do lazy
1185 * initialization. It is worth to do that when we write large buffer.
1187 if ((hw->eeprom.word_page_size == 0) &&
1188 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1189 ixgbe_detect_eeprom_page_size_generic(hw, offset);
1192 * We cannot hold synchronization semaphores for too long
1193 * to avoid other entity starvation. However it is more efficient
1194 * to read in bursts than synchronizing access for each word.
1196 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1197 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1198 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1199 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1202 if (status != IXGBE_SUCCESS)
1211 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1212 * @hw: pointer to hardware structure
1213 * @offset: offset within the EEPROM to be written to
1214 * @words: number of word(s)
1215 * @data: 16 bit word(s) to be written to the EEPROM
1217 * If ixgbe_eeprom_update_checksum is not called after this function, the
1218 * EEPROM will most likely contain an invalid checksum.
1220 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1221 u16 words, u16 *data)
1227 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
1229 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
1231 /* Prepare the EEPROM for writing */
1232 status = ixgbe_acquire_eeprom(hw);
1234 if (status == IXGBE_SUCCESS) {
1235 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1236 ixgbe_release_eeprom(hw);
1237 status = IXGBE_ERR_EEPROM;
1241 if (status == IXGBE_SUCCESS) {
1242 for (i = 0; i < words; i++) {
1243 ixgbe_standby_eeprom(hw);
1245 /* Send the WRITE ENABLE command (8 bit opcode ) */
1246 ixgbe_shift_out_eeprom_bits(hw,
1247 IXGBE_EEPROM_WREN_OPCODE_SPI,
1248 IXGBE_EEPROM_OPCODE_BITS);
1250 ixgbe_standby_eeprom(hw);
1253 * Some SPI eeproms use the 8th address bit embedded
1256 if ((hw->eeprom.address_bits == 8) &&
1257 ((offset + i) >= 128))
1258 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1260 /* Send the Write command (8-bit opcode + addr) */
1261 ixgbe_shift_out_eeprom_bits(hw, write_opcode,
1262 IXGBE_EEPROM_OPCODE_BITS);
1263 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1264 hw->eeprom.address_bits);
1266 page_size = hw->eeprom.word_page_size;
1268 /* Send the data in burst via SPI*/
1271 word = (word >> 8) | (word << 8);
1272 ixgbe_shift_out_eeprom_bits(hw, word, 16);
1277 /* do not wrap around page */
1278 if (((offset + i) & (page_size - 1)) ==
1281 } while (++i < words);
1283 ixgbe_standby_eeprom(hw);
1286 /* Done with writing - release the EEPROM */
1287 ixgbe_release_eeprom(hw);
1294 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1295 * @hw: pointer to hardware structure
1296 * @offset: offset within the EEPROM to be written to
1297 * @data: 16 bit word to be written to the EEPROM
1299 * If ixgbe_eeprom_update_checksum is not called after this function, the
1300 * EEPROM will most likely contain an invalid checksum.
1302 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1306 DEBUGFUNC("ixgbe_write_eeprom_generic");
1308 hw->eeprom.ops.init_params(hw);
1310 if (offset >= hw->eeprom.word_size) {
1311 status = IXGBE_ERR_EEPROM;
1315 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1322 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1323 * @hw: pointer to hardware structure
1324 * @offset: offset within the EEPROM to be read
1325 * @data: read 16 bit words(s) from EEPROM
1326 * @words: number of word(s)
1328 * Reads 16 bit word(s) from EEPROM through bit-bang method
1330 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1331 u16 words, u16 *data)
1333 s32 status = IXGBE_SUCCESS;
1336 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1338 hw->eeprom.ops.init_params(hw);
1341 status = IXGBE_ERR_INVALID_ARGUMENT;
1345 if (offset + words > hw->eeprom.word_size) {
1346 status = IXGBE_ERR_EEPROM;
1351 * We cannot hold synchronization semaphores for too long
1352 * to avoid other entity starvation. However it is more efficient
1353 * to read in bursts than synchronizing access for each word.
1355 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1356 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1357 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1359 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1362 if (status != IXGBE_SUCCESS)
1371 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1372 * @hw: pointer to hardware structure
1373 * @offset: offset within the EEPROM to be read
1374 * @words: number of word(s)
1375 * @data: read 16 bit word(s) from EEPROM
1377 * Reads 16 bit word(s) from EEPROM through bit-bang method
1379 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1380 u16 words, u16 *data)
1384 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1387 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1389 /* Prepare the EEPROM for reading */
1390 status = ixgbe_acquire_eeprom(hw);
1392 if (status == IXGBE_SUCCESS) {
1393 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1394 ixgbe_release_eeprom(hw);
1395 status = IXGBE_ERR_EEPROM;
1399 if (status == IXGBE_SUCCESS) {
1400 for (i = 0; i < words; i++) {
1401 ixgbe_standby_eeprom(hw);
1403 * Some SPI eeproms use the 8th address bit embedded
1406 if ((hw->eeprom.address_bits == 8) &&
1407 ((offset + i) >= 128))
1408 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1410 /* Send the READ command (opcode + addr) */
1411 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1412 IXGBE_EEPROM_OPCODE_BITS);
1413 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1414 hw->eeprom.address_bits);
1416 /* Read the data. */
1417 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1418 data[i] = (word_in >> 8) | (word_in << 8);
1421 /* End this read operation */
1422 ixgbe_release_eeprom(hw);
1429 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1430 * @hw: pointer to hardware structure
1431 * @offset: offset within the EEPROM to be read
1432 * @data: read 16 bit value from EEPROM
1434 * Reads 16 bit value from EEPROM through bit-bang method
1436 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1441 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1443 hw->eeprom.ops.init_params(hw);
1445 if (offset >= hw->eeprom.word_size) {
1446 status = IXGBE_ERR_EEPROM;
1450 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1457 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1458 * @hw: pointer to hardware structure
1459 * @offset: offset of word in the EEPROM to read
1460 * @words: number of word(s)
1461 * @data: 16 bit word(s) from the EEPROM
1463 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1465 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1466 u16 words, u16 *data)
1469 s32 status = IXGBE_SUCCESS;
1472 DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1474 hw->eeprom.ops.init_params(hw);
1477 status = IXGBE_ERR_INVALID_ARGUMENT;
1478 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1482 if (offset >= hw->eeprom.word_size) {
1483 status = IXGBE_ERR_EEPROM;
1484 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1488 for (i = 0; i < words; i++) {
1489 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1490 IXGBE_EEPROM_RW_REG_START;
1492 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1493 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1495 if (status == IXGBE_SUCCESS) {
1496 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1497 IXGBE_EEPROM_RW_REG_DATA);
1499 DEBUGOUT("Eeprom read timed out\n");
1508 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1509 * @hw: pointer to hardware structure
1510 * @offset: offset within the EEPROM to be used as a scratch pad
1512 * Discover EEPROM page size by writing marching data at given offset.
1513 * This function is called only when we are writing a new large buffer
1514 * at given offset so the data would be overwritten anyway.
1516 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1519 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1520 s32 status = IXGBE_SUCCESS;
1523 DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1525 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1528 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1529 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1530 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1531 hw->eeprom.word_page_size = 0;
1532 if (status != IXGBE_SUCCESS)
1535 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1536 if (status != IXGBE_SUCCESS)
1540 * When writing in burst more than the actual page size
1541 * EEPROM address wraps around current page.
1543 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1545 DEBUGOUT1("Detected EEPROM page size = %d words.",
1546 hw->eeprom.word_page_size);
1552 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1553 * @hw: pointer to hardware structure
1554 * @offset: offset of word in the EEPROM to read
1555 * @data: word read from the EEPROM
1557 * Reads a 16 bit word from the EEPROM using the EERD register.
1559 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1561 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1565 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1566 * @hw: pointer to hardware structure
1567 * @offset: offset of word in the EEPROM to write
1568 * @words: number of word(s)
1569 * @data: word(s) write to the EEPROM
1571 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1573 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1574 u16 words, u16 *data)
1577 s32 status = IXGBE_SUCCESS;
1580 DEBUGFUNC("ixgbe_write_eewr_generic");
1582 hw->eeprom.ops.init_params(hw);
1585 status = IXGBE_ERR_INVALID_ARGUMENT;
1586 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1590 if (offset >= hw->eeprom.word_size) {
1591 status = IXGBE_ERR_EEPROM;
1592 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1596 for (i = 0; i < words; i++) {
1597 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1598 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1599 IXGBE_EEPROM_RW_REG_START;
1601 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1602 if (status != IXGBE_SUCCESS) {
1603 DEBUGOUT("Eeprom write EEWR timed out\n");
1607 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1609 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1610 if (status != IXGBE_SUCCESS) {
1611 DEBUGOUT("Eeprom write EEWR timed out\n");
1621 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1622 * @hw: pointer to hardware structure
1623 * @offset: offset of word in the EEPROM to write
1624 * @data: word write to the EEPROM
1626 * Write a 16 bit word to the EEPROM using the EEWR register.
1628 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1630 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1634 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1635 * @hw: pointer to hardware structure
1636 * @ee_reg: EEPROM flag for polling
1638 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1639 * read or write is done respectively.
1641 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1645 s32 status = IXGBE_ERR_EEPROM;
1647 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1649 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1650 if (ee_reg == IXGBE_NVM_POLL_READ)
1651 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1653 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1655 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1656 status = IXGBE_SUCCESS;
1662 if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1663 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1664 "EEPROM read/write done polling timed out");
1670 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1671 * @hw: pointer to hardware structure
1673 * Prepares EEPROM for access using bit-bang method. This function should
1674 * be called before issuing a command to the EEPROM.
1676 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1678 s32 status = IXGBE_SUCCESS;
1682 DEBUGFUNC("ixgbe_acquire_eeprom");
1684 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1686 status = IXGBE_ERR_SWFW_SYNC;
1688 if (status == IXGBE_SUCCESS) {
1689 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1691 /* Request EEPROM Access */
1692 eec |= IXGBE_EEC_REQ;
1693 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1695 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1696 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1697 if (eec & IXGBE_EEC_GNT)
1702 /* Release if grant not acquired */
1703 if (!(eec & IXGBE_EEC_GNT)) {
1704 eec &= ~IXGBE_EEC_REQ;
1705 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1706 DEBUGOUT("Could not acquire EEPROM grant\n");
1708 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1709 status = IXGBE_ERR_EEPROM;
1712 /* Setup EEPROM for Read/Write */
1713 if (status == IXGBE_SUCCESS) {
1714 /* Clear CS and SK */
1715 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1716 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1717 IXGBE_WRITE_FLUSH(hw);
1725 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1726 * @hw: pointer to hardware structure
1728 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1730 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1732 s32 status = IXGBE_ERR_EEPROM;
1737 DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1740 /* Get SMBI software semaphore between device drivers first */
1741 for (i = 0; i < timeout; i++) {
1743 * If the SMBI bit is 0 when we read it, then the bit will be
1744 * set and we have the semaphore
1746 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1747 if (!(swsm & IXGBE_SWSM_SMBI)) {
1748 status = IXGBE_SUCCESS;
1755 DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1758 * this release is particularly important because our attempts
1759 * above to get the semaphore may have succeeded, and if there
1760 * was a timeout, we should unconditionally clear the semaphore
1761 * bits to free the driver to make progress
1763 ixgbe_release_eeprom_semaphore(hw);
1768 * If the SMBI bit is 0 when we read it, then the bit will be
1769 * set and we have the semaphore
1771 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1772 if (!(swsm & IXGBE_SWSM_SMBI))
1773 status = IXGBE_SUCCESS;
1776 /* Now get the semaphore between SW/FW through the SWESMBI bit */
1777 if (status == IXGBE_SUCCESS) {
1778 for (i = 0; i < timeout; i++) {
1779 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1781 /* Set the SW EEPROM semaphore bit to request access */
1782 swsm |= IXGBE_SWSM_SWESMBI;
1783 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1786 * If we set the bit successfully then we got the
1789 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1790 if (swsm & IXGBE_SWSM_SWESMBI)
1797 * Release semaphores and return error if SW EEPROM semaphore
1798 * was not granted because we don't have access to the EEPROM
1801 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1802 "SWESMBI Software EEPROM semaphore not granted.\n");
1803 ixgbe_release_eeprom_semaphore(hw);
1804 status = IXGBE_ERR_EEPROM;
1807 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1808 "Software semaphore SMBI between device drivers "
1816 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1817 * @hw: pointer to hardware structure
1819 * This function clears hardware semaphore bits.
1821 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1825 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1827 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1829 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1830 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1831 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1832 IXGBE_WRITE_FLUSH(hw);
1836 * ixgbe_ready_eeprom - Polls for EEPROM ready
1837 * @hw: pointer to hardware structure
1839 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1841 s32 status = IXGBE_SUCCESS;
1845 DEBUGFUNC("ixgbe_ready_eeprom");
1848 * Read "Status Register" repeatedly until the LSB is cleared. The
1849 * EEPROM will signal that the command has been completed by clearing
1850 * bit 0 of the internal status register. If it's not cleared within
1851 * 5 milliseconds, then error out.
1853 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1854 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1855 IXGBE_EEPROM_OPCODE_BITS);
1856 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1857 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1861 ixgbe_standby_eeprom(hw);
1865 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1866 * devices (and only 0-5mSec on 5V devices)
1868 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1869 DEBUGOUT("SPI EEPROM Status error\n");
1870 status = IXGBE_ERR_EEPROM;
1877 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1878 * @hw: pointer to hardware structure
1880 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1884 DEBUGFUNC("ixgbe_standby_eeprom");
1886 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1888 /* Toggle CS to flush commands */
1889 eec |= IXGBE_EEC_CS;
1890 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1891 IXGBE_WRITE_FLUSH(hw);
1893 eec &= ~IXGBE_EEC_CS;
1894 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1895 IXGBE_WRITE_FLUSH(hw);
1900 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
1901 * @hw: pointer to hardware structure
1902 * @data: data to send to the EEPROM
1903 * @count: number of bits to shift out
1905 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
1912 DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
1914 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1917 * Mask is used to shift "count" bits of "data" out to the EEPROM
1918 * one bit at a time. Determine the starting bit based on count
1920 mask = 0x01 << (count - 1);
1922 for (i = 0; i < count; i++) {
1924 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
1925 * "1", and then raising and then lowering the clock (the SK
1926 * bit controls the clock input to the EEPROM). A "0" is
1927 * shifted out to the EEPROM by setting "DI" to "0" and then
1928 * raising and then lowering the clock.
1931 eec |= IXGBE_EEC_DI;
1933 eec &= ~IXGBE_EEC_DI;
1935 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1936 IXGBE_WRITE_FLUSH(hw);
1940 ixgbe_raise_eeprom_clk(hw, &eec);
1941 ixgbe_lower_eeprom_clk(hw, &eec);
1944 * Shift mask to signify next bit of data to shift in to the
1950 /* We leave the "DI" bit set to "0" when we leave this routine. */
1951 eec &= ~IXGBE_EEC_DI;
1952 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
1953 IXGBE_WRITE_FLUSH(hw);
1957 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
1958 * @hw: pointer to hardware structure
1960 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
1966 DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
1969 * In order to read a register from the EEPROM, we need to shift
1970 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
1971 * the clock input to the EEPROM (setting the SK bit), and then reading
1972 * the value of the "DO" bit. During this "shifting in" process the
1973 * "DI" bit should always be clear.
1975 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1977 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
1979 for (i = 0; i < count; i++) {
1981 ixgbe_raise_eeprom_clk(hw, &eec);
1983 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
1985 eec &= ~(IXGBE_EEC_DI);
1986 if (eec & IXGBE_EEC_DO)
1989 ixgbe_lower_eeprom_clk(hw, &eec);
1996 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
1997 * @hw: pointer to hardware structure
1998 * @eec: EEC register's current value
2000 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2002 DEBUGFUNC("ixgbe_raise_eeprom_clk");
2005 * Raise the clock input to the EEPROM
2006 * (setting the SK bit), then delay
2008 *eec = *eec | IXGBE_EEC_SK;
2009 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
2010 IXGBE_WRITE_FLUSH(hw);
2015 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2016 * @hw: pointer to hardware structure
2017 * @eecd: EECD's current value
2019 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2021 DEBUGFUNC("ixgbe_lower_eeprom_clk");
2024 * Lower the clock input to the EEPROM (clearing the SK bit), then
2027 *eec = *eec & ~IXGBE_EEC_SK;
2028 IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
2029 IXGBE_WRITE_FLUSH(hw);
2034 * ixgbe_release_eeprom - Release EEPROM, release semaphores
2035 * @hw: pointer to hardware structure
2037 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2041 DEBUGFUNC("ixgbe_release_eeprom");
2043 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
2045 eec |= IXGBE_EEC_CS; /* Pull CS high */
2046 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2048 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
2049 IXGBE_WRITE_FLUSH(hw);
2053 /* Stop requesting EEPROM access */
2054 eec &= ~IXGBE_EEC_REQ;
2055 IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
2057 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2059 /* Delay before attempt to obtain semaphore again to allow FW access */
2060 msec_delay(hw->eeprom.semaphore_delay);
2064 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2065 * @hw: pointer to hardware structure
2067 u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
2076 DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
2078 /* Include 0x0-0x3F in the checksum */
2079 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
2080 if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
2081 DEBUGOUT("EEPROM read failed\n");
2087 /* Include all data from pointers except for the fw pointer */
2088 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
2089 hw->eeprom.ops.read(hw, i, &pointer);
2091 /* Make sure the pointer seems valid */
2092 if (pointer != 0xFFFF && pointer != 0) {
2093 hw->eeprom.ops.read(hw, pointer, &length);
2095 if (length != 0xFFFF && length != 0) {
2096 for (j = pointer+1; j <= pointer+length; j++) {
2097 hw->eeprom.ops.read(hw, j, &word);
2104 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
2110 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2111 * @hw: pointer to hardware structure
2112 * @checksum_val: calculated checksum
2114 * Performs checksum calculation and validates the EEPROM checksum. If the
2115 * caller does not need checksum_val, the value can be NULL.
2117 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2122 u16 read_checksum = 0;
2124 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2127 * Read the first word from the EEPROM. If this times out or fails, do
2128 * not continue or we could be in for a very long wait while every
2131 status = hw->eeprom.ops.read(hw, 0, &checksum);
2133 if (status == IXGBE_SUCCESS) {
2134 checksum = hw->eeprom.ops.calc_checksum(hw);
2136 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2139 * Verify read checksum from EEPROM is the same as
2140 * calculated checksum
2142 if (read_checksum != checksum)
2143 status = IXGBE_ERR_EEPROM_CHECKSUM;
2145 /* If the user cares, return the calculated checksum */
2147 *checksum_val = checksum;
2149 DEBUGOUT("EEPROM read failed\n");
2156 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2157 * @hw: pointer to hardware structure
2159 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2164 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2167 * Read the first word from the EEPROM. If this times out or fails, do
2168 * not continue or we could be in for a very long wait while every
2171 status = hw->eeprom.ops.read(hw, 0, &checksum);
2173 if (status == IXGBE_SUCCESS) {
2174 checksum = hw->eeprom.ops.calc_checksum(hw);
2175 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
2178 DEBUGOUT("EEPROM read failed\n");
2185 * ixgbe_validate_mac_addr - Validate MAC address
2186 * @mac_addr: pointer to MAC address.
2188 * Tests a MAC address to ensure it is a valid Individual Address
2190 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2192 s32 status = IXGBE_SUCCESS;
2194 DEBUGFUNC("ixgbe_validate_mac_addr");
2196 /* Make sure it is not a multicast address */
2197 if (IXGBE_IS_MULTICAST(mac_addr)) {
2198 DEBUGOUT("MAC address is multicast\n");
2199 status = IXGBE_ERR_INVALID_MAC_ADDR;
2200 /* Not a broadcast address */
2201 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
2202 DEBUGOUT("MAC address is broadcast\n");
2203 status = IXGBE_ERR_INVALID_MAC_ADDR;
2204 /* Reject the zero address */
2205 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2206 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2207 DEBUGOUT("MAC address is all zeros\n");
2208 status = IXGBE_ERR_INVALID_MAC_ADDR;
2214 * ixgbe_set_rar_generic - Set Rx address register
2215 * @hw: pointer to hardware structure
2216 * @index: Receive address register to write
2217 * @addr: Address to put into receive address register
2218 * @vmdq: VMDq "set" or "pool" index
2219 * @enable_addr: set flag that address is active
2221 * Puts an ethernet address into a receive address register.
2223 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2226 u32 rar_low, rar_high;
2227 u32 rar_entries = hw->mac.num_rar_entries;
2229 DEBUGFUNC("ixgbe_set_rar_generic");
2231 /* Make sure we are using a valid rar index range */
2232 if (index >= rar_entries) {
2233 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2234 "RAR index %d is out of range.\n", index);
2235 return IXGBE_ERR_INVALID_ARGUMENT;
2238 /* setup VMDq pool selection before this RAR gets enabled */
2239 hw->mac.ops.set_vmdq(hw, index, vmdq);
2242 * HW expects these in little endian so we reverse the byte
2243 * order from network order (big endian) to little endian
2245 rar_low = ((u32)addr[0] |
2246 ((u32)addr[1] << 8) |
2247 ((u32)addr[2] << 16) |
2248 ((u32)addr[3] << 24));
2250 * Some parts put the VMDq setting in the extra RAH bits,
2251 * so save everything except the lower 16 bits that hold part
2252 * of the address and the address valid bit.
2254 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2255 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2256 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2258 if (enable_addr != 0)
2259 rar_high |= IXGBE_RAH_AV;
2261 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2262 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2264 return IXGBE_SUCCESS;
2268 * ixgbe_clear_rar_generic - Remove Rx address register
2269 * @hw: pointer to hardware structure
2270 * @index: Receive address register to write
2272 * Clears an ethernet address from a receive address register.
2274 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2277 u32 rar_entries = hw->mac.num_rar_entries;
2279 DEBUGFUNC("ixgbe_clear_rar_generic");
2281 /* Make sure we are using a valid rar index range */
2282 if (index >= rar_entries) {
2283 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2284 "RAR index %d is out of range.\n", index);
2285 return IXGBE_ERR_INVALID_ARGUMENT;
2289 * Some parts put the VMDq setting in the extra RAH bits,
2290 * so save everything except the lower 16 bits that hold part
2291 * of the address and the address valid bit.
2293 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2294 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2296 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2297 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2299 /* clear VMDq pool/queue selection for this RAR */
2300 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2302 return IXGBE_SUCCESS;
2306 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2307 * @hw: pointer to hardware structure
2309 * Places the MAC address in receive address register 0 and clears the rest
2310 * of the receive address registers. Clears the multicast table. Assumes
2311 * the receiver is in reset when the routine is called.
2313 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
2316 u32 rar_entries = hw->mac.num_rar_entries;
2318 DEBUGFUNC("ixgbe_init_rx_addrs_generic");
2321 * If the current mac address is valid, assume it is a software override
2322 * to the permanent address.
2323 * Otherwise, use the permanent address from the eeprom.
2325 if (ixgbe_validate_mac_addr(hw->mac.addr) ==
2326 IXGBE_ERR_INVALID_MAC_ADDR) {
2327 /* Get the MAC address from the RAR0 for later reference */
2328 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2330 DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
2331 hw->mac.addr[0], hw->mac.addr[1],
2333 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2334 hw->mac.addr[4], hw->mac.addr[5]);
2336 /* Setup the receive address. */
2337 DEBUGOUT("Overriding MAC Address in RAR[0]\n");
2338 DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
2339 hw->mac.addr[0], hw->mac.addr[1],
2341 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2342 hw->mac.addr[4], hw->mac.addr[5]);
2344 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2346 /* clear VMDq pool/queue selection for RAR 0 */
2347 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2349 hw->addr_ctrl.overflow_promisc = 0;
2351 hw->addr_ctrl.rar_used_count = 1;
2353 /* Zero out the other receive addresses. */
2354 DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2355 for (i = 1; i < rar_entries; i++) {
2356 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2357 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2361 hw->addr_ctrl.mta_in_use = 0;
2362 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2364 DEBUGOUT(" Clearing MTA\n");
2365 for (i = 0; i < hw->mac.mcft_size; i++)
2366 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2368 ixgbe_init_uta_tables(hw);
2370 return IXGBE_SUCCESS;
2374 * ixgbe_add_uc_addr - Adds a secondary unicast address.
2375 * @hw: pointer to hardware structure
2376 * @addr: new address
2378 * Adds it to unused receive address register or goes into promiscuous mode.
2380 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2382 u32 rar_entries = hw->mac.num_rar_entries;
2385 DEBUGFUNC("ixgbe_add_uc_addr");
2387 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2388 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2391 * Place this address in the RAR if there is room,
2392 * else put the controller into promiscuous mode
2394 if (hw->addr_ctrl.rar_used_count < rar_entries) {
2395 rar = hw->addr_ctrl.rar_used_count;
2396 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2397 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2398 hw->addr_ctrl.rar_used_count++;
2400 hw->addr_ctrl.overflow_promisc++;
2403 DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2407 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2408 * @hw: pointer to hardware structure
2409 * @addr_list: the list of new addresses
2410 * @addr_count: number of addresses
2411 * @next: iterator function to walk the address list
2413 * The given list replaces any existing list. Clears the secondary addrs from
2414 * receive address registers. Uses unused receive address registers for the
2415 * first secondary addresses, and falls back to promiscuous mode as needed.
2417 * Drivers using secondary unicast addresses must set user_set_promisc when
2418 * manually putting the device into promiscuous mode.
/*
 * ixgbe_update_uc_addr_list_generic - replace the secondary unicast list.
 * Clears RAR[1..n] accounting, re-adds each address via ixgbe_add_uc_addr(),
 * then toggles FCTRL.UPE if the list overflowed (or no longer overflows).
 * NOTE(review): elided listing — some braces/labels are not visible here.
 */
2420 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2421 u32 addr_count, ixgbe_mc_addr_itr next)
/* remember prior overflow state so we only flip UPE on a transition */
2425 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2430 DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2433 * Clear accounting of old secondary address list,
2434 * don't count RAR[0]
2436 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2437 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2438 hw->addr_ctrl.overflow_promisc = 0;
2440 /* Zero out the other receive addresses */
2441 DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2442 for (i = 0; i < uc_addr_in_use; i++) {
2443 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2444 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2447 /* Add the new addresses */
2448 for (i = 0; i < addr_count; i++) {
2449 DEBUGOUT(" Adding the secondary addresses:\n");
/* iterator yields next MAC and its VMDq pool */
2450 addr = next(hw, &addr_list, &vmdq);
2451 ixgbe_add_uc_addr(hw, addr, vmdq);
2454 if (hw->addr_ctrl.overflow_promisc) {
2455 /* enable promisc if not already in overflow or set by user */
2456 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2457 DEBUGOUT(" Entering address overflow promisc mode\n");
2458 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2459 fctrl |= IXGBE_FCTRL_UPE;
2460 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2463 /* only disable if set by overflow, not by user */
2464 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2465 DEBUGOUT(" Leaving address overflow promisc mode\n");
2466 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2467 fctrl &= ~IXGBE_FCTRL_UPE;
2468 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2472 DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2473 return IXGBE_SUCCESS;
2477 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2478 * @hw: pointer to hardware structure
2479 * @mc_addr: the multicast address
2481 * Extracts the 12 bits, from a multicast address, to determine which
2482 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
2483 * incoming rx multicast addresses, to determine the bit-vector to check in
2484 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2485 * by the MO field of the MCSTCTRL. The MO field is set during initialization
2486 * to mc_filter_type.
/*
 * ixgbe_mta_vector - compute the 12-bit MTA hash for @mc_addr.
 * Which 12 address bits are used is selected by mc_filter_type (the
 * MCSTCTRL.MO field, set at init time).
 * NOTE(review): elided listing — the per-case `break`s and the final
 * 12-bit mask/return are not visible in this view.
 */
2488 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2492 DEBUGFUNC("ixgbe_mta_vector");
2494 switch (hw->mac.mc_filter_type) {
2495 case 0: /* use bits [47:36] of the address */
2496 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2498 case 1: /* use bits [46:35] of the address */
2499 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2501 case 2: /* use bits [45:34] of the address */
2502 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2504 case 3: /* use bits [43:32] of the address */
2505 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2507 default: /* Invalid mc_filter_type */
2508 DEBUGOUT("MC filter type param set incorrectly\n");
2513 /* vector can only be 12-bits or boundary will be exceeded */
2519 * ixgbe_set_mta - Set bit-vector in multicast table
2520 * @hw: pointer to hardware structure
2521 * @hash_value: Multicast address hash value
2523 * Sets the bit-vector in the multicast table.
/*
 * ixgbe_set_mta - set the shadow-MTA bit corresponding to @mc_addr's hash.
 * The shadow is flushed to hardware by the caller
 * (ixgbe_update_mc_addr_list_generic).
 * NOTE(review): elided listing — local declarations/braces missing from view.
 */
2525 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2531 DEBUGFUNC("ixgbe_set_mta");
2533 hw->addr_ctrl.mta_in_use++;
2535 vector = ixgbe_mta_vector(hw, mc_addr);
2536 DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2539 * The MTA is a register array of 128 32-bit registers. It is treated
2540 * like an array of 4096 bits. We want to set bit
2541 * BitArray[vector_value]. So we figure out what register the bit is
2542 * in, read it, OR in the new bit, then write back the new value. The
2543 * register is determined by the upper 7 bits of the vector value and
2544 * the bit within that register are determined by the lower 5 bits of
/* 128 registers -> 7-bit register index; 32 bits each -> 5-bit bit index */
2547 vector_reg = (vector >> 5) & 0x7F;
2548 vector_bit = vector & 0x1F;
2549 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2553 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2554 * @hw: pointer to hardware structure
2555 * @mc_addr_list: the list of new multicast addresses
2556 * @mc_addr_count: number of addresses
2557 * @next: iterator function to walk the multicast address list
2558 * @clear: flag, when set clears the table beforehand
2560 * When the clear flag is set, the given list replaces any existing list.
2561 * Hashes the given addresses into the multicast table.
/*
 * ixgbe_update_mc_addr_list_generic - hash @mc_addr_list into mta_shadow
 * and flush the shadow to the hardware MTA; enables MCSTCTRL.MFE when any
 * hash bit is in use.
 * NOTE(review): elided listing — the `clear` parameter and its guard around
 * the memset are only partially visible here.
 */
2563 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2564 u32 mc_addr_count, ixgbe_mc_addr_itr next,
2570 DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2573 * Set the new number of MC addresses that we are being requested to
2576 hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2577 hw->addr_ctrl.mta_in_use = 0;
2579 /* Clear mta_shadow */
2581 DEBUGOUT(" Clearing MTA\n");
2582 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2585 /* Update mta_shadow */
2586 for (i = 0; i < mc_addr_count; i++) {
2587 DEBUGOUT(" Adding the multicast addresses:\n");
2588 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
/* flush shadow table to hardware MTA registers */
2592 for (i = 0; i < hw->mac.mcft_size; i++)
2593 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2594 hw->mac.mta_shadow[i]);
2596 if (hw->addr_ctrl.mta_in_use > 0)
2597 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2598 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2600 DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2601 return IXGBE_SUCCESS;
2605 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2606 * @hw: pointer to hardware structure
2608 * Enables multicast address in RAR and the use of the multicast hash table.
/*
 * ixgbe_enable_mc_generic - re-enable the multicast filter (MCSTCTRL.MFE)
 * if any MTA hash bits are currently in use.
 */
2610 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2612 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2614 DEBUGFUNC("ixgbe_enable_mc_generic");
2616 if (a->mta_in_use > 0)
2617 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2618 hw->mac.mc_filter_type);
2620 return IXGBE_SUCCESS;
2624 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2625 * @hw: pointer to hardware structure
2627 * Disables multicast address in RAR and the use of the multicast hash table.
/*
 * ixgbe_disable_mc_generic - disable the multicast filter by writing
 * MCSTCTRL without the MFE bit (filter type preserved).
 */
2629 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2631 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2633 DEBUGFUNC("ixgbe_disable_mc_generic");
2635 if (a->mta_in_use > 0)
2636 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2638 return IXGBE_SUCCESS;
2642 * ixgbe_fc_enable_generic - Enable flow control
2643 * @hw: pointer to hardware structure
2645 * Enable flow control according to the current settings.
/*
 * ixgbe_fc_enable_generic - validate water marks, autonegotiate the flow
 * control mode, then program MFLCN/FCCFG and the per-traffic-class
 * FCRTL/FCRTH thresholds, pause time (FCTTV) and refresh value (FCRTV).
 * NOTE(review): elided listing — `goto out`-style error exits and closing
 * braces are not visible in this view.
 */
2647 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2649 s32 ret_val = IXGBE_SUCCESS;
2650 u32 mflcn_reg, fccfg_reg;
2655 DEBUGFUNC("ixgbe_fc_enable_generic");
2657 /* Validate the water mark configuration */
2658 if (!hw->fc.pause_time) {
2659 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2663 /* Low water mark of zero causes XOFF floods */
2664 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2665 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2666 hw->fc.high_water[i]) {
2667 if (!hw->fc.low_water[i] ||
2668 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2669 DEBUGOUT("Invalid water mark configuration\n");
2670 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2676 /* Negotiate the fc mode to use */
2677 ixgbe_fc_autoneg(hw);
2679 /* Disable any previous flow control settings */
2680 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2681 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2683 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2684 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2687 * The possible values of fc.current_mode are:
2688 * 0: Flow control is completely disabled
2689 * 1: Rx flow control is enabled (we can receive pause frames,
2690 * but not send pause frames).
2691 * 2: Tx flow control is enabled (we can send pause frames but
2692 * we do not support receiving pause frames).
2693 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2696 switch (hw->fc.current_mode) {
2699 * Flow control is disabled by software override or autoneg.
2700 * The code below will actually disable it in the HW.
2703 case ixgbe_fc_rx_pause:
2705 * Rx Flow control is enabled and Tx Flow control is
2706 * disabled by software override. Since there really
2707 * isn't a way to advertise that we are capable of RX
2708 * Pause ONLY, we will advertise that we support both
2709 * symmetric and asymmetric Rx PAUSE. Later, we will
2710 * disable the adapter's ability to send PAUSE frames.
2712 mflcn_reg |= IXGBE_MFLCN_RFCE;
2714 case ixgbe_fc_tx_pause:
2716 * Tx Flow control is enabled, and Rx Flow control is
2717 * disabled by software override.
2719 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2722 /* Flow control (both Rx and Tx) is enabled by SW override. */
2723 mflcn_reg |= IXGBE_MFLCN_RFCE;
2724 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2727 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
2728 "Flow control param set incorrectly\n");
2729 ret_val = IXGBE_ERR_CONFIG;
2734 /* Set 802.3x based flow control settings. */
2735 mflcn_reg |= IXGBE_MFLCN_DPF;
2736 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2737 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2740 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2741 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2742 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2743 hw->fc.high_water[i]) {
/* thresholds are in KB units; << 10 converts to bytes for the register */
2744 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2745 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2746 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2748 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2750 * In order to prevent Tx hangs when the internal Tx
2751 * switch is enabled we must set the high water mark
2752 * to the maximum FCRTH value. This allows the Tx
2753 * switch to function even under heavy Rx workloads.
2755 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
2758 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2761 /* Configure pause time (2 TCs per register) */
2762 reg = hw->fc.pause_time * 0x00010001;
2763 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2764 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2766 /* Configure flow control refresh threshold value */
2767 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2774 * ixgbe_negotiate_fc - Negotiate flow control
2775 * @hw: pointer to hardware structure
2776 * @adv_reg: flow control advertised settings
2777 * @lp_reg: link partner's flow control settings
2778 * @adv_sym: symmetric pause bit in advertisement
2779 * @adv_asm: asymmetric pause bit in advertisement
2780 * @lp_sym: symmetric pause bit in link partner advertisement
2781 * @lp_asm: asymmetric pause bit in link partner advertisement
2783 * Find the intersection between advertised settings and link partner's
2784 * advertised settings
/*
 * ixgbe_negotiate_fc - resolve hw->fc.current_mode from the intersection of
 * our advertised pause bits (@adv_reg) and the link partner's (@lp_reg),
 * per the standard sym/asym pause resolution table.
 * Returns IXGBE_ERR_FC_NOT_NEGOTIATED when either register is zero.
 */
2786 STATIC s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2787 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2789 if ((!(adv_reg)) || (!(lp_reg))) {
2790 ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2791 "Local or link partner's advertised flow control "
2792 "settings are NULL. Local: %x, link partner: %x\n",
2794 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2797 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2799 * Now we need to check if the user selected Rx ONLY
2800 * of pause frames. In this case, we had to advertise
2801 * FULL flow control because we could not advertise RX
2802 * ONLY. Hence, we must now check to see if we need to
2803 * turn OFF the TRANSMISSION of PAUSE frames.
2805 if (hw->fc.requested_mode == ixgbe_fc_full) {
2806 hw->fc.current_mode = ixgbe_fc_full;
2807 DEBUGOUT("Flow Control = FULL.\n");
2809 hw->fc.current_mode = ixgbe_fc_rx_pause;
2810 DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2812 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2813 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2814 hw->fc.current_mode = ixgbe_fc_tx_pause;
2815 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2816 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2817 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2818 hw->fc.current_mode = ixgbe_fc_rx_pause;
2819 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
/* no overlap in capabilities -> no flow control (else-arm elided) */
2821 hw->fc.current_mode = ixgbe_fc_none;
2822 DEBUGOUT("Flow Control = NONE.\n");
2824 return IXGBE_SUCCESS;
2828 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2829 * @hw: pointer to hardware structure
2831 * Enable flow control according on 1 gig fiber.
/*
 * ixgbe_fc_autoneg_fiber - resolve flow control on 1G fiber from the
 * PCS1G advertisement/link-partner registers; bails out when AN did not
 * complete or timed out.
 * NOTE(review): elided listing — the `out:`/return path is not visible.
 */
2833 STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2835 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2836 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2839 * On multispeed fiber at 1g, bail out if
2840 * - link is up but AN did not complete, or if
2841 * - link is up and AN completed but timed out
2844 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2845 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2846 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
2847 ERROR_REPORT1(IXGBE_ERROR_POLLING,
2848 "Auto-Negotiation did not complete or timed out");
2852 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2853 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2855 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2856 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2857 IXGBE_PCS1GANA_ASM_PAUSE,
2858 IXGBE_PCS1GANA_SYM_PAUSE,
2859 IXGBE_PCS1GANA_ASM_PAUSE);
2866 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2867 * @hw: pointer to hardware structure
2869 * Enable flow control according to IEEE clause 37.
/*
 * ixgbe_fc_autoneg_backplane - resolve flow control over KX/KX4 backplane
 * (IEEE clause 37) from AUTOC/ANLP1; bails out when backplane AN did not
 * complete, or on 82599 when the link partner is not AN-capable.
 * NOTE(review): elided listing — the `out:`/return path is not visible.
 */
2871 STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2873 u32 links2, anlp1_reg, autoc_reg, links;
2874 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2877 * On backplane, bail out if
2878 * - backplane autoneg was not completed, or if
2879 * - we are 82599 and link partner is not AN enabled
2881 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2882 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
2883 ERROR_REPORT1(IXGBE_ERROR_POLLING,
2884 "Auto-Negotiation did not complete");
2888 if (hw->mac.type == ixgbe_mac_82599EB) {
2889 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2890 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
2891 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
2892 "Link partner is not AN enabled");
2897 * Read the 10g AN autoc and LP ability registers and resolve
2898 * local flow control settings accordingly
2900 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2901 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2903 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
2904 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
2905 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
2912 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
2913 * @hw: pointer to hardware structure
2915 * Enable flow control according to IEEE clause 37.
/*
 * ixgbe_fc_autoneg_copper - resolve flow control on copper PHYs by reading
 * the MDIO auto-negotiation advertisement and link-partner registers, then
 * applying the standard pause resolution via ixgbe_negotiate_fc().
 */
2917 STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2919 u16 technology_ability_reg = 0;
2920 u16 lp_technology_ability_reg = 0;
2922 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
2923 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2924 &technology_ability_reg);
2925 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
2926 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
2927 &lp_technology_ability_reg);
2929 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
2930 (u32)lp_technology_ability_reg,
2931 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
2932 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
2936 * ixgbe_fc_autoneg - Configure flow control
2937 * @hw: pointer to hardware structure
2939 * Compares our advertised flow control capabilities to those advertised by
2940 * our link partner, and determines the proper flow control mode to use.
/*
 * ixgbe_fc_autoneg - dispatch flow-control autonegotiation by media type
 * (fiber at 1G, backplane, copper). On success records fc_was_autonegged;
 * otherwise falls back to the user's requested_mode.
 * NOTE(review): elided listing — `goto out` exits, `break`s and the
 * link_up declaration are not visible in this view.
 */
2942 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2944 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2945 ixgbe_link_speed speed;
2948 DEBUGFUNC("ixgbe_fc_autoneg");
2951 * AN should have completed when the cable was plugged in.
2952 * Look for reasons to bail out. Bail out if:
2953 * - FC autoneg is disabled, or if
2956 if (hw->fc.disable_fc_autoneg) {
2957 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
2958 "Flow control autoneg is disabled");
2962 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2964 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
2968 switch (hw->phy.media_type) {
2969 /* Autoneg flow control on fiber adapters */
2970 case ixgbe_media_type_fiber:
2971 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2972 ret_val = ixgbe_fc_autoneg_fiber(hw);
2975 /* Autoneg flow control on backplane adapters */
2976 case ixgbe_media_type_backplane:
2977 ret_val = ixgbe_fc_autoneg_backplane(hw);
2980 /* Autoneg flow control on copper adapters */
2981 case ixgbe_media_type_copper:
2982 if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
2983 ret_val = ixgbe_fc_autoneg_copper(hw);
2991 if (ret_val == IXGBE_SUCCESS) {
2992 hw->fc.fc_was_autonegged = true;
/* negotiation failed: honor the statically requested mode instead */
2994 hw->fc.fc_was_autonegged = false;
2995 hw->fc.current_mode = hw->fc.requested_mode;
3000 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3001 * @hw: pointer to hardware structure
3003 * System-wide timeout range is encoded in PCIe Device Control2 register.
3005 * Add 10% to specified maximum and return the number of times to poll for
3006 * completion timeout, in units of 100 microsec. Never return less than
3007 * 800 = 80 millisec.
/*
 * ixgbe_pcie_timeout_poll - derive a poll count (units of 100 us) from the
 * PCIe Device Control 2 completion-timeout range, plus 10% margin; never
 * below 800 (80 ms).
 * NOTE(review): elided listing — `break`s and the switch header line are
 * not visible in this view.
 */
3009 STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3014 devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3015 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3018 case IXGBE_PCIDEVCTRL2_65_130ms:
3019 pollcnt = 1300; /* 130 millisec */
3021 case IXGBE_PCIDEVCTRL2_260_520ms:
3022 pollcnt = 5200; /* 520 millisec */
3024 case IXGBE_PCIDEVCTRL2_1_2s:
3025 pollcnt = 20000; /* 2 sec */
3027 case IXGBE_PCIDEVCTRL2_4_8s:
3028 pollcnt = 80000; /* 8 sec */
3030 case IXGBE_PCIDEVCTRL2_17_34s:
3031 pollcnt = 34000; /* 34 sec */
3033 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
3034 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
3035 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
3036 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
3038 pollcnt = 800; /* 80 millisec minimum */
3042 /* add 10% to spec maximum */
3043 return (pollcnt * 11) / 10;
3047 * ixgbe_disable_pcie_master - Disable PCI-express master access
3048 * @hw: pointer to hardware structure
3050 * Disables PCI-Express master access and verifies there are no pending
3051 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
3052 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
3053 * is returned signifying master requests disabled.
/*
 * ixgbe_disable_pcie_master - set CTRL.GIO_DIS and poll STATUS.GIO until
 * master requests stop; if they don't, flag a double reset and poll the
 * PCIe Device Status pending-transaction bit before giving up with
 * IXGBE_ERR_MASTER_REQUESTS_PENDING.
 * NOTE(review): elided listing — `goto out` exits, delays inside the poll
 * loops and the `out:` label are not visible in this view.
 */
3055 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
3057 s32 status = IXGBE_SUCCESS;
3061 DEBUGFUNC("ixgbe_disable_pcie_master");
3063 /* Always set this bit to ensure any future transactions are blocked */
3064 IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
3066 /* Exit if master requests are blocked */
3067 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
3068 IXGBE_REMOVED(hw->hw_addr))
3071 /* Poll for master request bit to clear */
3072 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
3074 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
3079 * Two consecutive resets are required via CTRL.RST per datasheet
3080 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
3081 * of this need. The first reset prevents new master requests from
3082 * being issued by our device. We then must wait 1usec or more for any
3083 * remaining completions from the PCIe bus to trickle in, and then reset
3084 * again to clear out any effects they may have had on our device.
3086 DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
3087 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
3090 * Before proceeding, make sure that the PCIe block does not have
3091 * transactions pending.
3093 poll = ixgbe_pcie_timeout_poll(hw);
3094 for (i = 0; i < poll; i++) {
3096 value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
3097 if (IXGBE_REMOVED(hw->hw_addr))
3099 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
3103 ERROR_REPORT1(IXGBE_ERROR_POLLING,
3104 "PCIe transaction pending bit also did not clear.\n");
3105 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
3112 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
3113 * @hw: pointer to hardware structure
3114 * @mask: Mask to specify which semaphore to acquire
3116 * Acquires the SWFW semaphore through the GSSR register for the specified
3117 * function (CSR, PHY0, PHY1, EEPROM, Flash)
/*
 * ixgbe_acquire_swfw_sync - take the GSSR software/firmware semaphore bits
 * in @mask. The EEPROM semaphore serializes all GSSR accesses. FW bits sit
 * 5 positions above the SW bits (fwmask = mask << 5).
 * NOTE(review): elided listing — the retry loop around the GSSR read and
 * the timeout countdown are not visible in this view.
 */
3119 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
3123 u32 fwmask = mask << 5;
3126 DEBUGFUNC("ixgbe_acquire_swfw_sync");
3130 * SW EEPROM semaphore bit is used for access to all
3131 * SW_FW_SYNC/GSSR bits (not just EEPROM)
3133 if (ixgbe_get_eeprom_semaphore(hw))
3134 return IXGBE_ERR_SWFW_SYNC;
3136 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3137 if (!(gssr & (fwmask | swmask)))
3141 * Firmware currently using resource (fwmask) or other software
3142 * thread currently using resource (swmask)
3144 ixgbe_release_eeprom_semaphore(hw);
3150 DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
3151 return IXGBE_ERR_SWFW_SYNC;
/* resource free: claim our SW bits and drop the EEPROM semaphore */
3155 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3157 ixgbe_release_eeprom_semaphore(hw);
3158 return IXGBE_SUCCESS;
3162 * ixgbe_release_swfw_sync - Release SWFW semaphore
3163 * @hw: pointer to hardware structure
3164 * @mask: Mask to specify which semaphore to release
3166 * Releases the SWFW semaphore through the GSSR register for the specified
3167 * function (CSR, PHY0, PHY1, EEPROM, Flash)
/*
 * ixgbe_release_swfw_sync - clear the GSSR bits in @mask under protection
 * of the EEPROM semaphore.
 * NOTE(review): elided listing — the line clearing the swmask bits from
 * gssr before the write-back is not visible in this view.
 */
3169 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
3174 DEBUGFUNC("ixgbe_release_swfw_sync");
3176 ixgbe_get_eeprom_semaphore(hw);
3178 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3180 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3182 ixgbe_release_eeprom_semaphore(hw);
3186 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3187 * @hw: pointer to hardware structure
3189 * Stops the receive data path and waits for the HW to internally empty
3190 * the Rx security block
/*
 * ixgbe_disable_sec_rx_path_generic - set SECRXCTRL.RX_DIS and poll
 * SECRXSTAT until the Rx security block reports ready (or the poll limit
 * is reached, which is logged but not treated as an error).
 */
3192 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3194 #define IXGBE_MAX_SECRX_POLL 40
3199 DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3202 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3203 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3204 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3205 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3206 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3207 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3210 /* Use interrupt-safe sleep just in case */
3214 /* For informational purposes only */
3215 if (i >= IXGBE_MAX_SECRX_POLL)
3216 DEBUGOUT("Rx unit being enabled before security "
3217 "path fully disabled. Continuing with init.\n");
3219 return IXGBE_SUCCESS;
3223 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3224 * @hw: pointer to hardware structure
3226 * Enables the receive data path.
/*
 * ixgbe_enable_sec_rx_path_generic - clear SECRXCTRL.RX_DIS to re-enable
 * the Rx security data path, then flush the write.
 */
3228 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3232 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3234 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3235 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3236 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3237 IXGBE_WRITE_FLUSH(hw);
3239 return IXGBE_SUCCESS;
3243 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3244 * @hw: pointer to hardware structure
3245 * @regval: register value to write to RXCTRL
3247 * Enables the Rx DMA unit
/*
 * ixgbe_enable_rx_dma_generic - enable or disable Rx DMA based on the
 * RXEN bit of @regval, delegating to the ixgbe_enable_rx/disable_rx
 * helpers rather than writing RXCTRL directly.
 */
3249 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3251 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3253 if (regval & IXGBE_RXCTRL_RXEN)
3254 ixgbe_enable_rx(hw);
3256 ixgbe_disable_rx(hw);
3258 return IXGBE_SUCCESS;
3262 * ixgbe_blink_led_start_generic - Blink LED based on index.
3263 * @hw: pointer to hardware structure
3264 * @index: led number to blink
/*
 * ixgbe_blink_led_start_generic - blink LED @index. If the link is down,
 * forces link-up (AUTOC.FLU + AN restart) first; on 82599 with LESM
 * firmware the AUTOC write is guarded by the MAC_CSR SW/FW semaphore.
 * NOTE(review): elided listing — the link-down guard, `goto out`, and the
 * closing braces are not visible in this view.
 */
3266 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
3268 ixgbe_link_speed speed = 0;
3270 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3271 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3272 s32 ret_val = IXGBE_SUCCESS;
3274 DEBUGFUNC("ixgbe_blink_led_start_generic");
3277 * Link must be up to auto-blink the LEDs;
3278 * Force it if link is down.
3280 hw->mac.ops.check_link(hw, &speed, &link_up, false);
3283 /* Need the SW/FW semaphore around AUTOC writes if 82599 and
3286 bool got_lock = false;
3287 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3288 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
3289 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
3290 IXGBE_GSSR_MAC_CSR_SM);
3291 if (ret_val != IXGBE_SUCCESS) {
3292 ret_val = IXGBE_ERR_SWFW_SYNC;
3298 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3299 autoc_reg |= IXGBE_AUTOC_FLU;
3300 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
3301 IXGBE_WRITE_FLUSH(hw);
3304 hw->mac.ops.release_swfw_sync(hw,
3305 IXGBE_GSSR_MAC_CSR_SM);
/* select blink mode for this LED in LEDCTL */
3309 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3310 led_reg |= IXGBE_LED_BLINK(index);
3311 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3312 IXGBE_WRITE_FLUSH(hw);
3319 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3320 * @hw: pointer to hardware structure
3321 * @index: led number to stop blinking
/*
 * ixgbe_blink_led_stop_generic - stop blinking LED @index: clear AUTOC.FLU,
 * restart AN (with the MAC_CSR SW/FW semaphore on LESM-enabled 82599 and a
 * pipeline reset on 82599), then restore the LED to link/activity mode.
 * NOTE(review): elided listing — `goto out` and closing braces are not
 * visible in this view.
 */
3323 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
3325 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3326 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3327 s32 ret_val = IXGBE_SUCCESS;
3328 bool got_lock = false;
3330 DEBUGFUNC("ixgbe_blink_led_stop_generic");
3331 /* Need the SW/FW semaphore around AUTOC writes if 82599 and
3334 if ((hw->mac.type == ixgbe_mac_82599EB) &&
3335 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
3336 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
3337 IXGBE_GSSR_MAC_CSR_SM);
3338 if (ret_val != IXGBE_SUCCESS) {
3339 ret_val = IXGBE_ERR_SWFW_SYNC;
3346 autoc_reg &= ~IXGBE_AUTOC_FLU;
3347 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3348 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
3350 if (hw->mac.type == ixgbe_mac_82599EB)
3351 ixgbe_reset_pipeline_82599(hw);
3354 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
/* restore LED to normal link/activity indication */
3356 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3357 led_reg &= ~IXGBE_LED_BLINK(index);
3358 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3359 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3360 IXGBE_WRITE_FLUSH(hw);
3367 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3368 * @hw: pointer to hardware structure
3369 * @san_mac_offset: SAN MAC address offset
3371 * This function will read the EEPROM location for the SAN MAC address
3372 * pointer, and returns the value at that location. This is used in both
3373 * get and set mac_addr routines.
/*
 * ixgbe_get_san_mac_addr_offset - read the SAN MAC address pointer word
 * from the EEPROM into *@san_mac_offset; logs an error on EEPROM read
 * failure. Shared by the SAN MAC get and set routines.
 * NOTE(review): elided listing — the return statement is not visible here.
 */
3375 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3376 u16 *san_mac_offset)
3380 DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3383 * First read the EEPROM pointer to see if the MAC addresses are
3386 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3389 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3390 "eeprom at offset %d failed",
3391 IXGBE_SAN_MAC_ADDR_PTR);
3398 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3399 * @hw: pointer to hardware structure
3400 * @san_mac_addr: SAN MAC address
3402 * Reads the SAN MAC address from the EEPROM, if it's available. This is
3403 * per-port, so set_lan_id() must be called before reading the addresses.
3404 * set_lan_id() is called by identify_sfp(), but this cannot be relied
3405 * upon for non-SFP connections, so we must call it here.
/*
 * ixgbe_get_san_mac_addr_generic - read the 6-byte per-port SAN MAC from
 * EEPROM (three 16-bit little-endian words). If no SAN MAC pointer exists
 * or a read fails, fills @san_mac_addr with 0xFF and still returns success.
 */
3407 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3409 u16 san_mac_data, san_mac_offset;
3413 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3416 * First read the EEPROM pointer to see if the MAC addresses are
3417 * available. If they're not, no point in calling set_lan_id() here.
3419 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3420 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3421 goto san_mac_addr_out;
3423 /* make sure we know which port we need to program */
3424 hw->mac.ops.set_lan_id(hw);
3425 /* apply the port offset to the address offset */
3426 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3427 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3428 for (i = 0; i < 3; i++) {
3429 ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3432 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3433 "eeprom read at offset %d failed",
3435 goto san_mac_addr_out;
/* each EEPROM word holds two address bytes, low byte first */
3437 san_mac_addr[i * 2] = (u8)(san_mac_data);
3438 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3441 return IXGBE_SUCCESS;
3445 * No addresses available in this EEPROM. It's not an
3446 * error though, so just wipe the local address and return.
3448 for (i = 0; i < 6; i++)
3449 san_mac_addr[i] = 0xFF;
3450 return IXGBE_SUCCESS;
3454 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3455 * @hw: pointer to hardware structure
3456 * @san_mac_addr: SAN MAC address
3458 * Write a SAN MAC address to the EEPROM.
/*
 * ixgbe_set_san_mac_addr_generic - write the 6-byte per-port SAN MAC to
 * EEPROM as three 16-bit words; returns IXGBE_ERR_NO_SAN_ADDR_PTR if no
 * SAN MAC pointer is present in the EEPROM.
 */
3460 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3463 u16 san_mac_data, san_mac_offset;
3466 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3468 /* Look for SAN mac address pointer. If not defined, return */
3469 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3470 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3471 return IXGBE_ERR_NO_SAN_ADDR_PTR;
3473 /* Make sure we know which port we need to write */
3474 hw->mac.ops.set_lan_id(hw);
3475 /* Apply the port offset to the address offset */
3476 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3477 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3479 for (i = 0; i < 3; i++) {
/* pack two address bytes per EEPROM word, low byte first */
3480 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3481 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3482 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3486 return IXGBE_SUCCESS;
3490 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3491 * @hw: pointer to hardware structure
3493 * Read PCIe configuration space, and get the MSI-X vector count from
3494 * the capabilities table.
/*
 * ixgbe_get_pcie_msix_count_generic - read the MSI-X table size from PCIe
 * config space, convert from zero-based to one-based, and clamp to the
 * per-MAC maximum.
 * NOTE(review): elided listing — the default switch arm, the zero-based
 * increment, and the final return are not visible in this view.
 */
3496 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3502 switch (hw->mac.type) {
3503 case ixgbe_mac_82598EB:
3504 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3505 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3507 case ixgbe_mac_82599EB:
3508 case ixgbe_mac_X540:
3509 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3510 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3516 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3517 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3518 if (IXGBE_REMOVED(hw->hw_addr))
3520 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3522 /* MSI-X count is zero-based in HW */
3525 if (msix_count > max_msix_count)
3526 msix_count = max_msix_count;
3532 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3533 * @hw: pointer to hardware structure
3534 * @addr: Address to put into receive address register
3535 * @vmdq: VMDq pool to assign
3537 * Puts an ethernet address into a receive address register, or
3538 * finds the rar that it is aleady in; adds to the pool list
/*
 * ixgbe_insert_mac_addr_generic - find @addr in the RARs (or the first
 * empty RAR, or the slot just above rar_highwater) and associate @vmdq
 * with it; keeps pool 0 clear on RAR[0] so default-pool delivery works.
 * NOTE(review): elided listing — parts of addr_low's construction, the
 * RAR[0] guard, and the final return are not visible in this view.
 */
3540 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3542 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3543 u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3545 u32 rar_low, rar_high;
3546 u32 addr_low, addr_high;
3548 DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3550 /* swap bytes for HW little endian */
3551 addr_low = addr[0] | (addr[1] << 8)
3554 addr_high = addr[4] | (addr[5] << 8);
3557 * Either find the mac_id in rar or find the first empty space.
3558 * rar_highwater points to just after the highest currently used
3559 * rar in order to shorten the search. It grows when we add a new
3562 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3563 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3565 if (((IXGBE_RAH_AV & rar_high) == 0)
3566 && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3567 first_empty_rar = rar;
3568 } else if ((rar_high & 0xFFFF) == addr_high) {
3569 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3570 if (rar_low == addr_low)
3571 break; /* found it already in the rars */
3575 if (rar < hw->mac.rar_highwater) {
3576 /* already there so just add to the pool bits */
3577 ixgbe_set_vmdq(hw, rar, vmdq);
3578 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3579 /* stick it into first empty RAR slot we found */
3580 rar = first_empty_rar;
3581 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3582 } else if (rar == hw->mac.rar_highwater) {
3583 /* add it to the top of the list and inc the highwater mark */
3584 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3585 hw->mac.rar_highwater++;
3586 } else if (rar >= hw->mac.num_rar_entries) {
3587 return IXGBE_ERR_INVALID_MAC_ADDR;
3591 * If we found rar[0], make sure the default pool bit (we use pool 0)
3592 * remains cleared to be sure default pool packets will get delivered
3595 ixgbe_clear_vmdq(hw, rar, 0);
3601 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3602 * @hw: pointer to hardware struct
3603 * @rar: receive address register index to disassociate
3604 * @vmdq: VMDq pool index to remove from the rar
3606 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
	/* The pool membership for each RAR is a 64-bit bitmap split across
	 * two 32-bit registers: MPSAR_LO (pools 0-31) and MPSAR_HI (32-63).
	 */
3608 u32 mpsar_lo, mpsar_hi;
3609 u32 rar_entries = hw->mac.num_rar_entries;
3611 DEBUGFUNC("ixgbe_clear_vmdq_generic");
3613 /* Make sure we are using a valid rar index range */
3614 if (rar >= rar_entries) {
3615 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3616 "RAR index %d is out of range.\n", rar);
3617 return IXGBE_ERR_INVALID_ARGUMENT;
3620 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3621 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
	/* Bail out early if the adapter has been surprise-removed (reads
	 * would return all-ones garbage).
	 */
3623 if (IXGBE_REMOVED(hw->hw_addr))
	/* Nothing to clear if no pool currently references this RAR. */
3626 if (!mpsar_lo && !mpsar_hi)
3629 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3631 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3635 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3638 } else if (vmdq < 32) {
3639 mpsar_lo &= ~(1 << vmdq);
3640 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
	/* vmdq >= 32: the bit lives in the HI register. */
3642 mpsar_hi &= ~(1 << (vmdq - 32));
3643 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3646 /* was that the last pool using this rar? */
	/* RAR 0 (the default MAC address) is never released. */
3647 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
3648 hw->mac.ops.clear_rar(hw, rar);
3650 return IXGBE_SUCCESS;
3654 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3655 * @hw: pointer to hardware struct
3656 * @rar: receive address register index to associate with a VMDq index
3657 * @vmdq: VMDq pool index
3659 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3662 u32 rar_entries = hw->mac.num_rar_entries;
3664 DEBUGFUNC("ixgbe_set_vmdq_generic");
3666 /* Make sure we are using a valid rar index range */
3667 if (rar >= rar_entries) {
3668 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3669 "RAR index %d is out of range.\n", rar);
3670 return IXGBE_ERR_INVALID_ARGUMENT;
	/* Pool bitmap is split across two registers: pools 0-31 in
	 * MPSAR_LO, pools 32-63 in MPSAR_HI.  This path sets the LO bit;
	 * NOTE(review): the vmdq < 32 branch condition is elided in this
	 * view — confirm against the full source.
	 */
3674 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3676 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
	/* vmdq >= 32: set the corresponding bit in the HI register. */
3678 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3679 mpsar |= 1 << (vmdq - 32);
3680 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3682 return IXGBE_SUCCESS;
3686 * This function should only be involved in the IOV mode.
3687 * In IOV mode, Default pool is next pool after the number of
3688 * VFs advertized and not 0.
3689 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3691 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3692 * @hw: pointer to hardware struct
3693 * @vmdq: VMDq pool index
3695 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
	/* Associates exactly one pool (the IOV default pool) with the
	 * SAN MAC RAR: the other half of the 64-bit pool bitmap is
	 * explicitly written to zero in each branch.
	 */
3697 u32 rar = hw->mac.san_mac_rar_index;
3699 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
	/* vmdq < 32: bit goes in MPSAR_LO, HI cleared. */
3702 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3703 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
	/* vmdq >= 32: bit goes in MPSAR_HI, LO cleared. */
3705 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3706 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3709 return IXGBE_SUCCESS;
3713 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3714 * @hw: pointer to hardware structure
3716 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3720 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3721 DEBUGOUT(" Clearing UTA\n");
	/* The Unicast Table Array is 128 32-bit registers; zero them all
	 * so no stale unicast hash filtering survives init.
	 */
3723 for (i = 0; i < 128; i++)
3724 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3726 return IXGBE_SUCCESS;
3730 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3731 * @hw: pointer to hardware structure
3732 * @vlan: VLAN id to write to VLAN filter
3734 * return the VLVF index where this VLAN id should be placed
3737 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
	/* first_empty_slot == 0 doubles as "none found": slot 0 is never
	 * considered (the scan below starts at index 1), so 0 is safe as
	 * the not-found sentinel.
	 */
3740 u32 first_empty_slot = 0;
3743 /* short cut the special case */
3748 * Search for the vlan id in the VLVF entries. Save off the first empty
3749 * slot found along the way
3751 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
3752 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3753 if (!bits && !(first_empty_slot))
3754 first_empty_slot = regindex;
	/* Low 12 bits of a VLVF entry hold the VLAN id. */
3755 else if ((bits & 0x0FFF) == vlan)
3760 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
3761 * in the VLVF. Else use the first empty VLVF register for this
3764 if (regindex >= IXGBE_VLVF_ENTRIES) {
3765 if (first_empty_slot)
3766 regindex = first_empty_slot;
3768 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE,
3769 "No space in VLVF.\n");
	/* Caller must treat a negative return as "no slot available". */
3770 regindex = IXGBE_ERR_NO_SPACE;
3778 * ixgbe_set_vfta_generic - Set VLAN filter table
3779 * @hw: pointer to hardware structure
3780 * @vlan: VLAN id to write to VLAN filter
3781 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
3782 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
3784 * Turn on/off specified VLAN in the VLAN filter table.
3786 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3793 s32 ret_val = IXGBE_SUCCESS;
3794 bool vfta_changed = false;
3796 DEBUGFUNC("ixgbe_set_vfta_generic");
	/* Guard: VLAN ids above 4095 are invalid (condition line elided
	 * in this view — presumably vlan > 4095; confirm in full source).
	 */
3799 return IXGBE_ERR_PARAM;
3802 * this is a 2 part operation - first the VFTA, then the
3803 * VLVF and VLVFB if VT Mode is set
3804 * We don't write the VFTA until we know the VLVF part succeeded.
3808 * The VFTA is a bitstring made up of 128 32-bit registers
3809 * that enable the particular VLAN id, much like the MTA:
3810 * bits[11-5]: which register
3811 * bits[4-0]: which bit in the register
3813 regindex = (vlan >> 5) & 0x7F;
3814 bitindex = vlan & 0x1F;
3815 targetbit = (1 << bitindex);
3816 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
	/* vlan_on path: mark VFTA as needing an update only if the bit
	 * is not already set.
	 */
3819 if (!(vfta & targetbit)) {
3821 vfta_changed = true;
	/* !vlan_on path: mark for update only if the bit is currently set. */
3824 if ((vfta & targetbit)) {
3826 vfta_changed = true;
3831 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
	/* VLVF part may veto the VFTA clear (other pools still using the
	 * VLAN) by resetting vfta_changed through the pointer.
	 */
3833 ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on,
3835 if (ret_val != IXGBE_SUCCESS)
	/* Commit the VFTA change last, once the VLVF part succeeded. */
3839 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
3841 return IXGBE_SUCCESS;
3845 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3846 * @hw: pointer to hardware structure
3847 * @vlan: VLAN id to write to VLAN filter
3848 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
3849 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
3850 * @vfta_changed: pointer to boolean flag which indicates whether VFTA
3853 * Turn on/off specified bit in VLVF table.
3855 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3856 bool vlan_on, bool *vfta_changed)
3860 DEBUGFUNC("ixgbe_set_vlvf_generic");
	/* Guard: reject out-of-range VLAN ids (condition elided in this
	 * view — presumably vlan > 4095; confirm in full source).
	 */
3863 return IXGBE_ERR_PARAM;
3865 /* If VT Mode is set
3867 * make sure the vlan is in VLVF
3868 * set the vind bit in the matching VLVFB
3870 * clear the pool bit and possibly the vind
	/* VLVF/VLVFB are only consulted when virtualization (VT mode)
	 * is enabled; otherwise the plain VFTA handled by the caller
	 * is sufficient.
	 */
3872 vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
3873 if (vt & IXGBE_VT_CTL_VT_ENABLE) {
3877 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
3882 /* set the pool bit */
	/* Each VLVF entry owns a pair of VLVFB registers: index*2 for
	 * pools 0-31 and index*2+1 for pools 32-63.
	 */
3884 bits = IXGBE_READ_REG(hw,
3885 IXGBE_VLVFB(vlvf_index * 2));
3886 bits |= (1 << vind);
3888 IXGBE_VLVFB(vlvf_index * 2),
3891 bits = IXGBE_READ_REG(hw,
3892 IXGBE_VLVFB((vlvf_index * 2) + 1));
3893 bits |= (1 << (vind - 32));
3895 IXGBE_VLVFB((vlvf_index * 2) + 1),
3899 /* clear the pool bit */
3901 bits = IXGBE_READ_REG(hw,
3902 IXGBE_VLVFB(vlvf_index * 2));
3903 bits &= ~(1 << vind);
3905 IXGBE_VLVFB(vlvf_index * 2),
	/* OR in the sibling register so "bits" reflects whether ANY
	 * pool in the full 64-bit map still uses this VLAN.
	 */
3907 bits |= IXGBE_READ_REG(hw,
3908 IXGBE_VLVFB((vlvf_index * 2) + 1));
3910 bits = IXGBE_READ_REG(hw,
3911 IXGBE_VLVFB((vlvf_index * 2) + 1));
3912 bits &= ~(1 << (vind - 32));
3914 IXGBE_VLVFB((vlvf_index * 2) + 1),
3916 bits |= IXGBE_READ_REG(hw,
3917 IXGBE_VLVFB(vlvf_index * 2));
3922 * If there are still bits set in the VLVFB registers
3923 * for the VLAN ID indicated we need to see if the
3924 * caller is requesting that we clear the VFTA entry bit.
3925 * If the caller has requested that we clear the VFTA
3926 * entry bit but there are still pools/VFs using this VLAN
3927 * ID entry then ignore the request. We're not worried
3928 * about the case where we're turning the VFTA VLAN ID
3929 * entry bit on, only when requested to turn it off as
3930 * there may be multiple pools and/or VFs using the
3931 * VLAN ID entry. In that case we cannot clear the
3932 * VFTA bit until all pools/VFs using that VLAN ID have also
3933 * been cleared. This will be indicated by "bits" being
	/* Some pool still references the VLAN: keep the VLVF entry
	 * valid (VIEN) and carrying this VLAN id.
	 */
3937 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
3938 (IXGBE_VLVF_VIEN | vlan));
3939 if ((!vlan_on) && (vfta_changed != NULL)) {
3940 /* someone wants to clear the vfta entry
3941 * but some pools/VFs are still using it.
3943 *vfta_changed = false;
	/* Last user gone: release the VLVF slot entirely. */
3946 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
3949 return IXGBE_SUCCESS;
3953 * ixgbe_clear_vfta_generic - Clear VLAN filter table
3954 * @hw: pointer to hardware structure
3956 * Clears the VLAN filer table, and the VMDq index associated with the filter
3958 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
3962 DEBUGFUNC("ixgbe_clear_vfta_generic");
	/* Zero every VFTA word (VLAN filter bitmap). */
3964 for (offset = 0; offset < hw->mac.vft_size; offset++)
3965 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
	/* Zero each VLVF entry plus its pair of VLVFB pool-bitmap
	 * registers (two 32-bit regs per entry).
	 */
3967 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
3968 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
3969 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
3970 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
3973 return IXGBE_SUCCESS;
3977 * ixgbe_check_mac_link_generic - Determine link and speed status
3978 * @hw: pointer to hardware structure
3979 * @speed: pointer to link speed
3980 * @link_up: true when link is up
3981 * @link_up_wait_to_complete: bool used to wait for link up or not
3983 * Reads the links register to determine if link is up and the current speed
3985 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
3986 bool *link_up, bool link_up_wait_to_complete)
3988 u32 links_reg, links_orig;
3991 DEBUGFUNC("ixgbe_check_mac_link_generic");
3993 /* clear the old state */
	/* Read LINKS twice: the first read clears latched state, the
	 * second reflects current status.  Log when they differ.
	 */
3994 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
3996 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
3998 if (links_orig != links_reg) {
3999 DEBUGOUT2("LINKS changed from %08X to %08X\n",
4000 links_orig, links_reg);
	/* Optionally poll up to IXGBE_LINK_UP_TIME iterations for the
	 * link to come up (delay between polls elided in this view).
	 */
4003 if (link_up_wait_to_complete) {
4004 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
4005 if (links_reg & IXGBE_LINKS_UP) {
4012 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	/* Non-waiting path: single snapshot of the UP bit. */
4015 if (links_reg & IXGBE_LINKS_UP)
	/* Decode the 2-bit speed field into the driver's speed enum. */
4021 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
4022 IXGBE_LINKS_SPEED_10G_82599)
4023 *speed = IXGBE_LINK_SPEED_10GB_FULL;
4024 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
4025 IXGBE_LINKS_SPEED_1G_82599)
4026 *speed = IXGBE_LINK_SPEED_1GB_FULL;
4027 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
4028 IXGBE_LINKS_SPEED_100_82599)
4029 *speed = IXGBE_LINK_SPEED_100_FULL;
4031 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4033 return IXGBE_SUCCESS;
4037 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
4039 * @hw: pointer to hardware structure
4040 * @wwnn_prefix: the alternative WWNN prefix
4041 * @wwpn_prefix: the alternative WWPN prefix
4043 * This function will read the EEPROM from the alternative SAN MAC address
4044 * block to check the support for the alternative WWNN/WWPN prefix support.
4046 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
4050 u16 alt_san_mac_blk_offset;
4052 DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
4054 /* clear output first */
	/* Defaults mean "no prefix available"; callers get these values
	 * on any early exit below.
	 */
4055 *wwnn_prefix = 0xFFFF;
4056 *wwpn_prefix = 0xFFFF;
4058 /* check if alternative SAN MAC is supported */
4059 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
4060 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
4061 goto wwn_prefix_err;
	/* 0 / 0xFFFF block pointers mean the EEPROM has no alt SAN MAC
	 * block — not an error, just nothing to report.
	 */
4063 if ((alt_san_mac_blk_offset == 0) ||
4064 (alt_san_mac_blk_offset == 0xFFFF))
4065 goto wwn_prefix_out;
4067 /* check capability in alternative san mac address block */
4068 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
4069 if (hw->eeprom.ops.read(hw, offset, &caps))
4070 goto wwn_prefix_err;
4071 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
4072 goto wwn_prefix_out;
4074 /* get the corresponding prefix for WWNN/WWPN */
4075 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
4076 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
4077 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4078 "eeprom read at offset %d failed", offset);
4081 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
4082 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
4083 goto wwn_prefix_err;
4086 return IXGBE_SUCCESS;
	/* wwn_prefix_err: read failures are reported but the function
	 * still returns IXGBE_SUCCESS — the 0xFFFF defaults signal
	 * "unavailable" to the caller.
	 */
4089 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4090 "eeprom read at offset %d failed", offset);
4091 return IXGBE_SUCCESS;
4095 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4096 * @hw: pointer to hardware structure
4097 * @bs: the fcoe boot status
4099 * This function will read the FCOE boot status from the iSCSI FCOE block
4101 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4103 u16 offset, caps, flags;
4106 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4108 /* clear output first */
	/* Default: boot status unknown until every EEPROM lookup below
	 * succeeds.
	 */
4109 *bs = ixgbe_fcoe_bootstatus_unavailable;
4111 /* check if FCOE IBA block is present */
4112 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4113 status = hw->eeprom.ops.read(hw, offset, &caps);
4114 if (status != IXGBE_SUCCESS)
4117 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4120 /* check if iSCSI FCOE block is populated */
4121 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4122 if (status != IXGBE_SUCCESS)
	/* 0 / 0xFFFF pointer values mean the block is absent. */
4125 if ((offset == 0) || (offset == 0xFFFF))
4128 /* read fcoe flags in iSCSI FCOE block */
4129 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4130 status = hw->eeprom.ops.read(hw, offset, &flags);
4131 if (status != IXGBE_SUCCESS)
4134 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4135 *bs = ixgbe_fcoe_bootstatus_enabled;
4137 *bs = ixgbe_fcoe_bootstatus_disabled;
4144 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4145 * @hw: pointer to hardware structure
4146 * @enable: enable or disable switch for anti-spoofing
4147 * @pf: Physical Function pool - do not enable anti-spoofing for the PF
4150 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
	/* Each PFVFSPOOF register carries MAC anti-spoof enables for 8
	 * pools, so pf/8 selects the register and pf%8 the bit.
	 */
4153 int pf_target_reg = pf >> 3;
4154 int pf_target_shift = pf % 8;
	/* 82598 has no anti-spoofing hardware — nothing to do. */
4157 if (hw->mac.type == ixgbe_mac_82598EB)
4161 pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
4164 * PFVFSPOOF register array is size 8 with 8 bits assigned to
4165 * MAC anti-spoof enables in each register array element.
	/* Fully enable anti-spoofing for all registers below the one
	 * containing the PF's pool.
	 */
4167 for (j = 0; j < pf_target_reg; j++)
4168 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
4171 * The PF should be allowed to spoof so that it can support
4172 * emulation mode NICs. Do not set the bits assigned to the PF
	/* Mask keeps only the bits for pools below the PF's slot. */
4174 pfvfspoof &= (1 << pf_target_shift) - 1;
4175 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
4178 * Remaining pools belong to the PF so they do not need to have
4179 * anti-spoofing enabled.
4181 for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
4182 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
4186 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4187 * @hw: pointer to hardware structure
4188 * @enable: enable or disable switch for VLAN anti-spoofing
4189 * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4192 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
	/* vf/8 selects the PFVFSPOOF register; vf%8 plus the VLANAS
	 * field offset selects the VLAN anti-spoof bit (distinct from
	 * the MAC anti-spoof bits in the same register).
	 */
4194 int vf_target_reg = vf >> 3;
4195 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
	/* 82598 has no anti-spoofing hardware — nothing to do. */
4198 if (hw->mac.type == ixgbe_mac_82598EB)
	/* Read-modify-write just the one VF's bit. */
4201 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4203 pfvfspoof |= (1 << vf_target_shift);
4205 pfvfspoof &= ~(1 << vf_target_shift);
4206 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4210 * ixgbe_get_device_caps_generic - Get additional device capabilities
4211 * @hw: pointer to hardware structure
4212 * @device_caps: the EEPROM word with the extra device capabilities
4214 * This function will read the EEPROM location for the device capabilities,
4215 * and return the word through device_caps.
4217 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4219 DEBUGFUNC("ixgbe_get_device_caps_generic");
	/* NOTE(review): the EEPROM read's return status is discarded, so
	 * *device_caps may be stale on failure — callers should be aware.
	 */
4221 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4223 return IXGBE_SUCCESS;
4227 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4228 * @hw: pointer to hardware structure
4231 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4236 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4238 /* Enable relaxed ordering */
	/* Tx side: allow relaxed-ordered descriptor writebacks per queue. */
4239 for (i = 0; i < hw->mac.max_tx_queues; i++) {
4240 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4241 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4242 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
	/* Rx side: relaxed ordering for both data and header writes. */
4245 for (i = 0; i < hw->mac.max_rx_queues; i++) {
4246 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4247 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4248 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4249 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4255 * ixgbe_calculate_checksum - Calculate checksum for buffer
4256 * @buffer: pointer to EEPROM
4257 * @length: size of EEPROM to calculate a checksum for
4258 * Calculates the checksum for some buffer on a specified length. The
4259 * checksum calculated is returned.
4261 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4266 DEBUGFUNC("ixgbe_calculate_checksum");
	/* Sum every byte (accumulation line elided in this view), then
	 * return the two's-complement of the sum, so that summing the
	 * buffer including the checksum byte yields zero.
	 */
4271 for (i = 0; i < length; i++)
4274 return (u8) (0 - sum);
4278 * ixgbe_host_interface_command - Issue command to manageability block
4279 * @hw: pointer to the HW structure
4280 * @buffer: contains the command to write and where the return status will
4282 * @length: length of buffer, must be multiple of 4 bytes
4284 * Communicates with the manageability block. On success return IXGBE_SUCCESS
4285 * else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
4287 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
4291 u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
4292 u8 buf_len, dword_len;
4294 s32 ret_val = IXGBE_SUCCESS;
4296 DEBUGFUNC("ixgbe_host_interface_command");
	/* Reject zero, non-dword-multiple, or oversized buffers up front. */
4298 if (length == 0 || length & 0x3 ||
4299 length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4300 DEBUGOUT("Buffer length failure.\n");
4301 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4305 /* Check that the host interface is enabled. */
4306 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4307 if ((hicr & IXGBE_HICR_EN) == 0) {
4308 DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
4309 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4313 /* Calculate length in DWORDs */
4314 dword_len = length >> 2;
4317 * The device driver writes the relevant command block
4318 * into the ram area.
	/* Command is written to shared RAM in little-endian dwords. */
4320 for (i = 0; i < dword_len; i++)
4321 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4322 i, IXGBE_CPU_TO_LE32(buffer[i]));
4324 /* Setting this bit tells the ARC that a new command is pending. */
4325 IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
	/* Poll until firmware clears the command bit or we time out
	 * (per-iteration delay elided in this view).
	 */
4327 for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
4328 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4329 if (!(hicr & IXGBE_HICR_C))
4334 /* Check command successful completion. */
4335 if (i == IXGBE_HI_COMMAND_TIMEOUT ||
4336 (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
4337 DEBUGOUT("Command has failed with no status valid.\n");
4338 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4342 /* Calculate length in DWORDs */
4343 dword_len = hdr_size >> 2;
4345 /* first pull in the header so we know the buffer length */
4346 for (bi = 0; bi < dword_len; bi++) {
4347 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4348 IXGBE_LE32_TO_CPUS(&buffer[bi]);
4351 /* If there is any thing in data position pull it in */
4352 buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
	/* Caller's buffer must hold header + firmware-reported payload. */
4356 if (length < (buf_len + hdr_size)) {
4357 DEBUGOUT("Buffer not large enough for reply message.\n");
4358 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4362 /* Calculate length in DWORDs, add 3 for odd lengths */
4363 dword_len = (buf_len + 3) >> 2;
4365 /* Pull in the rest of the buffer (bi is where we left off)*/
	/* NOTE(review): loop bound uses <= with bi continuing past the
	 * header — verify against the full source that this does not
	 * read one dword beyond the payload.
	 */
4366 for (; bi <= dword_len; bi++) {
4367 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4368 IXGBE_LE32_TO_CPUS(&buffer[bi]);
4376 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4377 * @hw: pointer to the HW structure
4378 * @maj: driver version major number
4379 * @min: driver version minor number
4380 * @build: driver version build number
4381 * @sub: driver version sub build number
4383 * Sends driver version number to firmware through the manageability
4384 * block. On success return IXGBE_SUCCESS
4385 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4386 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4388 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4391 struct ixgbe_hic_drv_info fw_cmd;
4393 s32 ret_val = IXGBE_SUCCESS;
4395 DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
	/* Serialize access to the manageability block with firmware via
	 * the SW/FW semaphore.
	 */
4397 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
4399 ret_val = IXGBE_ERR_SWFW_SYNC;
	/* Build the DRIVER_INFO command header and payload. */
4403 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4404 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4405 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4406 fw_cmd.port_num = (u8)hw->bus.func;
4407 fw_cmd.ver_maj = maj;
4408 fw_cmd.ver_min = min;
4409 fw_cmd.ver_build = build;
4410 fw_cmd.ver_sub = sub;
	/* Checksum is computed over the message with the checksum field
	 * itself zeroed first.
	 */
4411 fw_cmd.hdr.checksum = 0;
4412 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4413 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
	/* Retry the host interface command a bounded number of times. */
4417 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4418 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4420 if (ret_val != IXGBE_SUCCESS)
	/* Firmware echoes status in the response header. */
4423 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4424 FW_CEM_RESP_STATUS_SUCCESS)
4425 ret_val = IXGBE_SUCCESS;
4427 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4432 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4438 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4439 * @hw: pointer to hardware structure
4440 * @num_pb: number of packet buffers to allocate
4441 * @headroom: reserve n KB of headroom
4442 * @strategy: packet buffer allocation strategy
4444 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4447 u32 pbsize = hw->mac.rx_pb_size;
4449 u32 rxpktsize, txpktsize, txpbthresh;
4451 /* Reserve headroom */
4457 /* Divide remaining packet buffer space amongst the number of packet
4458 * buffers requested using supplied strategy.
4461 case PBA_STRATEGY_WEIGHTED:
4462 /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4463 * buffer with 5/8 of the packet buffer space.
	/* Weighted: first num_pb/2 buffers each get (5/8)/( num_pb/2 )
	 * of the space, leaving the remainder for the EQUAL fallthrough.
	 */
4465 rxpktsize = (pbsize * 5) / (num_pb * 4);
4466 pbsize -= rxpktsize * (num_pb / 2);
4467 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4468 for (; i < (num_pb / 2); i++)
4469 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4470 /* Fall through to configure remaining packet buffers */
4471 case PBA_STRATEGY_EQUAL:
	/* Split whatever space remains evenly over the rest. */
4472 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4473 for (; i < num_pb; i++)
4474 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4480 /* Only support an equally distributed Tx packet buffer strategy. */
4481 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4482 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4483 for (i = 0; i < num_pb; i++) {
4484 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4485 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4488 /* Clear unused TCs, if any, to zero buffer size*/
4489 for (; i < IXGBE_MAX_PB; i++) {
4490 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4491 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4492 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4497 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4498 * @hw: pointer to the hardware structure
4500 * The 82599 and x540 MACs can experience issues if TX work is still pending
4501 * when a reset occurs. This function prevents this by flushing the PCIe
4502 * buffers on the system.
4504 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4506 u32 gcr_ext, hlreg0;
4509 * If double reset is not requested then all transactions should
4510 * already be clear and as such there is no work to do
4512 if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4516 * Set loopback enable to prevent any transmits from being sent
4517 * should the link come up. This assumes that the RXCTRL.RXEN bit
4518 * has already been cleared.
4520 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4521 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4523 /* initiate cleaning flow for buffers in the PCIe transaction layer */
4524 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4525 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4526 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4528 /* Flush all writes and allow 20usec for all transactions to clear */
4529 IXGBE_WRITE_FLUSH(hw);
4532 /* restore previous register values */
	/* Restoring GCR_EXT/HLREG0 also drops the temporary loopback. */
4533 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4534 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4539 * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
4540 * @hw: pointer to hardware structure
4541 * @map: pointer to u8 arr for returning map
4543 * Read the rtrup2tc HW register and resolve its content into map
4545 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
	/* Unpack the user-priority -> traffic-class mapping: one small
	 * field per priority, extracted by shifting and masking.
	 */
4549 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
4550 for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
4551 map[i] = IXGBE_RTRUP2TC_UP_MASK &
4552 (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
4556 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
4561 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4562 if (rxctrl & IXGBE_RXCTRL_RXEN) {
	/* On non-82598 parts, also disable VT loopback while Rx is off;
	 * remember in hw->mac.set_lben whether it was on so that
	 * ixgbe_enable_rx_generic can restore it.
	 */
4563 if (hw->mac.type != ixgbe_mac_82598EB) {
4564 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
4565 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
4566 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
4567 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
4568 hw->mac.set_lben = true;
4570 hw->mac.set_lben = false;
	/* Finally clear the Rx enable bit itself. */
4573 rxctrl &= ~IXGBE_RXCTRL_RXEN;
4574 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
4578 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
4583 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
4584 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
4586 if (hw->mac.type != ixgbe_mac_82598EB) {
4587 if (hw->mac.set_lben) {
4588 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
4589 pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
4590 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
4591 hw->mac.set_lben = false;