1 /*******************************************************************************
3 Copyright (c) 2001-2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include "ixgbe_common.h"
35 #include "ixgbe_phy.h"
36 #include "ixgbe_dcb.h"
37 #include "ixgbe_dcb_82599.h"
38 #include "ixgbe_api.h"
40 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
41 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
42 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
43 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
44 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
45 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
47 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
48 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
49 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
50 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw);
52 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
53 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
55 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
56 u16 words, u16 *data);
57 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
58 u16 words, u16 *data);
59 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
63 * ixgbe_init_ops_generic - Inits function ptrs
64 * @hw: pointer to the hardware structure
66 * Initialize the function pointers.
68 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
70 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
71 struct ixgbe_mac_info *mac = &hw->mac;
72 u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
74 DEBUGFUNC("ixgbe_init_ops_generic");
77 eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
78 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
79 if (eec & IXGBE_EEC_PRES) {
80 eeprom->ops.read = ixgbe_read_eerd_generic;
81 eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
83 eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
84 eeprom->ops.read_buffer =
85 ixgbe_read_eeprom_buffer_bit_bang_generic;
87 eeprom->ops.write = ixgbe_write_eeprom_generic;
88 eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
89 eeprom->ops.validate_checksum =
90 ixgbe_validate_eeprom_checksum_generic;
91 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
92 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
95 mac->ops.init_hw = ixgbe_init_hw_generic;
96 mac->ops.reset_hw = NULL;
97 mac->ops.start_hw = ixgbe_start_hw_generic;
98 mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
99 mac->ops.get_media_type = NULL;
100 mac->ops.get_supported_physical_layer = NULL;
101 mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
102 mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
103 mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
104 mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
105 mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
106 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
107 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
108 mac->ops.prot_autoc_read = prot_autoc_read_generic;
109 mac->ops.prot_autoc_write = prot_autoc_write_generic;
112 mac->ops.led_on = ixgbe_led_on_generic;
113 mac->ops.led_off = ixgbe_led_off_generic;
114 mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
115 mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
116 mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;
118 /* RAR, Multicast, VLAN */
119 mac->ops.set_rar = ixgbe_set_rar_generic;
120 mac->ops.clear_rar = ixgbe_clear_rar_generic;
121 mac->ops.insert_mac_addr = NULL;
122 mac->ops.set_vmdq = NULL;
123 mac->ops.clear_vmdq = NULL;
124 mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
125 mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
126 mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
127 mac->ops.enable_mc = ixgbe_enable_mc_generic;
128 mac->ops.disable_mc = ixgbe_disable_mc_generic;
129 mac->ops.clear_vfta = NULL;
130 mac->ops.set_vfta = NULL;
131 mac->ops.set_vlvf = NULL;
132 mac->ops.init_uta_tables = NULL;
133 mac->ops.enable_rx = ixgbe_enable_rx_generic;
134 mac->ops.disable_rx = ixgbe_disable_rx_generic;
137 mac->ops.fc_enable = ixgbe_fc_enable_generic;
138 mac->ops.setup_fc = ixgbe_setup_fc_generic;
139 mac->ops.fc_autoneg = ixgbe_fc_autoneg;
142 mac->ops.get_link_capabilities = NULL;
143 mac->ops.setup_link = NULL;
144 mac->ops.check_link = NULL;
145 mac->ops.dmac_config = NULL;
146 mac->ops.dmac_update_tcs = NULL;
147 mac->ops.dmac_config_tcs = NULL;
149 return IXGBE_SUCCESS;
153 * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
155 * @hw: pointer to hardware structure
157 * This function returns true if the device supports flow control
158 * autonegotiation, and false if it does not.
161 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
163 bool supported = false;
164 ixgbe_link_speed speed;
167 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
169 switch (hw->phy.media_type) {
170 case ixgbe_media_type_fiber_qsfp:
171 case ixgbe_media_type_fiber:
172 /* flow control autoneg black list */
173 switch (hw->device_id) {
174 case IXGBE_DEV_ID_X550EM_A_SFP:
175 case IXGBE_DEV_ID_X550EM_A_SFP_N:
176 case IXGBE_DEV_ID_X550EM_A_QSFP:
177 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
181 hw->mac.ops.check_link(hw, &speed, &link_up, false);
182 /* if link is down, assume supported */
184 supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
191 case ixgbe_media_type_backplane:
194 case ixgbe_media_type_copper:
195 /* only some copper devices support flow control autoneg */
196 switch (hw->device_id) {
197 case IXGBE_DEV_ID_82599_T3_LOM:
198 case IXGBE_DEV_ID_X540T:
199 case IXGBE_DEV_ID_X540T1:
200 case IXGBE_DEV_ID_X550T:
201 case IXGBE_DEV_ID_X550T1:
202 case IXGBE_DEV_ID_X550EM_X_10G_T:
203 case IXGBE_DEV_ID_X550EM_A_10G_T:
204 case IXGBE_DEV_ID_X550EM_A_1G_T:
205 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
216 ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
217 "Device %x does not support flow control autoneg",
223 * ixgbe_setup_fc_generic - Set up flow control
224 * @hw: pointer to hardware structure
226 * Called at init time to set up flow control.
228 s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
230 s32 ret_val = IXGBE_SUCCESS;
231 u32 reg = 0, reg_bp = 0;
235 DEBUGFUNC("ixgbe_setup_fc_generic");
237 /* Validate the requested mode */
238 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
239 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
240 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
241 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
246 * 10gig parts do not have a word in the EEPROM to determine the
247 * default flow control setting, so we explicitly set it to full.
249 if (hw->fc.requested_mode == ixgbe_fc_default)
250 hw->fc.requested_mode = ixgbe_fc_full;
253 * Set up the 1G and 10G flow control advertisement registers so the
254 * HW will be able to do fc autoneg once the cable is plugged in. If
255 * we link at 10G, the 1G advertisement is harmless and vice versa.
257 switch (hw->phy.media_type) {
258 case ixgbe_media_type_backplane:
259 /* some MAC's need RMW protection on AUTOC */
260 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp);
261 if (ret_val != IXGBE_SUCCESS)
264 /* only backplane uses autoc so fall though */
265 case ixgbe_media_type_fiber_qsfp:
266 case ixgbe_media_type_fiber:
267 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
270 case ixgbe_media_type_copper:
271 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
272 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu);
279 * The possible values of fc.requested_mode are:
280 * 0: Flow control is completely disabled
281 * 1: Rx flow control is enabled (we can receive pause frames,
282 * but not send pause frames).
283 * 2: Tx flow control is enabled (we can send pause frames but
284 * we do not support receiving pause frames).
285 * 3: Both Rx and Tx flow control (symmetric) are enabled.
288 switch (hw->fc.requested_mode) {
290 /* Flow control completely disabled by software override. */
291 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
292 if (hw->phy.media_type == ixgbe_media_type_backplane)
293 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
294 IXGBE_AUTOC_ASM_PAUSE);
295 else if (hw->phy.media_type == ixgbe_media_type_copper)
296 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
298 case ixgbe_fc_tx_pause:
300 * Tx Flow control is enabled, and Rx Flow control is
301 * disabled by software override.
303 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
304 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
305 if (hw->phy.media_type == ixgbe_media_type_backplane) {
306 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
307 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
308 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
309 reg_cu |= IXGBE_TAF_ASM_PAUSE;
310 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
313 case ixgbe_fc_rx_pause:
315 * Rx Flow control is enabled and Tx Flow control is
316 * disabled by software override. Since there really
317 * isn't a way to advertise that we are capable of RX
318 * Pause ONLY, we will advertise that we support both
319 * symmetric and asymmetric Rx PAUSE, as such we fall
320 * through to the fc_full statement. Later, we will
321 * disable the adapter's ability to send PAUSE frames.
324 /* Flow control (both Rx and Tx) is enabled by SW override. */
325 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
326 if (hw->phy.media_type == ixgbe_media_type_backplane)
327 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
328 IXGBE_AUTOC_ASM_PAUSE;
329 else if (hw->phy.media_type == ixgbe_media_type_copper)
330 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
333 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
334 "Flow control param set incorrectly\n");
335 ret_val = IXGBE_ERR_CONFIG;
340 if (hw->mac.type < ixgbe_mac_X540) {
342 * Enable auto-negotiation between the MAC & PHY;
343 * the MAC will advertise clause 37 flow control.
345 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
346 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
348 /* Disable AN timeout */
349 if (hw->fc.strict_ieee)
350 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
352 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
353 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
357 * AUTOC restart handles negotiation of 1G and 10G on backplane
358 * and copper. There is no need to set the PCS1GCTL register.
361 if (hw->phy.media_type == ixgbe_media_type_backplane) {
362 reg_bp |= IXGBE_AUTOC_AN_RESTART;
363 ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
366 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
367 (ixgbe_device_supports_autoneg_fc(hw))) {
368 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
369 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
372 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
378 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
379 * @hw: pointer to hardware structure
381 * Starts the hardware by filling the bus info structure and media type, clears
382 * all on chip counters, initializes receive address registers, multicast
383 * table, VLAN filter table, calls routine to set up link and flow control
384 * settings, and leaves transmit and receive units disabled and uninitialized
386 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
392 DEBUGFUNC("ixgbe_start_hw_generic");
394 /* Set the media type */
395 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
397 /* PHY ops initialization must be done in reset_hw() */
399 /* Clear the VLAN filter table */
400 hw->mac.ops.clear_vfta(hw);
402 /* Clear statistics registers */
403 hw->mac.ops.clear_hw_cntrs(hw);
405 /* Set No Snoop Disable */
406 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
407 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
408 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
409 IXGBE_WRITE_FLUSH(hw);
411 /* Setup flow control */
412 ret_val = ixgbe_setup_fc(hw);
413 if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
414 DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
418 /* Cache bit indicating need for crosstalk fix */
419 switch (hw->mac.type) {
420 case ixgbe_mac_82599EB:
421 case ixgbe_mac_X550EM_x:
422 case ixgbe_mac_X550EM_a:
423 hw->mac.ops.get_device_caps(hw, &device_caps);
424 if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
425 hw->need_crosstalk_fix = false;
427 hw->need_crosstalk_fix = true;
430 hw->need_crosstalk_fix = false;
434 /* Clear adapter stopped flag */
435 hw->adapter_stopped = false;
437 return IXGBE_SUCCESS;
441 * ixgbe_start_hw_gen2 - Init sequence for common device family
442 * @hw: pointer to hw structure
444 * Performs the init sequence common to the second generation
446 * Devices in the second generation:
450 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
455 /* Clear the rate limiters */
456 for (i = 0; i < hw->mac.max_tx_queues; i++) {
457 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
458 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
460 IXGBE_WRITE_FLUSH(hw);
462 /* Disable relaxed ordering */
463 for (i = 0; i < hw->mac.max_tx_queues; i++) {
464 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
465 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
466 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
469 for (i = 0; i < hw->mac.max_rx_queues; i++) {
470 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
471 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
472 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
473 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
476 return IXGBE_SUCCESS;
480 * ixgbe_init_hw_generic - Generic hardware initialization
481 * @hw: pointer to hardware structure
483 * Initialize the hardware by resetting the hardware, filling the bus info
484 * structure and media type, clears all on chip counters, initializes receive
485 * address registers, multicast table, VLAN filter table, calls routine to set
486 * up link and flow control settings, and leaves transmit and receive units
487 * disabled and uninitialized
489 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
493 DEBUGFUNC("ixgbe_init_hw_generic");
495 /* Reset the hardware */
496 status = hw->mac.ops.reset_hw(hw);
498 if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
500 status = hw->mac.ops.start_hw(hw);
503 /* Initialize the LED link active for LED blink support */
504 hw->mac.ops.init_led_link_act(hw);
506 if (status != IXGBE_SUCCESS)
507 DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
513 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
514 * @hw: pointer to hardware structure
516 * Clears all hardware statistics counters by reading them from the hardware
517 * Statistics counters are clear on read.
519 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
523 DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
525 IXGBE_READ_REG(hw, IXGBE_CRCERRS);
526 IXGBE_READ_REG(hw, IXGBE_ILLERRC);
527 IXGBE_READ_REG(hw, IXGBE_ERRBC);
528 IXGBE_READ_REG(hw, IXGBE_MSPDC);
529 for (i = 0; i < 8; i++)
530 IXGBE_READ_REG(hw, IXGBE_MPC(i));
532 IXGBE_READ_REG(hw, IXGBE_MLFC);
533 IXGBE_READ_REG(hw, IXGBE_MRFC);
534 IXGBE_READ_REG(hw, IXGBE_RLEC);
535 IXGBE_READ_REG(hw, IXGBE_LXONTXC);
536 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
537 if (hw->mac.type >= ixgbe_mac_82599EB) {
538 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
539 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
541 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
542 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
545 for (i = 0; i < 8; i++) {
546 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
547 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
548 if (hw->mac.type >= ixgbe_mac_82599EB) {
549 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
550 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
552 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
553 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
556 if (hw->mac.type >= ixgbe_mac_82599EB)
557 for (i = 0; i < 8; i++)
558 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
559 IXGBE_READ_REG(hw, IXGBE_PRC64);
560 IXGBE_READ_REG(hw, IXGBE_PRC127);
561 IXGBE_READ_REG(hw, IXGBE_PRC255);
562 IXGBE_READ_REG(hw, IXGBE_PRC511);
563 IXGBE_READ_REG(hw, IXGBE_PRC1023);
564 IXGBE_READ_REG(hw, IXGBE_PRC1522);
565 IXGBE_READ_REG(hw, IXGBE_GPRC);
566 IXGBE_READ_REG(hw, IXGBE_BPRC);
567 IXGBE_READ_REG(hw, IXGBE_MPRC);
568 IXGBE_READ_REG(hw, IXGBE_GPTC);
569 IXGBE_READ_REG(hw, IXGBE_GORCL);
570 IXGBE_READ_REG(hw, IXGBE_GORCH);
571 IXGBE_READ_REG(hw, IXGBE_GOTCL);
572 IXGBE_READ_REG(hw, IXGBE_GOTCH);
573 if (hw->mac.type == ixgbe_mac_82598EB)
574 for (i = 0; i < 8; i++)
575 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
576 IXGBE_READ_REG(hw, IXGBE_RUC);
577 IXGBE_READ_REG(hw, IXGBE_RFC);
578 IXGBE_READ_REG(hw, IXGBE_ROC);
579 IXGBE_READ_REG(hw, IXGBE_RJC);
580 IXGBE_READ_REG(hw, IXGBE_MNGPRC);
581 IXGBE_READ_REG(hw, IXGBE_MNGPDC);
582 IXGBE_READ_REG(hw, IXGBE_MNGPTC);
583 IXGBE_READ_REG(hw, IXGBE_TORL);
584 IXGBE_READ_REG(hw, IXGBE_TORH);
585 IXGBE_READ_REG(hw, IXGBE_TPR);
586 IXGBE_READ_REG(hw, IXGBE_TPT);
587 IXGBE_READ_REG(hw, IXGBE_PTC64);
588 IXGBE_READ_REG(hw, IXGBE_PTC127);
589 IXGBE_READ_REG(hw, IXGBE_PTC255);
590 IXGBE_READ_REG(hw, IXGBE_PTC511);
591 IXGBE_READ_REG(hw, IXGBE_PTC1023);
592 IXGBE_READ_REG(hw, IXGBE_PTC1522);
593 IXGBE_READ_REG(hw, IXGBE_MPTC);
594 IXGBE_READ_REG(hw, IXGBE_BPTC);
595 for (i = 0; i < 16; i++) {
596 IXGBE_READ_REG(hw, IXGBE_QPRC(i));
597 IXGBE_READ_REG(hw, IXGBE_QPTC(i));
598 if (hw->mac.type >= ixgbe_mac_82599EB) {
599 IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
600 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
601 IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
602 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
603 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
605 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
606 IXGBE_READ_REG(hw, IXGBE_QBTC(i));
610 if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
612 ixgbe_identify_phy(hw);
613 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
614 IXGBE_MDIO_PCS_DEV_TYPE, &i);
615 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
616 IXGBE_MDIO_PCS_DEV_TYPE, &i);
617 hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
618 IXGBE_MDIO_PCS_DEV_TYPE, &i);
619 hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
620 IXGBE_MDIO_PCS_DEV_TYPE, &i);
623 return IXGBE_SUCCESS;
627 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
628 * @hw: pointer to hardware structure
629 * @pba_num: stores the part number string from the EEPROM
630 * @pba_num_size: part number string buffer length
632 * Reads the part number string from the EEPROM.
634 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
643 DEBUGFUNC("ixgbe_read_pba_string_generic");
645 if (pba_num == NULL) {
646 DEBUGOUT("PBA string buffer was null\n");
647 return IXGBE_ERR_INVALID_ARGUMENT;
650 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
652 DEBUGOUT("NVM Read Error\n");
656 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
658 DEBUGOUT("NVM Read Error\n");
663 * if data is not ptr guard the PBA must be in legacy format which
664 * means pba_ptr is actually our second data word for the PBA number
665 * and we can decode it into an ascii string
667 if (data != IXGBE_PBANUM_PTR_GUARD) {
668 DEBUGOUT("NVM PBA number is not stored as string\n");
670 /* we will need 11 characters to store the PBA */
671 if (pba_num_size < 11) {
672 DEBUGOUT("PBA string buffer too small\n");
673 return IXGBE_ERR_NO_SPACE;
676 /* extract hex string from data and pba_ptr */
677 pba_num[0] = (data >> 12) & 0xF;
678 pba_num[1] = (data >> 8) & 0xF;
679 pba_num[2] = (data >> 4) & 0xF;
680 pba_num[3] = data & 0xF;
681 pba_num[4] = (pba_ptr >> 12) & 0xF;
682 pba_num[5] = (pba_ptr >> 8) & 0xF;
685 pba_num[8] = (pba_ptr >> 4) & 0xF;
686 pba_num[9] = pba_ptr & 0xF;
688 /* put a null character on the end of our string */
691 /* switch all the data but the '-' to hex char */
692 for (offset = 0; offset < 10; offset++) {
693 if (pba_num[offset] < 0xA)
694 pba_num[offset] += '0';
695 else if (pba_num[offset] < 0x10)
696 pba_num[offset] += 'A' - 0xA;
699 return IXGBE_SUCCESS;
702 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
704 DEBUGOUT("NVM Read Error\n");
708 if (length == 0xFFFF || length == 0) {
709 DEBUGOUT("NVM PBA number section invalid length\n");
710 return IXGBE_ERR_PBA_SECTION;
713 /* check if pba_num buffer is big enough */
714 if (pba_num_size < (((u32)length * 2) - 1)) {
715 DEBUGOUT("PBA string buffer too small\n");
716 return IXGBE_ERR_NO_SPACE;
719 /* trim pba length from start of string */
723 for (offset = 0; offset < length; offset++) {
724 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
726 DEBUGOUT("NVM Read Error\n");
729 pba_num[offset * 2] = (u8)(data >> 8);
730 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
732 pba_num[offset * 2] = '\0';
734 return IXGBE_SUCCESS;
738 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
739 * @hw: pointer to hardware structure
740 * @pba_num: stores the part number from the EEPROM
742 * Reads the part number from the EEPROM.
744 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
749 DEBUGFUNC("ixgbe_read_pba_num_generic");
751 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
753 DEBUGOUT("NVM Read Error\n");
755 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
756 DEBUGOUT("NVM Not supported\n");
757 return IXGBE_NOT_IMPLEMENTED;
759 *pba_num = (u32)(data << 16);
761 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
763 DEBUGOUT("NVM Read Error\n");
768 return IXGBE_SUCCESS;
773 * @hw: pointer to the HW structure
774 * @eeprom_buf: optional pointer to EEPROM image
775 * @eeprom_buf_size: size of EEPROM image in words
776 * @max_pba_block_size: PBA block size limit
777 * @pba: pointer to output PBA structure
779 * Reads PBA from EEPROM image when eeprom_buf is not NULL.
780 * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
783 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
784 u32 eeprom_buf_size, u16 max_pba_block_size,
785 struct ixgbe_pba *pba)
791 return IXGBE_ERR_PARAM;
793 if (eeprom_buf == NULL) {
794 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
799 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
800 pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
801 pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
803 return IXGBE_ERR_PARAM;
807 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
808 if (pba->pba_block == NULL)
809 return IXGBE_ERR_PARAM;
811 ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
817 if (pba_block_size > max_pba_block_size)
818 return IXGBE_ERR_PARAM;
820 if (eeprom_buf == NULL) {
821 ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
827 if (eeprom_buf_size > (u32)(pba->word[1] +
829 memcpy(pba->pba_block,
830 &eeprom_buf[pba->word[1]],
831 pba_block_size * sizeof(u16));
833 return IXGBE_ERR_PARAM;
838 return IXGBE_SUCCESS;
842 * ixgbe_write_pba_raw
843 * @hw: pointer to the HW structure
844 * @eeprom_buf: optional pointer to EEPROM image
845 * @eeprom_buf_size: size of EEPROM image in words
846 * @pba: pointer to PBA structure
848 * Writes PBA to EEPROM image when eeprom_buf is not NULL.
849 * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
852 s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
853 u32 eeprom_buf_size, struct ixgbe_pba *pba)
858 return IXGBE_ERR_PARAM;
860 if (eeprom_buf == NULL) {
861 ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
866 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
867 eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
868 eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
870 return IXGBE_ERR_PARAM;
874 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
875 if (pba->pba_block == NULL)
876 return IXGBE_ERR_PARAM;
878 if (eeprom_buf == NULL) {
879 ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
885 if (eeprom_buf_size > (u32)(pba->word[1] +
886 pba->pba_block[0])) {
887 memcpy(&eeprom_buf[pba->word[1]],
889 pba->pba_block[0] * sizeof(u16));
891 return IXGBE_ERR_PARAM;
896 return IXGBE_SUCCESS;
900 * ixgbe_get_pba_block_size
901 * @hw: pointer to the HW structure
902 * @eeprom_buf: optional pointer to EEPROM image
903 * @eeprom_buf_size: size of EEPROM image in words
904 * @pba_data_size: pointer to output variable
906 * Returns the size of the PBA block in words. Function operates on EEPROM
907 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
911 s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
912 u32 eeprom_buf_size, u16 *pba_block_size)
918 DEBUGFUNC("ixgbe_get_pba_block_size");
920 if (eeprom_buf == NULL) {
921 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
926 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
927 pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
928 pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
930 return IXGBE_ERR_PARAM;
934 if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
935 if (eeprom_buf == NULL) {
936 ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
941 if (eeprom_buf_size > pba_word[1])
942 length = eeprom_buf[pba_word[1] + 0];
944 return IXGBE_ERR_PARAM;
947 if (length == 0xFFFF || length == 0)
948 return IXGBE_ERR_PBA_SECTION;
950 /* PBA number in legacy format, there is no PBA Block. */
954 if (pba_block_size != NULL)
955 *pba_block_size = length;
957 return IXGBE_SUCCESS;
961 * ixgbe_get_mac_addr_generic - Generic get MAC address
962 * @hw: pointer to hardware structure
963 * @mac_addr: Adapter MAC address
965 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
966 * A reset of the adapter must be performed prior to calling this function
967 * in order for the MAC address to have been loaded from the EEPROM into RAR0
969 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
975 DEBUGFUNC("ixgbe_get_mac_addr_generic");
977 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
978 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
980 for (i = 0; i < 4; i++)
981 mac_addr[i] = (u8)(rar_low >> (i*8));
983 for (i = 0; i < 2; i++)
984 mac_addr[i+4] = (u8)(rar_high >> (i*8));
986 return IXGBE_SUCCESS;
990 * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
991 * @hw: pointer to hardware structure
992 * @link_status: the link status returned by the PCI config space
994 * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
996 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
998 struct ixgbe_mac_info *mac = &hw->mac;
1000 if (hw->bus.type == ixgbe_bus_type_unknown)
1001 hw->bus.type = ixgbe_bus_type_pci_express;
1003 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
1004 case IXGBE_PCI_LINK_WIDTH_1:
1005 hw->bus.width = ixgbe_bus_width_pcie_x1;
1007 case IXGBE_PCI_LINK_WIDTH_2:
1008 hw->bus.width = ixgbe_bus_width_pcie_x2;
1010 case IXGBE_PCI_LINK_WIDTH_4:
1011 hw->bus.width = ixgbe_bus_width_pcie_x4;
1013 case IXGBE_PCI_LINK_WIDTH_8:
1014 hw->bus.width = ixgbe_bus_width_pcie_x8;
1017 hw->bus.width = ixgbe_bus_width_unknown;
1021 switch (link_status & IXGBE_PCI_LINK_SPEED) {
1022 case IXGBE_PCI_LINK_SPEED_2500:
1023 hw->bus.speed = ixgbe_bus_speed_2500;
1025 case IXGBE_PCI_LINK_SPEED_5000:
1026 hw->bus.speed = ixgbe_bus_speed_5000;
1028 case IXGBE_PCI_LINK_SPEED_8000:
1029 hw->bus.speed = ixgbe_bus_speed_8000;
1032 hw->bus.speed = ixgbe_bus_speed_unknown;
1036 mac->ops.set_lan_id(hw);
1040 * ixgbe_get_bus_info_generic - Generic set PCI bus info
1041 * @hw: pointer to hardware structure
1043 * Gets the PCI bus info (speed, width, type) then calls helper function to
1044 * store this data within the ixgbe_hw structure.
1046 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1050 DEBUGFUNC("ixgbe_get_bus_info_generic");
1052 /* Get the negotiated link width and speed from PCI config space */
1053 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1055 ixgbe_set_pci_config_data_generic(hw, link_status);
1057 return IXGBE_SUCCESS;
1061 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1062 * @hw: pointer to the HW structure
1064 * Determines the LAN function id by reading memory-mapped registers and swaps
1065 * the port value if requested, and set MAC instance for devices that share
1068 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
1070 struct ixgbe_bus_info *bus = &hw->bus;
1074 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
1076 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
1077 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
1078 bus->lan_id = (u8)bus->func;
1080 /* check for a port swap */
1081 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
1082 if (reg & IXGBE_FACTPS_LFS)
1085 /* Get MAC instance from EEPROM for configuring CS4227 */
1086 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
1087 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
1088 bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
1089 IXGBE_EE_CTRL_4_INST_ID_SHIFT;
1094 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1095 * @hw: pointer to hardware structure
1097 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1098 * disables transmit and receive units. The adapter_stopped flag is used by
1099 * the shared code and drivers to determine if the adapter is in a stopped
1100 * state and should not touch the hardware.
1102 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
1107 DEBUGFUNC("ixgbe_stop_adapter_generic");
1110 * Set the adapter_stopped flag so other driver functions stop touching
1113 hw->adapter_stopped = true;
1115 /* Disable the receive unit */
1116 ixgbe_disable_rx(hw);
1118 /* Clear interrupt mask to stop interrupts from being generated */
1119 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1121 /* Clear any pending interrupts, flush previous writes */
1122 IXGBE_READ_REG(hw, IXGBE_EICR);
1124 /* Disable the transmit unit. Each queue must be disabled. */
1125 for (i = 0; i < hw->mac.max_tx_queues; i++)
1126 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
1128 /* Disable the receive unit by stopping each queue */
1129 for (i = 0; i < hw->mac.max_rx_queues; i++) {
1130 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1131 reg_val &= ~IXGBE_RXDCTL_ENABLE;
1132 reg_val |= IXGBE_RXDCTL_SWFLSH;
1133 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
1136 /* flush all queues disables */
1137 IXGBE_WRITE_FLUSH(hw);
1141 * Prevent the PCI-E bus from hanging by disabling PCI-E master
1142 * access and verify no pending requests
1144 return ixgbe_disable_pcie_master(hw);
1148 * ixgbe_init_led_link_act_generic - Store the LED index link/activity.
1149 * @hw: pointer to hardware structure
1151 * Store the index for the link active LED. This will be used to support
1154 s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
1156 struct ixgbe_mac_info *mac = &hw->mac;
1157 u32 led_reg, led_mode;
1160 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1162 /* Get LED link active from the LEDCTL register */
1163 for (i = 0; i < 4; i++) {
1164 led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);
1166 if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
1167 IXGBE_LED_LINK_ACTIVE) {
1168 mac->led_link_act = i;
1169 return IXGBE_SUCCESS;
1174 * If LEDCTL register does not have the LED link active set, then use
1175 * known MAC defaults.
1177 switch (hw->mac.type) {
1178 case ixgbe_mac_X550EM_a:
1179 case ixgbe_mac_X550EM_x:
1180 mac->led_link_act = 1;
1183 mac->led_link_act = 2;
1185 return IXGBE_SUCCESS;
1189 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1190 * @hw: pointer to hardware structure
1191 * @index: led number to turn on
1193 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1195 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1197 DEBUGFUNC("ixgbe_led_on_generic");
1200 return IXGBE_ERR_PARAM;
1202 /* To turn on the LED, set mode to ON. */
1203 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1204 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1205 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1206 IXGBE_WRITE_FLUSH(hw);
1208 return IXGBE_SUCCESS;
1212 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
1213 * @hw: pointer to hardware structure
1214 * @index: led number to turn off
1216 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1218 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1220 DEBUGFUNC("ixgbe_led_off_generic");
1223 return IXGBE_ERR_PARAM;
1225 /* To turn off the LED, set mode to OFF. */
1226 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1227 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1228 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1229 IXGBE_WRITE_FLUSH(hw);
1231 return IXGBE_SUCCESS;
1235 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1236 * @hw: pointer to hardware structure
1238 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1239 * ixgbe_hw struct in order to set up EEPROM access.
1241 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
1243 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1247 DEBUGFUNC("ixgbe_init_eeprom_params_generic");
1249 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1250 eeprom->type = ixgbe_eeprom_none;
1251 /* Set default semaphore delay to 10ms which is a well
1253 eeprom->semaphore_delay = 10;
1254 /* Clear EEPROM page size, it will be initialized as needed */
1255 eeprom->word_page_size = 0;
1258 * Check for EEPROM present first.
1259 * If not present leave as none
1261 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1262 if (eec & IXGBE_EEC_PRES) {
1263 eeprom->type = ixgbe_eeprom_spi;
1266 * SPI EEPROM is assumed here. This code would need to
1267 * change if a future EEPROM is not SPI.
1269 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1270 IXGBE_EEC_SIZE_SHIFT);
1271 eeprom->word_size = 1 << (eeprom_size +
1272 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1275 if (eec & IXGBE_EEC_ADDR_SIZE)
1276 eeprom->address_bits = 16;
1278 eeprom->address_bits = 8;
1279 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
1280 "%d\n", eeprom->type, eeprom->word_size,
1281 eeprom->address_bits);
1284 return IXGBE_SUCCESS;
1288 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1289 * @hw: pointer to hardware structure
1290 * @offset: offset within the EEPROM to write
1291 * @words: number of word(s)
1292 * @data: 16 bit word(s) to write to EEPROM
1294 * Reads 16 bit word(s) from EEPROM through bit-bang method
1296 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1297 u16 words, u16 *data)
1299 s32 status = IXGBE_SUCCESS;
1302 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1304 hw->eeprom.ops.init_params(hw);
1307 status = IXGBE_ERR_INVALID_ARGUMENT;
1311 if (offset + words > hw->eeprom.word_size) {
1312 status = IXGBE_ERR_EEPROM;
1317 * The EEPROM page size cannot be queried from the chip. We do lazy
1318 * initialization. It is worth to do that when we write large buffer.
1320 if ((hw->eeprom.word_page_size == 0) &&
1321 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1322 ixgbe_detect_eeprom_page_size_generic(hw, offset);
1325 * We cannot hold synchronization semaphores for too long
1326 * to avoid other entity starvation. However it is more efficient
1327 * to read in bursts than synchronizing access for each word.
1329 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1330 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1331 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1332 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1335 if (status != IXGBE_SUCCESS)
1344 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1345 * @hw: pointer to hardware structure
1346 * @offset: offset within the EEPROM to be written to
1347 * @words: number of word(s)
1348 * @data: 16 bit word(s) to be written to the EEPROM
1350 * If ixgbe_eeprom_update_checksum is not called after this function, the
1351 * EEPROM will most likely contain an invalid checksum.
1353 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1354 u16 words, u16 *data)
1360 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
1362 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
1364 /* Prepare the EEPROM for writing */
1365 status = ixgbe_acquire_eeprom(hw);
1367 if (status == IXGBE_SUCCESS) {
1368 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1369 ixgbe_release_eeprom(hw);
1370 status = IXGBE_ERR_EEPROM;
1374 if (status == IXGBE_SUCCESS) {
1375 for (i = 0; i < words; i++) {
1376 ixgbe_standby_eeprom(hw);
1378 /* Send the WRITE ENABLE command (8 bit opcode ) */
1379 ixgbe_shift_out_eeprom_bits(hw,
1380 IXGBE_EEPROM_WREN_OPCODE_SPI,
1381 IXGBE_EEPROM_OPCODE_BITS);
1383 ixgbe_standby_eeprom(hw);
1386 * Some SPI eeproms use the 8th address bit embedded
1389 if ((hw->eeprom.address_bits == 8) &&
1390 ((offset + i) >= 128))
1391 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1393 /* Send the Write command (8-bit opcode + addr) */
1394 ixgbe_shift_out_eeprom_bits(hw, write_opcode,
1395 IXGBE_EEPROM_OPCODE_BITS);
1396 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1397 hw->eeprom.address_bits);
1399 page_size = hw->eeprom.word_page_size;
1401 /* Send the data in burst via SPI*/
1404 word = (word >> 8) | (word << 8);
1405 ixgbe_shift_out_eeprom_bits(hw, word, 16);
1410 /* do not wrap around page */
1411 if (((offset + i) & (page_size - 1)) ==
1414 } while (++i < words);
1416 ixgbe_standby_eeprom(hw);
1419 /* Done with writing - release the EEPROM */
1420 ixgbe_release_eeprom(hw);
1427 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1428 * @hw: pointer to hardware structure
1429 * @offset: offset within the EEPROM to be written to
1430 * @data: 16 bit word to be written to the EEPROM
1432 * If ixgbe_eeprom_update_checksum is not called after this function, the
1433 * EEPROM will most likely contain an invalid checksum.
1435 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1439 DEBUGFUNC("ixgbe_write_eeprom_generic");
1441 hw->eeprom.ops.init_params(hw);
1443 if (offset >= hw->eeprom.word_size) {
1444 status = IXGBE_ERR_EEPROM;
1448 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1455 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1456 * @hw: pointer to hardware structure
1457 * @offset: offset within the EEPROM to be read
1458 * @data: read 16 bit words(s) from EEPROM
1459 * @words: number of word(s)
1461 * Reads 16 bit word(s) from EEPROM through bit-bang method
1463 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1464 u16 words, u16 *data)
1466 s32 status = IXGBE_SUCCESS;
1469 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1471 hw->eeprom.ops.init_params(hw);
1474 status = IXGBE_ERR_INVALID_ARGUMENT;
1478 if (offset + words > hw->eeprom.word_size) {
1479 status = IXGBE_ERR_EEPROM;
1484 * We cannot hold synchronization semaphores for too long
1485 * to avoid other entity starvation. However it is more efficient
1486 * to read in bursts than synchronizing access for each word.
1488 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1489 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1490 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1492 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1495 if (status != IXGBE_SUCCESS)
1504 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1505 * @hw: pointer to hardware structure
1506 * @offset: offset within the EEPROM to be read
1507 * @words: number of word(s)
1508 * @data: read 16 bit word(s) from EEPROM
1510 * Reads 16 bit word(s) from EEPROM through bit-bang method
1512 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1513 u16 words, u16 *data)
1517 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1520 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1522 /* Prepare the EEPROM for reading */
1523 status = ixgbe_acquire_eeprom(hw);
1525 if (status == IXGBE_SUCCESS) {
1526 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1527 ixgbe_release_eeprom(hw);
1528 status = IXGBE_ERR_EEPROM;
1532 if (status == IXGBE_SUCCESS) {
1533 for (i = 0; i < words; i++) {
1534 ixgbe_standby_eeprom(hw);
1536 * Some SPI eeproms use the 8th address bit embedded
1539 if ((hw->eeprom.address_bits == 8) &&
1540 ((offset + i) >= 128))
1541 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1543 /* Send the READ command (opcode + addr) */
1544 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1545 IXGBE_EEPROM_OPCODE_BITS);
1546 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1547 hw->eeprom.address_bits);
1549 /* Read the data. */
1550 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1551 data[i] = (word_in >> 8) | (word_in << 8);
1554 /* End this read operation */
1555 ixgbe_release_eeprom(hw);
1562 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1563 * @hw: pointer to hardware structure
1564 * @offset: offset within the EEPROM to be read
1565 * @data: read 16 bit value from EEPROM
1567 * Reads 16 bit value from EEPROM through bit-bang method
1569 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1574 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1576 hw->eeprom.ops.init_params(hw);
1578 if (offset >= hw->eeprom.word_size) {
1579 status = IXGBE_ERR_EEPROM;
1583 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1590 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1591 * @hw: pointer to hardware structure
1592 * @offset: offset of word in the EEPROM to read
1593 * @words: number of word(s)
1594 * @data: 16 bit word(s) from the EEPROM
1596 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1598 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1599 u16 words, u16 *data)
1602 s32 status = IXGBE_SUCCESS;
1605 DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1607 hw->eeprom.ops.init_params(hw);
1610 status = IXGBE_ERR_INVALID_ARGUMENT;
1611 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1615 if (offset >= hw->eeprom.word_size) {
1616 status = IXGBE_ERR_EEPROM;
1617 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1621 for (i = 0; i < words; i++) {
1622 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1623 IXGBE_EEPROM_RW_REG_START;
1625 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1626 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1628 if (status == IXGBE_SUCCESS) {
1629 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1630 IXGBE_EEPROM_RW_REG_DATA);
1632 DEBUGOUT("Eeprom read timed out\n");
1641 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1642 * @hw: pointer to hardware structure
1643 * @offset: offset within the EEPROM to be used as a scratch pad
1645 * Discover EEPROM page size by writing marching data at given offset.
1646 * This function is called only when we are writing a new large buffer
1647 * at given offset so the data would be overwritten anyway.
1649 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1652 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1653 s32 status = IXGBE_SUCCESS;
1656 DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1658 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1661 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1662 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1663 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1664 hw->eeprom.word_page_size = 0;
1665 if (status != IXGBE_SUCCESS)
1668 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1669 if (status != IXGBE_SUCCESS)
1673 * When writing in burst more than the actual page size
1674 * EEPROM address wraps around current page.
1676 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1678 DEBUGOUT1("Detected EEPROM page size = %d words.",
1679 hw->eeprom.word_page_size);
1685 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1686 * @hw: pointer to hardware structure
1687 * @offset: offset of word in the EEPROM to read
1688 * @data: word read from the EEPROM
1690 * Reads a 16 bit word from the EEPROM using the EERD register.
1692 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1694 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1698 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1699 * @hw: pointer to hardware structure
1700 * @offset: offset of word in the EEPROM to write
1701 * @words: number of word(s)
1702 * @data: word(s) write to the EEPROM
1704 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1706 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1707 u16 words, u16 *data)
1710 s32 status = IXGBE_SUCCESS;
1713 DEBUGFUNC("ixgbe_write_eewr_generic");
1715 hw->eeprom.ops.init_params(hw);
1718 status = IXGBE_ERR_INVALID_ARGUMENT;
1719 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1723 if (offset >= hw->eeprom.word_size) {
1724 status = IXGBE_ERR_EEPROM;
1725 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1729 for (i = 0; i < words; i++) {
1730 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1731 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1732 IXGBE_EEPROM_RW_REG_START;
1734 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1735 if (status != IXGBE_SUCCESS) {
1736 DEBUGOUT("Eeprom write EEWR timed out\n");
1740 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1742 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1743 if (status != IXGBE_SUCCESS) {
1744 DEBUGOUT("Eeprom write EEWR timed out\n");
1754 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1755 * @hw: pointer to hardware structure
1756 * @offset: offset of word in the EEPROM to write
1757 * @data: word write to the EEPROM
1759 * Write a 16 bit word to the EEPROM using the EEWR register.
1761 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1763 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1767 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1768 * @hw: pointer to hardware structure
1769 * @ee_reg: EEPROM flag for polling
1771 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1772 * read or write is done respectively.
1774 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1778 s32 status = IXGBE_ERR_EEPROM;
1780 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1782 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1783 if (ee_reg == IXGBE_NVM_POLL_READ)
1784 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1786 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1788 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1789 status = IXGBE_SUCCESS;
1795 if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1796 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1797 "EEPROM read/write done polling timed out");
1803 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1804 * @hw: pointer to hardware structure
1806 * Prepares EEPROM for access using bit-bang method. This function should
1807 * be called before issuing a command to the EEPROM.
1809 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1811 s32 status = IXGBE_SUCCESS;
1815 DEBUGFUNC("ixgbe_acquire_eeprom");
1817 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1819 status = IXGBE_ERR_SWFW_SYNC;
1821 if (status == IXGBE_SUCCESS) {
1822 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1824 /* Request EEPROM Access */
1825 eec |= IXGBE_EEC_REQ;
1826 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1828 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1829 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1830 if (eec & IXGBE_EEC_GNT)
1835 /* Release if grant not acquired */
1836 if (!(eec & IXGBE_EEC_GNT)) {
1837 eec &= ~IXGBE_EEC_REQ;
1838 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1839 DEBUGOUT("Could not acquire EEPROM grant\n");
1841 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1842 status = IXGBE_ERR_EEPROM;
1845 /* Setup EEPROM for Read/Write */
1846 if (status == IXGBE_SUCCESS) {
1847 /* Clear CS and SK */
1848 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1849 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1850 IXGBE_WRITE_FLUSH(hw);
1858 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1859 * @hw: pointer to hardware structure
1861 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1863 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1865 s32 status = IXGBE_ERR_EEPROM;
1870 DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1873 /* Get SMBI software semaphore between device drivers first */
1874 for (i = 0; i < timeout; i++) {
1876 * If the SMBI bit is 0 when we read it, then the bit will be
1877 * set and we have the semaphore
1879 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1880 if (!(swsm & IXGBE_SWSM_SMBI)) {
1881 status = IXGBE_SUCCESS;
1888 DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1891 * this release is particularly important because our attempts
1892 * above to get the semaphore may have succeeded, and if there
1893 * was a timeout, we should unconditionally clear the semaphore
1894 * bits to free the driver to make progress
1896 ixgbe_release_eeprom_semaphore(hw);
1901 * If the SMBI bit is 0 when we read it, then the bit will be
1902 * set and we have the semaphore
1904 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1905 if (!(swsm & IXGBE_SWSM_SMBI))
1906 status = IXGBE_SUCCESS;
1909 /* Now get the semaphore between SW/FW through the SWESMBI bit */
1910 if (status == IXGBE_SUCCESS) {
1911 for (i = 0; i < timeout; i++) {
1912 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1914 /* Set the SW EEPROM semaphore bit to request access */
1915 swsm |= IXGBE_SWSM_SWESMBI;
1916 IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);
1919 * If we set the bit successfully then we got the
1922 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1923 if (swsm & IXGBE_SWSM_SWESMBI)
1930 * Release semaphores and return error if SW EEPROM semaphore
1931 * was not granted because we don't have access to the EEPROM
1934 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1935 "SWESMBI Software EEPROM semaphore not granted.\n");
1936 ixgbe_release_eeprom_semaphore(hw);
1937 status = IXGBE_ERR_EEPROM;
1940 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1941 "Software semaphore SMBI between device drivers "
1949 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1950 * @hw: pointer to hardware structure
1952 * This function clears hardware semaphore bits.
1954 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1958 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1960 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1962 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1963 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1964 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1965 IXGBE_WRITE_FLUSH(hw);
1969 * ixgbe_ready_eeprom - Polls for EEPROM ready
1970 * @hw: pointer to hardware structure
1972 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1974 s32 status = IXGBE_SUCCESS;
1978 DEBUGFUNC("ixgbe_ready_eeprom");
1981 * Read "Status Register" repeatedly until the LSB is cleared. The
1982 * EEPROM will signal that the command has been completed by clearing
1983 * bit 0 of the internal status register. If it's not cleared within
1984 * 5 milliseconds, then error out.
1986 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1987 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1988 IXGBE_EEPROM_OPCODE_BITS);
1989 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1990 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1994 ixgbe_standby_eeprom(hw);
1998 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1999 * devices (and only 0-5mSec on 5V devices)
2001 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
2002 DEBUGOUT("SPI EEPROM Status error\n");
2003 status = IXGBE_ERR_EEPROM;
2010 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
2011 * @hw: pointer to hardware structure
2013 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
2017 DEBUGFUNC("ixgbe_standby_eeprom");
2019 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2021 /* Toggle CS to flush commands */
2022 eec |= IXGBE_EEC_CS;
2023 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2024 IXGBE_WRITE_FLUSH(hw);
2026 eec &= ~IXGBE_EEC_CS;
2027 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2028 IXGBE_WRITE_FLUSH(hw);
2033 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
2034 * @hw: pointer to hardware structure
2035 * @data: data to send to the EEPROM
2036 * @count: number of bits to shift out
2038 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
2045 DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
2047 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2050 * Mask is used to shift "count" bits of "data" out to the EEPROM
2051 * one bit at a time. Determine the starting bit based on count
2053 mask = 0x01 << (count - 1);
2055 for (i = 0; i < count; i++) {
2057 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
2058 * "1", and then raising and then lowering the clock (the SK
2059 * bit controls the clock input to the EEPROM). A "0" is
2060 * shifted out to the EEPROM by setting "DI" to "0" and then
2061 * raising and then lowering the clock.
2064 eec |= IXGBE_EEC_DI;
2066 eec &= ~IXGBE_EEC_DI;
2068 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2069 IXGBE_WRITE_FLUSH(hw);
2073 ixgbe_raise_eeprom_clk(hw, &eec);
2074 ixgbe_lower_eeprom_clk(hw, &eec);
2077 * Shift mask to signify next bit of data to shift in to the
2083 /* We leave the "DI" bit set to "0" when we leave this routine. */
2084 eec &= ~IXGBE_EEC_DI;
2085 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2086 IXGBE_WRITE_FLUSH(hw);
2090 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
2091 * @hw: pointer to hardware structure
2093 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
2099 DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
2102 * In order to read a register from the EEPROM, we need to shift
2103 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
2104 * the clock input to the EEPROM (setting the SK bit), and then reading
2105 * the value of the "DO" bit. During this "shifting in" process the
2106 * "DI" bit should always be clear.
2108 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2110 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
2112 for (i = 0; i < count; i++) {
2114 ixgbe_raise_eeprom_clk(hw, &eec);
2116 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2118 eec &= ~(IXGBE_EEC_DI);
2119 if (eec & IXGBE_EEC_DO)
2122 ixgbe_lower_eeprom_clk(hw, &eec);
2129 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2130 * @hw: pointer to hardware structure
2131 * @eec: EEC register's current value
2133 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2135 DEBUGFUNC("ixgbe_raise_eeprom_clk");
2138 * Raise the clock input to the EEPROM
2139 * (setting the SK bit), then delay
2141 *eec = *eec | IXGBE_EEC_SK;
2142 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2143 IXGBE_WRITE_FLUSH(hw);
2148 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2149 * @hw: pointer to hardware structure
2150 * @eecd: EECD's current value
2152 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2154 DEBUGFUNC("ixgbe_lower_eeprom_clk");
2157 * Lower the clock input to the EEPROM (clearing the SK bit), then
2160 *eec = *eec & ~IXGBE_EEC_SK;
2161 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2162 IXGBE_WRITE_FLUSH(hw);
2167 * ixgbe_release_eeprom - Release EEPROM, release semaphores
2168 * @hw: pointer to hardware structure
2170 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2174 DEBUGFUNC("ixgbe_release_eeprom");
2176 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2178 eec |= IXGBE_EEC_CS; /* Pull CS high */
2179 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2181 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2182 IXGBE_WRITE_FLUSH(hw);
2186 /* Stop requesting EEPROM access */
2187 eec &= ~IXGBE_EEC_REQ;
2188 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2190 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2192 /* Delay before attempt to obtain semaphore again to allow FW access */
2193 msec_delay(hw->eeprom.semaphore_delay);
2197 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2198 * @hw: pointer to hardware structure
2200 * Returns a negative error code on error, or the 16-bit checksum
2202 s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
2211 DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
2213 /* Include 0x0-0x3F in the checksum */
2214 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
2215 if (hw->eeprom.ops.read(hw, i, &word)) {
2216 DEBUGOUT("EEPROM read failed\n");
2217 return IXGBE_ERR_EEPROM;
2222 /* Include all data from pointers except for the fw pointer */
2223 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
2224 if (hw->eeprom.ops.read(hw, i, &pointer)) {
2225 DEBUGOUT("EEPROM read failed\n");
2226 return IXGBE_ERR_EEPROM;
2229 /* If the pointer seems invalid */
2230 if (pointer == 0xFFFF || pointer == 0)
2233 if (hw->eeprom.ops.read(hw, pointer, &length)) {
2234 DEBUGOUT("EEPROM read failed\n");
2235 return IXGBE_ERR_EEPROM;
2238 if (length == 0xFFFF || length == 0)
2241 for (j = pointer + 1; j <= pointer + length; j++) {
2242 if (hw->eeprom.ops.read(hw, j, &word)) {
2243 DEBUGOUT("EEPROM read failed\n");
2244 return IXGBE_ERR_EEPROM;
2250 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
2252 return (s32)checksum;
2256 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2257 * @hw: pointer to hardware structure
2258 * @checksum_val: calculated checksum
2260 * Performs checksum calculation and validates the EEPROM checksum. If the
2261 * caller does not need checksum_val, the value can be NULL.
2263 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2268 u16 read_checksum = 0;
2270 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2272 /* Read the first word from the EEPROM. If this times out or fails, do
2273 * not continue or we could be in for a very long wait while every
2276 status = hw->eeprom.ops.read(hw, 0, &checksum);
2278 DEBUGOUT("EEPROM read failed\n");
2282 status = hw->eeprom.ops.calc_checksum(hw);
2286 checksum = (u16)(status & 0xffff);
2288 status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2290 DEBUGOUT("EEPROM read failed\n");
2294 /* Verify read checksum from EEPROM is the same as
2295 * calculated checksum
2297 if (read_checksum != checksum)
2298 status = IXGBE_ERR_EEPROM_CHECKSUM;
2300 /* If the user cares, return the calculated checksum */
2302 *checksum_val = checksum;
2308 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2309 * @hw: pointer to hardware structure
2311 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2316 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2318 /* Read the first word from the EEPROM. If this times out or fails, do
2319 * not continue or we could be in for a very long wait while every
2322 status = hw->eeprom.ops.read(hw, 0, &checksum);
2324 DEBUGOUT("EEPROM read failed\n");
2328 status = hw->eeprom.ops.calc_checksum(hw);
2332 checksum = (u16)(status & 0xffff);
2334 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
2340 * ixgbe_validate_mac_addr - Validate MAC address
2341 * @mac_addr: pointer to MAC address.
2343 * Tests a MAC address to ensure it is a valid Individual Address.
2345 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2347 s32 status = IXGBE_SUCCESS;
2349 DEBUGFUNC("ixgbe_validate_mac_addr");
2351 /* Make sure it is not a multicast address */
2352 if (IXGBE_IS_MULTICAST(mac_addr)) {
2353 status = IXGBE_ERR_INVALID_MAC_ADDR;
2354 /* Not a broadcast address */
2355 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
2356 status = IXGBE_ERR_INVALID_MAC_ADDR;
2357 /* Reject the zero address */
2358 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2359 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2360 status = IXGBE_ERR_INVALID_MAC_ADDR;
2366 * ixgbe_set_rar_generic - Set Rx address register
2367 * @hw: pointer to hardware structure
2368 * @index: Receive address register to write
2369 * @addr: Address to put into receive address register
2370 * @vmdq: VMDq "set" or "pool" index
2371 * @enable_addr: set flag that address is active
2373 * Puts an ethernet address into a receive address register.
2375 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2378 u32 rar_low, rar_high;
2379 u32 rar_entries = hw->mac.num_rar_entries;
2381 DEBUGFUNC("ixgbe_set_rar_generic");
2383 /* Make sure we are using a valid rar index range */
2384 if (index >= rar_entries) {
2385 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2386 "RAR index %d is out of range.\n", index);
2387 return IXGBE_ERR_INVALID_ARGUMENT;
2390 /* setup VMDq pool selection before this RAR gets enabled */
2391 hw->mac.ops.set_vmdq(hw, index, vmdq);
2394 * HW expects these in little endian so we reverse the byte
2395 * order from network order (big endian) to little endian
2397 rar_low = ((u32)addr[0] |
2398 ((u32)addr[1] << 8) |
2399 ((u32)addr[2] << 16) |
2400 ((u32)addr[3] << 24));
2402 * Some parts put the VMDq setting in the extra RAH bits,
2403 * so save everything except the lower 16 bits that hold part
2404 * of the address and the address valid bit.
2406 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2407 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2408 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2410 if (enable_addr != 0)
2411 rar_high |= IXGBE_RAH_AV;
2413 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2414 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2416 return IXGBE_SUCCESS;
2420 * ixgbe_clear_rar_generic - Remove Rx address register
2421 * @hw: pointer to hardware structure
2422 * @index: Receive address register to write
2424 * Clears an ethernet address from a receive address register.
2426 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2429 u32 rar_entries = hw->mac.num_rar_entries;
2431 DEBUGFUNC("ixgbe_clear_rar_generic");
2433 /* Make sure we are using a valid rar index range */
2434 if (index >= rar_entries) {
2435 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2436 "RAR index %d is out of range.\n", index);
2437 return IXGBE_ERR_INVALID_ARGUMENT;
2441 * Some parts put the VMDq setting in the extra RAH bits,
2442 * so save everything except the lower 16 bits that hold part
2443 * of the address and the address valid bit.
2445 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2446 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2448 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2449 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2451 /* clear VMDq pool/queue selection for this RAR */
2452 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2454 return IXGBE_SUCCESS;
/**
 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
 * @hw: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers. Clears the multicast table. Assumes
 * the receiver is in reset when the routine is called.
 **/
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
    u32 rar_entries = hw->mac.num_rar_entries;

    DEBUGFUNC("ixgbe_init_rx_addrs_generic");

    /*
     * If the current mac address is valid, assume it is a software override
     * to the permanent address.
     * Otherwise, use the permanent address from the eeprom.
     */
    if (ixgbe_validate_mac_addr(hw->mac.addr) ==
        IXGBE_ERR_INVALID_MAC_ADDR) {
        /* Get the MAC address from the RAR0 for later reference */
        hw->mac.ops.get_mac_addr(hw, hw->mac.addr);

        DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
              hw->mac.addr[0], hw->mac.addr[1],
        DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
              hw->mac.addr[4], hw->mac.addr[5]);
        /* Setup the receive address. */
        DEBUGOUT("Overriding MAC Address in RAR[0]\n");
        DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
              hw->mac.addr[0], hw->mac.addr[1],
        DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
              hw->mac.addr[4], hw->mac.addr[5]);

        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

    /* clear VMDq pool/queue selection for RAR 0 */
    hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

    hw->addr_ctrl.overflow_promisc = 0;

    /* RAR[0] now holds the MAC address; the rest are unused */
    hw->addr_ctrl.rar_used_count = 1;

    /* Zero out the other receive addresses. */
    DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
    for (i = 1; i < rar_entries; i++) {
        IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);

    /* Clear the MTA; keep the configured multicast filter type in MCSTCTRL */
    hw->addr_ctrl.mta_in_use = 0;
    IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);

    DEBUGOUT(" Clearing MTA\n");
    for (i = 0; i < hw->mac.mcft_size; i++)
        IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);

    ixgbe_init_uta_tables(hw);

    return IXGBE_SUCCESS;
2527 * ixgbe_add_uc_addr - Adds a secondary unicast address.
2528 * @hw: pointer to hardware structure
2529 * @addr: new address
2531 * Adds it to unused receive address register or goes into promiscuous mode.
2533 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2535 u32 rar_entries = hw->mac.num_rar_entries;
2538 DEBUGFUNC("ixgbe_add_uc_addr");
2540 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2541 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2544 * Place this address in the RAR if there is room,
2545 * else put the controller into promiscuous mode
2547 if (hw->addr_ctrl.rar_used_count < rar_entries) {
2548 rar = hw->addr_ctrl.rar_used_count;
2549 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2550 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2551 hw->addr_ctrl.rar_used_count++;
2553 hw->addr_ctrl.overflow_promisc++;
2556 DEBUGOUT("ixgbe_add_uc_addr Complete\n");
/**
 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
 * @hw: pointer to hardware structure
 * @addr_list: the list of new addresses
 * @addr_count: number of addresses
 * @next: iterator function to walk the address list
 *
 * The given list replaces any existing list. Clears the secondary addrs from
 * receive address registers. Uses unused receive address registers for the
 * first secondary addresses, and falls back to promiscuous mode as needed.
 *
 * Drivers using secondary unicast addresses must set user_set_promisc when
 * manually putting the device into promiscuous mode.
 **/
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
                      u32 addr_count, ixgbe_mc_addr_itr next)
    u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;

    DEBUGFUNC("ixgbe_update_uc_addr_list_generic");

    /*
     * Clear accounting of old secondary address list,
     * don't count RAR[0]
     */
    uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
    hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
    hw->addr_ctrl.overflow_promisc = 0;

    /* Zero out the other receive addresses */
    DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
    for (i = 0; i < uc_addr_in_use; i++) {
        IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);

    /* Add the new addresses; overflow_promisc counts those that didn't fit */
    for (i = 0; i < addr_count; i++) {
        DEBUGOUT(" Adding the secondary addresses:\n");
        addr = next(hw, &addr_list, &vmdq);
        ixgbe_add_uc_addr(hw, addr, vmdq);

    if (hw->addr_ctrl.overflow_promisc) {
        /* enable promisc if not already in overflow or set by user */
        if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
            DEBUGOUT(" Entering address overflow promisc mode\n");
            fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
            fctrl |= IXGBE_FCTRL_UPE;
            IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

        /* only disable if set by overflow, not by user */
        if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
            DEBUGOUT(" Leaving address overflow promisc mode\n");
            fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
            fctrl &= ~IXGBE_FCTRL_UPE;
            IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

    DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
    return IXGBE_SUCCESS;
2630 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2631 * @hw: pointer to hardware structure
2632 * @mc_addr: the multicast address
2634 * Extracts the 12 bits, from a multicast address, to determine which
2635 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
2636 * incoming rx multicast addresses, to determine the bit-vector to check in
2637 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2638 * by the MO field of the MCSTCTRL. The MO field is set during initialization
2639 * to mc_filter_type.
2641 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2645 DEBUGFUNC("ixgbe_mta_vector");
2647 switch (hw->mac.mc_filter_type) {
2648 case 0: /* use bits [47:36] of the address */
2649 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2651 case 1: /* use bits [46:35] of the address */
2652 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2654 case 2: /* use bits [45:34] of the address */
2655 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2657 case 3: /* use bits [43:32] of the address */
2658 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2660 default: /* Invalid mc_filter_type */
2661 DEBUGOUT("MC filter type param set incorrectly\n");
2666 /* vector can only be 12-bits or boundary will be exceeded */
2672 * ixgbe_set_mta - Set bit-vector in multicast table
2673 * @hw: pointer to hardware structure
2674 * @hash_value: Multicast address hash value
2676 * Sets the bit-vector in the multicast table.
2678 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2684 DEBUGFUNC("ixgbe_set_mta");
2686 hw->addr_ctrl.mta_in_use++;
2688 vector = ixgbe_mta_vector(hw, mc_addr);
2689 DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2692 * The MTA is a register array of 128 32-bit registers. It is treated
2693 * like an array of 4096 bits. We want to set bit
2694 * BitArray[vector_value]. So we figure out what register the bit is
2695 * in, read it, OR in the new bit, then write back the new value. The
2696 * register is determined by the upper 7 bits of the vector value and
2697 * the bit within that register are determined by the lower 5 bits of
2700 vector_reg = (vector >> 5) & 0x7F;
2701 vector_bit = vector & 0x1F;
2702 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
/**
 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
 * @hw: pointer to hardware structure
 * @mc_addr_list: the list of new multicast addresses
 * @mc_addr_count: number of addresses
 * @next: iterator function to walk the multicast address list
 * @clear: flag, when set clears the table beforehand
 *
 * When the clear flag is set, the given list replaces any existing list.
 * Hashes the given addresses into the multicast table.
 **/
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
                      u32 mc_addr_count, ixgbe_mc_addr_itr next,
    DEBUGFUNC("ixgbe_update_mc_addr_list_generic");

    /*
     * Set the new number of MC addresses that we are being requested to
     * use.
     */
    hw->addr_ctrl.num_mc_addrs = mc_addr_count;
    hw->addr_ctrl.mta_in_use = 0;

    /* Clear mta_shadow */
    DEBUGOUT(" Clearing MTA\n");
    memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

    /* Update mta_shadow */
    for (i = 0; i < mc_addr_count; i++) {
        DEBUGOUT(" Adding the multicast addresses:\n");
        ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));

    /* Push the shadow table into the hardware MTA registers */
    for (i = 0; i < hw->mac.mcft_size; i++)
        IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
                      hw->mac.mta_shadow[i]);

    /* Enable the hash filter only when at least one bit is in use */
    if (hw->addr_ctrl.mta_in_use > 0)
        IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
                IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);

    DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
    return IXGBE_SUCCESS;
2758 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2759 * @hw: pointer to hardware structure
2761 * Enables multicast address in RAR and the use of the multicast hash table.
2763 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2765 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2767 DEBUGFUNC("ixgbe_enable_mc_generic");
2769 if (a->mta_in_use > 0)
2770 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2771 hw->mac.mc_filter_type);
2773 return IXGBE_SUCCESS;
2777 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2778 * @hw: pointer to hardware structure
2780 * Disables multicast address in RAR and the use of the multicast hash table.
2782 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2784 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2786 DEBUGFUNC("ixgbe_disable_mc_generic");
2788 if (a->mta_in_use > 0)
2789 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2791 return IXGBE_SUCCESS;
/**
 * ixgbe_fc_enable_generic - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
    s32 ret_val = IXGBE_SUCCESS;
    u32 mflcn_reg, fccfg_reg;

    DEBUGFUNC("ixgbe_fc_enable_generic");

    /* Validate the water mark configuration */
    if (!hw->fc.pause_time) {
        ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;

    /* Low water mark of zero causes XOFF floods */
    for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
        if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
            hw->fc.high_water[i]) {
            if (!hw->fc.low_water[i] ||
                hw->fc.low_water[i] >= hw->fc.high_water[i]) {
                DEBUGOUT("Invalid water mark configuration\n");
                ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;

    /* Negotiate the fc mode to use */
    hw->mac.ops.fc_autoneg(hw);

    /* Disable any previous flow control settings */
    mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
    mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

    fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
    fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

    /*
     * The possible values of fc.current_mode are:
     * 0: Flow control is completely disabled
     * 1: Rx flow control is enabled (we can receive pause frames,
     *    but not send pause frames).
     * 2: Tx flow control is enabled (we can send pause frames but
     *    we do not support receiving pause frames).
     * 3: Both Rx and Tx flow control (symmetric) are enabled.
     */
    switch (hw->fc.current_mode) {
        /*
         * Flow control is disabled by software override or autoneg.
         * The code below will actually disable it in the HW.
         */
    case ixgbe_fc_rx_pause:
        /*
         * Rx Flow control is enabled and Tx Flow control is
         * disabled by software override. Since there really
         * isn't a way to advertise that we are capable of RX
         * Pause ONLY, we will advertise that we support both
         * symmetric and asymmetric Rx PAUSE. Later, we will
         * disable the adapter's ability to send PAUSE frames.
         */
        mflcn_reg |= IXGBE_MFLCN_RFCE;
    case ixgbe_fc_tx_pause:
        /*
         * Tx Flow control is enabled, and Rx Flow control is
         * disabled by software override.
         */
        fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
        /* Flow control (both Rx and Tx) is enabled by SW override. */
        mflcn_reg |= IXGBE_MFLCN_RFCE;
        fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
        ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
                 "Flow control param set incorrectly\n");
        ret_val = IXGBE_ERR_CONFIG;

    /* Set 802.3x based flow control settings. */
    mflcn_reg |= IXGBE_MFLCN_DPF;
    IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
    IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);

    /* Set up and enable Rx high/low water mark thresholds, enable XON. */
    for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
        if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
            hw->fc.high_water[i]) {
            /* water marks are in KB units; registers take byte<<10 */
            fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
            IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
            fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
            IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
            /*
             * In order to prevent Tx hangs when the internal Tx
             * switch is enabled we must set the high water mark
             * to the Rx packet buffer size - 24KB. This allows
             * the Tx switch to function even under heavy Rx
             * workloads.
             */
            fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;

        IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);

    /* Configure pause time (2 TCs per register) */
    reg = hw->fc.pause_time * 0x00010001;
    for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
        IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

    /* Configure flow control refresh threshold value */
    IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
/**
 * ixgbe_negotiate_fc - Negotiate flow control
 * @hw: pointer to hardware structure
 * @adv_reg: flow control advertised settings
 * @lp_reg: link partner's flow control settings
 * @adv_sym: symmetric pause bit in advertisement
 * @adv_asm: asymmetric pause bit in advertisement
 * @lp_sym: symmetric pause bit in link partner advertisement
 * @lp_asm: asymmetric pause bit in link partner advertisement
 *
 * Find the intersection between advertised settings and link partner's
 * advertised settings
 **/
s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
               u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
    if ((!(adv_reg)) || (!(lp_reg))) {
        ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
                 "Local or link partner's advertised flow control "
                 "settings are NULL. Local: %x, link partner: %x\n",
        return IXGBE_ERR_FC_NOT_NEGOTIATED;

    /* Both sides advertise symmetric pause */
    if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
        /*
         * Now we need to check if the user selected Rx ONLY
         * of pause frames. In this case, we had to advertise
         * FULL flow control because we could not advertise RX
         * ONLY. Hence, we must now check to see if we need to
         * turn OFF the TRANSMISSION of PAUSE frames.
         */
        if (hw->fc.requested_mode == ixgbe_fc_full) {
            hw->fc.current_mode = ixgbe_fc_full;
            DEBUGOUT("Flow Control = FULL.\n");
            hw->fc.current_mode = ixgbe_fc_rx_pause;
            DEBUGOUT("Flow Control=RX PAUSE frames only\n");
    /* We only advertise asym; partner advertises both: Tx-only pause */
    } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
           (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
        hw->fc.current_mode = ixgbe_fc_tx_pause;
        DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
    /* We advertise both; partner advertises asym only: Rx-only pause */
    } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
           !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
        hw->fc.current_mode = ixgbe_fc_rx_pause;
        DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
        hw->fc.current_mode = ixgbe_fc_none;
        DEBUGOUT("Flow Control = NONE.\n");
    return IXGBE_SUCCESS;
/**
 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
 * @hw: pointer to hardware structure
 *
 * Enable flow control according on 1 gig fiber.
 **/
STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
    u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
    s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;

    /*
     * On multispeed fiber at 1g, bail out if
     * - link is up but AN did not complete, or if
     * - link is up and AN completed but timed out
     */
    linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
    if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
        (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
        DEBUGOUT("Auto-Negotiation did not complete or timed out\n");

    /* Resolve against the partner's 1G PCS advertisement */
    pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
    pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);

    ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
                  pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
                  IXGBE_PCS1GANA_ASM_PAUSE,
                  IXGBE_PCS1GANA_SYM_PAUSE,
                  IXGBE_PCS1GANA_ASM_PAUSE);
/**
 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to IEEE clause 37.
 **/
STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
    u32 links2, anlp1_reg, autoc_reg, links;
    s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;

    /*
     * On backplane, bail out if
     * - backplane autoneg was not completed, or if
     * - we are 82599 and link partner is not AN enabled
     */
    links = IXGBE_READ_REG(hw, IXGBE_LINKS);
    if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
        DEBUGOUT("Auto-Negotiation did not complete\n");

    if (hw->mac.type == ixgbe_mac_82599EB) {
        links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
        if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
            DEBUGOUT("Link partner is not AN enabled\n");
    /*
     * Read the 10g AN autoc and LP ability registers and resolve
     * local flow control settings accordingly
     */
    autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);

    ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
        anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
        IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
3063 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
3064 * @hw: pointer to hardware structure
3066 * Enable flow control according to IEEE clause 37.
3068 STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
3070 u16 technology_ability_reg = 0;
3071 u16 lp_technology_ability_reg = 0;
3073 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
3074 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3075 &technology_ability_reg);
3076 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
3077 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3078 &lp_technology_ability_reg);
3080 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
3081 (u32)lp_technology_ability_reg,
3082 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
3083 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
/**
 * ixgbe_fc_autoneg - Configure flow control
 * @hw: pointer to hardware structure
 *
 * Compares our advertised flow control capabilities to those advertised by
 * our link partner, and determines the proper flow control mode to use.
 **/
void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
    s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
    ixgbe_link_speed speed;

    DEBUGFUNC("ixgbe_fc_autoneg");

    /*
     * AN should have completed when the cable was plugged in.
     * Look for reasons to bail out. Bail out if:
     * - FC autoneg is disabled, or if
     * - link is down.
     */
    if (hw->fc.disable_fc_autoneg) {
        ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
                 "Flow control autoneg is disabled");

    hw->mac.ops.check_link(hw, &speed, &link_up, false);
        ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");

    /* Dispatch on the media type; each resolver sets fc.current_mode */
    switch (hw->phy.media_type) {
    /* Autoneg flow control on fiber adapters */
    case ixgbe_media_type_fiber_qsfp:
    case ixgbe_media_type_fiber:
        if (speed == IXGBE_LINK_SPEED_1GB_FULL)
            ret_val = ixgbe_fc_autoneg_fiber(hw);

    /* Autoneg flow control on backplane adapters */
    case ixgbe_media_type_backplane:
        ret_val = ixgbe_fc_autoneg_backplane(hw);

    /* Autoneg flow control on copper adapters */
    case ixgbe_media_type_copper:
        if (ixgbe_device_supports_autoneg_fc(hw))
            ret_val = ixgbe_fc_autoneg_copper(hw);

    /* On failure fall back to the mode the user requested */
    if (ret_val == IXGBE_SUCCESS) {
        hw->fc.fc_was_autonegged = true;
        hw->fc.fc_was_autonegged = false;
        hw->fc.current_mode = hw->fc.requested_mode;
3152 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3153 * @hw: pointer to hardware structure
3155 * System-wide timeout range is encoded in PCIe Device Control2 register.
3157 * Add 10% to specified maximum and return the number of times to poll for
3158 * completion timeout, in units of 100 microsec. Never return less than
3159 * 800 = 80 millisec.
3161 STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3166 devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3167 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3170 case IXGBE_PCIDEVCTRL2_65_130ms:
3171 pollcnt = 1300; /* 130 millisec */
3173 case IXGBE_PCIDEVCTRL2_260_520ms:
3174 pollcnt = 5200; /* 520 millisec */
3176 case IXGBE_PCIDEVCTRL2_1_2s:
3177 pollcnt = 20000; /* 2 sec */
3179 case IXGBE_PCIDEVCTRL2_4_8s:
3180 pollcnt = 80000; /* 8 sec */
3182 case IXGBE_PCIDEVCTRL2_17_34s:
3183 pollcnt = 34000; /* 34 sec */
3185 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
3186 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
3187 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
3188 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
3190 pollcnt = 800; /* 80 millisec minimum */
3194 /* add 10% to spec maximum */
3195 return (pollcnt * 11) / 10;
/**
 * ixgbe_disable_pcie_master - Disable PCI-express master access
 * @hw: pointer to hardware structure
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
 * is returned signifying master requests disabled.
 **/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
    s32 status = IXGBE_SUCCESS;

    DEBUGFUNC("ixgbe_disable_pcie_master");

    /* Always set this bit to ensure any future transactions are blocked */
    IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

    /* Exit if master requests are blocked */
    if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
        IXGBE_REMOVED(hw->hw_addr))

    /* Poll for master request bit to clear */
    for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
        if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))

    /*
     * Two consecutive resets are required via CTRL.RST per datasheet
     * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
     * of this need. The first reset prevents new master requests from
     * being issued by our device. We then must wait 1usec or more for any
     * remaining completions from the PCIe bus to trickle in, and then reset
     * again to clear out any effects they may have had on our device.
     */
    DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
    hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

    if (hw->mac.type >= ixgbe_mac_X550)

    /*
     * Before proceeding, make sure that the PCIe block does not have
     * transactions pending.
     */
    poll = ixgbe_pcie_timeout_poll(hw);
    for (i = 0; i < poll; i++) {
        value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
        if (IXGBE_REMOVED(hw->hw_addr))
        if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))

    /* Transactions still pending after the full poll budget */
    ERROR_REPORT1(IXGBE_ERROR_POLLING,
             "PCIe transaction pending bit also did not clear.\n");
    status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
/**
 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
    u32 fwmask = mask << 5;

    DEBUGFUNC("ixgbe_acquire_swfw_sync");

    for (i = 0; i < timeout; i++) {
        /*
         * SW NVM semaphore bit is used for access to all
         * SW_FW_SYNC bits (not just NVM)
         */
        if (ixgbe_get_eeprom_semaphore(hw))
            return IXGBE_ERR_SWFW_SYNC;

        gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
        if (!(gssr & (fwmask | swmask))) {
            /* Resource free: claim it and drop the semaphore */
            IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
            ixgbe_release_eeprom_semaphore(hw);
            return IXGBE_SUCCESS;
        /* Resource is currently in use by FW or SW */
        ixgbe_release_eeprom_semaphore(hw);

    /* If time expired clear the bits holding the lock and retry */
    if (gssr & (fwmask | swmask))
        ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));

    return IXGBE_ERR_SWFW_SYNC;
/**
 * ixgbe_release_swfw_sync - Release SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
    DEBUGFUNC("ixgbe_release_swfw_sync");

    /* SW NVM semaphore serializes all GSSR read-modify-writes */
    ixgbe_get_eeprom_semaphore(hw);

    /* Clear our ownership bits (mask) and write GSSR back */
    gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
    IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);

    ixgbe_release_eeprom_semaphore(hw);
/**
 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
 * @hw: pointer to hardware structure
 *
 * Stops the receive data path and waits for the HW to internally empty
 * the Rx security block
 **/
s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
#define IXGBE_MAX_SECRX_POLL 40

    DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");

    /* Request the security block to stop Rx, then poll for ready */
    secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
    secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
    IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
    for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
        secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
        if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
        /* Use interrupt-safe sleep just in case */

    /* For informational purposes only */
    if (i >= IXGBE_MAX_SECRX_POLL)
        DEBUGOUT("Rx unit being enabled before security "
             "path fully disabled. Continuing with init.\n");

    return IXGBE_SUCCESS;
/**
 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
 * @hw: pointer to hardware structure
 * @locked: SW/FW lock state output; generic access needs no lock
 *	    (NOTE(review): initialization of *locked is not visible in this
 *	    excerpt — verify against the full source)
 * @reg_val: Value we read from AUTOC
 *
 * The default case requires no protection so just to the register read.
 **/
s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
    *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
    return IXGBE_SUCCESS;
/**
 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
 * @hw: pointer to hardware structure
 * @reg_val: value to write to AUTOC
 * @locked: bool to indicate whether the SW/FW lock was already taken by
 *	    a previous read (unused in the generic case)
 *
 * The default case requires no protection so just to the register write.
 **/
s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
    UNREFERENCED_1PARAMETER(locked);

    IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
    return IXGBE_SUCCESS;
3406 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3407 * @hw: pointer to hardware structure
3409 * Enables the receive data path.
3411 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3415 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3417 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3418 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3419 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3420 IXGBE_WRITE_FLUSH(hw);
3422 return IXGBE_SUCCESS;
3426 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3427 * @hw: pointer to hardware structure
3428 * @regval: register value to write to RXCTRL
3430 * Enables the Rx DMA unit
3432 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3434 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3436 if (regval & IXGBE_RXCTRL_RXEN)
3437 ixgbe_enable_rx(hw);
3439 ixgbe_disable_rx(hw);
3441 return IXGBE_SUCCESS;
/**
 * ixgbe_blink_led_start_generic - Blink LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to blink
 **/
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
    ixgbe_link_speed speed = 0;
    u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
    s32 ret_val = IXGBE_SUCCESS;
    bool locked = false;

    DEBUGFUNC("ixgbe_blink_led_start_generic");

        return IXGBE_ERR_PARAM;

    /*
     * Link must be up to auto-blink the LEDs;
     * Force it if link is down.
     */
    hw->mac.ops.check_link(hw, &speed, &link_up, false);

    /* Force link up (FLU) so the blink is visible; restart AN */
    ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
    if (ret_val != IXGBE_SUCCESS)

    autoc_reg |= IXGBE_AUTOC_AN_RESTART;
    autoc_reg |= IXGBE_AUTOC_FLU;

    ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
    if (ret_val != IXGBE_SUCCESS)

    IXGBE_WRITE_FLUSH(hw);

    /* Put the selected LED into blink mode */
    led_reg &= ~IXGBE_LED_MODE_MASK(index);
    led_reg |= IXGBE_LED_BLINK(index);
    IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
    IXGBE_WRITE_FLUSH(hw);
/**
 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to stop blinking
 **/
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
    u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
    s32 ret_val = IXGBE_SUCCESS;
    bool locked = false;

    DEBUGFUNC("ixgbe_blink_led_stop_generic");

        return IXGBE_ERR_PARAM;

    ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
    if (ret_val != IXGBE_SUCCESS)

    /* Stop forcing link up (FLU) and let autoneg run again */
    autoc_reg &= ~IXGBE_AUTOC_FLU;
    autoc_reg |= IXGBE_AUTOC_AN_RESTART;

    ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
    if (ret_val != IXGBE_SUCCESS)

    /* Return the LED to link/activity mode */
    led_reg &= ~IXGBE_LED_MODE_MASK(index);
    led_reg &= ~IXGBE_LED_BLINK(index);
    led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
    IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
    IXGBE_WRITE_FLUSH(hw);
/**
 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
 * @hw: pointer to hardware structure
 * @san_mac_offset: SAN MAC address offset
 *
 * This function will read the EEPROM location for the SAN MAC address
 * pointer, and returns the value at that location. This is used in both
 * get and set mac_addr routines.
 **/
STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
                     u16 *san_mac_offset)
    DEBUGFUNC("ixgbe_get_san_mac_addr_offset");

    /*
     * First read the EEPROM pointer to see if the MAC addresses are
     * available.
     */
    ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
        ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
                 "eeprom at offset %d failed",
                 IXGBE_SAN_MAC_ADDR_PTR);
/**
 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
 * @hw: pointer to hardware structure
 * @san_mac_addr: SAN MAC address
 *
 * Reads the SAN MAC address from the EEPROM, if it's available. This is
 * per-port, so set_lan_id() must be called before reading the addresses.
 * set_lan_id() is called by identify_sfp(), but this cannot be relied
 * upon for non-SFP connections, so we must call it here.
 **/
s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
    u16 san_mac_data, san_mac_offset;

    DEBUGFUNC("ixgbe_get_san_mac_addr_generic");

    /*
     * First read the EEPROM pointer to see if the MAC addresses are
     * available. If they're not, no point in calling set_lan_id() here.
     */
    ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
    if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
        goto san_mac_addr_out;

    /* make sure we know which port we need to program */
    hw->mac.ops.set_lan_id(hw);
    /* apply the port offset to the address offset */
    (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
             (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
    /* The address is stored as three little-endian 16-bit words */
    for (i = 0; i < 3; i++) {
        ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
            ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
                     "eeprom read at offset %d failed",
            goto san_mac_addr_out;
        san_mac_addr[i * 2] = (u8)(san_mac_data);
        san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);

    return IXGBE_SUCCESS;

    /*
     * No addresses available in this EEPROM. It's not an
     * error though, so just wipe the local address and return.
     */
    for (i = 0; i < 6; i++)
        san_mac_addr[i] = 0xFF;
    return IXGBE_SUCCESS;
3621 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3622 * @hw: pointer to hardware structure
3623 * @san_mac_addr: SAN MAC address
3625 * Write a SAN MAC address to the EEPROM.
3627 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3630 u16 san_mac_data, san_mac_offset;
3633 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3635 /* Look for SAN mac address pointer. If not defined, return */
3636 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3637 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3638 return IXGBE_ERR_NO_SAN_ADDR_PTR;
3640 /* Make sure we know which port we need to write */
3641 hw->mac.ops.set_lan_id(hw);
3642 /* Apply the port offset to the address offset */
3643 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3644 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
/* Pack each byte pair into a little-endian word; write return value is
 * not checked here (matches the rest of this file's EEPROM-write style). */
3646 for (i = 0; i < 3; i++) {
3647 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3648 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3649 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3653 return IXGBE_SUCCESS;
3657 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3658 * @hw: pointer to hardware structure
3660 * Read PCIe configuration space, and get the MSI-X vector count from
3661 * the capabilities table.
3663 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
/* Pick the per-MAC capability offset and vector ceiling. */
3669 switch (hw->mac.type) {
3670 case ixgbe_mac_82598EB:
3671 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3672 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3674 case ixgbe_mac_82599EB:
3675 case ixgbe_mac_X540:
3676 case ixgbe_mac_X550:
3677 case ixgbe_mac_X550EM_x:
3678 case ixgbe_mac_X550EM_a:
3679 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3680 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3686 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3687 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
/* Bail out early if the device has been surprise-removed. */
3688 if (IXGBE_REMOVED(hw->hw_addr))
3690 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3692 /* MSI-X count is zero-based in HW */
/* Clamp to the per-MAC maximum so a bogus config-space value is safe. */
3695 if (msix_count > max_msix_count)
3696 msix_count = max_msix_count;
3702 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3703 * @hw: pointer to hardware structure
3704 * @addr: Address to put into receive address register
3705 * @vmdq: VMDq pool to assign
3707 * Puts an ethernet address into a receive address register, or
3708 * finds the rar that it is already in; adds to the pool list
3710 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3712 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3713 u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3715 u32 rar_low, rar_high;
3716 u32 addr_low, addr_high;
3718 DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3720 /* swap bytes for HW little endian */
3721 addr_low = addr[0] | (addr[1] << 8)
3724 addr_high = addr[4] | (addr[5] << 8);
3727 * Either find the mac_id in rar or find the first empty space.
3728 * rar_highwater points to just after the highest currently used
3729 * rar in order to shorten the search. It grows when we add a new
3732 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3733 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
/* RAH.AV clear == slot unused; remember the first such slot. */
3735 if (((IXGBE_RAH_AV & rar_high) == 0)
3736 && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3737 first_empty_rar = rar;
3738 } else if ((rar_high & 0xFFFF) == addr_high) {
/* High 16 bits matched; confirm with the low 32 bits. */
3739 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3740 if (rar_low == addr_low)
3741 break; /* found it already in the rars */
3745 if (rar < hw->mac.rar_highwater) {
3746 /* already there so just add to the pool bits */
3747 ixgbe_set_vmdq(hw, rar, vmdq);
3748 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3749 /* stick it into first empty RAR slot we found */
3750 rar = first_empty_rar;
3751 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3752 } else if (rar == hw->mac.rar_highwater) {
3753 /* add it to the top of the list and inc the highwater mark */
3754 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3755 hw->mac.rar_highwater++;
3756 } else if (rar >= hw->mac.num_rar_entries) {
3757 return IXGBE_ERR_INVALID_MAC_ADDR;
3761 * If we found rar[0], make sure the default pool bit (we use pool 0)
3762 * remains cleared to be sure default pool packets will get delivered
3765 ixgbe_clear_vmdq(hw, rar, 0);
3771 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3772 * @hw: pointer to hardware struct
3773 * @rar: receive address register index to disassociate
3774 * @vmdq: VMDq pool index to remove from the rar
3776 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3778 u32 mpsar_lo, mpsar_hi;
3779 u32 rar_entries = hw->mac.num_rar_entries;
3781 DEBUGFUNC("ixgbe_clear_vmdq_generic");
3783 /* Make sure we are using a valid rar index range */
3784 if (rar >= rar_entries) {
3785 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3786 "RAR index %d is out of range.\n", rar);
3787 return IXGBE_ERR_INVALID_ARGUMENT;
/* MPSAR_LO holds pool bits 0-31, MPSAR_HI pool bits 32-63 for this RAR. */
3790 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3791 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3793 if (IXGBE_REMOVED(hw->hw_addr))
3796 if (!mpsar_lo && !mpsar_hi)
3799 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
/* Only write registers that actually have bits set. */
3801 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3805 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3808 } else if (vmdq < 32) {
3809 mpsar_lo &= ~(1 << vmdq);
3810 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3812 mpsar_hi &= ~(1 << (vmdq - 32));
3813 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3816 /* was that the last pool using this rar? */
/* RAR 0 (default MAC) and the SAN MAC RAR must never be cleared here. */
3817 if (mpsar_lo == 0 && mpsar_hi == 0 &&
3818 rar != 0 && rar != hw->mac.san_mac_rar_index)
3819 hw->mac.ops.clear_rar(hw, rar);
3821 return IXGBE_SUCCESS;
3825 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3826 * @hw: pointer to hardware struct
3827 * @rar: receive address register index to associate with a VMDq index
3828 * @vmdq: VMDq pool index
3830 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3833 u32 rar_entries = hw->mac.num_rar_entries;
3835 DEBUGFUNC("ixgbe_set_vmdq_generic");
3837 /* Make sure we are using a valid rar index range */
3838 if (rar >= rar_entries) {
3839 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3840 "RAR index %d is out of range.\n", rar);
3841 return IXGBE_ERR_INVALID_ARGUMENT;
/* Pools 0-31 live in MPSAR_LO, pools 32-63 in MPSAR_HI; set the
 * vmdq bit with a read-modify-write so other pool bits are kept. */
3845 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3847 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3849 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3850 mpsar |= 1 << (vmdq - 32);
3851 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3853 return IXGBE_SUCCESS;
3857 * This function should only be involved in the IOV mode.
3858 * In IOV mode, Default pool is next pool after the number of
3859 * VFs advertized and not 0.
3860 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3862 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3863 * @hw: pointer to hardware struct
3864 * @vmdq: VMDq pool index
3866 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3868 u32 rar = hw->mac.san_mac_rar_index;
3870 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
/* Unlike ixgbe_set_vmdq_generic, the SAN MAC RAR is owned by exactly
 * one pool, so the unused MPSAR half is written to 0 outright. */
3873 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3874 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3876 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3877 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3880 return IXGBE_SUCCESS;
3884 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3885 * @hw: pointer to hardware structure
3887 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3891 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3892 DEBUGOUT(" Clearing UTA\n");
/* The UTA is 128 32-bit registers; zero them all so no unicast
 * hash entries are left over from a previous run. */
3894 for (i = 0; i < 128; i++)
3895 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3897 return IXGBE_SUCCESS;
3901 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3902 * @hw: pointer to hardware structure
3903 * @vlan: VLAN id to write to VLAN filter
3905 * return the VLVF index where this VLAN id should be placed
3908 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
3910 s32 regindex, first_empty_slot;
3913 /* short cut the special case */
3917 /* if vlvf_bypass is set we don't want to use an empty slot, we
3918 * will simply bypass the VLVF if there are no entries present in the
3919 * VLVF that contain our VLAN
3921 first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
3923 /* add VLAN enable bit for comparison */
3924 vlan |= IXGBE_VLVF_VIEN;
3926 /* Search for the vlan id in the VLVF entries. Save off the first empty
3927 * slot found along the way.
3929 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
/* Entry 0 is deliberately excluded from the search. */
3931 for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
3932 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3935 if (!first_empty_slot && !bits)
3936 first_empty_slot = regindex;
3939 /* If we are here then we didn't find the VLAN. Return first empty
3940 * slot we found during our search, else error.
/* first_empty_slot is either a valid index (>0), 0 (nothing free), or
 * the negative IXGBE_ERR_NO_SPACE seeded by vlvf_bypass above. */
3942 if (!first_empty_slot)
3943 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");
3945 return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
3949 * ixgbe_set_vfta_generic - Set VLAN filter table
3950 * @hw: pointer to hardware structure
3951 * @vlan: VLAN id to write to VLAN filter
3952 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
3953 * @vlan_on: boolean flag to turn on/off VLAN
3954 * @vlvf_bypass: boolean flag indicating updating default pool is okay
3956 * Turn on/off specified VLAN in the VLAN filter table.
3958 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3959 bool vlan_on, bool vlvf_bypass)
3961 u32 regidx, vfta_delta, vfta;
3964 DEBUGFUNC("ixgbe_set_vfta_generic");
/* Valid VLAN IDs are 0-4095, valid pool indexes 0-63. */
3966 if (vlan > 4095 || vind > 63)
3967 return IXGBE_ERR_PARAM;
3970 * this is a 2 part operation - first the VFTA, then the
3971 * VLVF and VLVFB if VT Mode is set
3972 * We don't write the VFTA until we know the VLVF part succeeded.
3976 * The VFTA is a bitstring made up of 128 32-bit registers
3977 * that enable the particular VLAN id, much like the MTA:
3978 * bits[11-5]: which register
3979 * bits[4-0]: which bit in the register
3982 vfta_delta = 1 << (vlan % 32);
3983 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
3986 * vfta_delta represents the difference between the current value
3987 * of vfta and the value we want in the register. Since the diff
3988 * is an XOR mask we can just update the vfta using an XOR
3990 vfta_delta &= vlan_on ? ~vfta : vfta;
3994 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3996 ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
/* If the VLVF update failed, the VFTA is left untouched. */
3998 if (ret_val != IXGBE_SUCCESS) {
4005 /* Update VFTA now that we are ready for traffic */
4007 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
4009 return IXGBE_SUCCESS;
4013 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
4014 * @hw: pointer to hardware structure
4015 * @vlan: VLAN id to write to VLAN filter
4016 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
4017 * @vlan_on: boolean flag to turn on/off VLAN in VLVF
4018 * @vfta_delta: pointer to the difference between the current value of VFTA
4019 * and the desired value
4020 * @vfta: the desired value of the VFTA
4021 * @vlvf_bypass: boolean flag indicating updating default pool is okay
4023 * Turn on/off specified bit in VLVF table.
4025 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
4026 bool vlan_on, u32 *vfta_delta, u32 vfta,
4032 DEBUGFUNC("ixgbe_set_vlvf_generic");
4034 if (vlan > 4095 || vind > 63)
4035 return IXGBE_ERR_PARAM;
4037 /* If VT Mode is set
4039 * make sure the vlan is in VLVF
4040 * set the vind bit in the matching VLVFB
4042 * clear the pool bit and possibly the vind
/* Virtualization disabled: nothing to do in the VLVF, report success. */
4044 if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
4045 return IXGBE_SUCCESS;
4047 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
/* Each VLVF entry has two VLVFB pool-bit registers; vind/32 selects
 * the one holding this pool's bit. */
4051 bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
4053 /* set the pool bit */
4054 bits |= 1 << (vind % 32);
4058 /* clear the pool bit */
4059 bits ^= 1 << (vind % 32);
/* Check the companion VLVFB register: if no pools remain at all... */
4062 !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
4063 /* Clear VFTA first, then disable VLVF. Otherwise
4064 * we run the risk of stray packets leaking into
4065 * the PF via the default pool
4068 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
4070 /* disable VLVF and clear remaining bit from pool */
4071 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
4072 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
4074 return IXGBE_SUCCESS;
4077 /* If there are still bits set in the VLVFB registers
4078 * for the VLAN ID indicated we need to see if the
4079 * caller is requesting that we clear the VFTA entry bit.
4080 * If the caller has requested that we clear the VFTA
4081 * entry bit but there are still pools/VFs using this VLAN
4082 * ID entry then ignore the request. We're not worried
4083 * about the case where we're turning the VFTA VLAN ID
4084 * entry bit on, only when requested to turn it off as
4085 * there may be multiple pools and/or VFs using the
4086 * VLAN ID entry. In that case we cannot clear the
4087 * VFTA bit until all pools/VFs using that VLAN ID have also
4088 * been cleared. This will be indicated by "bits" being
4094 /* record pool change and enable VLAN ID if not already enabled */
4095 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
4096 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
4098 return IXGBE_SUCCESS;
4102 * ixgbe_clear_vfta_generic - Clear VLAN filter table
4103 * @hw: pointer to hardware structure
4105 * Clears the VLAN filer table, and the VMDq index associated with the filter
4107 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
4111 DEBUGFUNC("ixgbe_clear_vfta_generic");
/* Zero every VFTA word for this MAC's filter-table size. */
4113 for (offset = 0; offset < hw->mac.vft_size; offset++)
4114 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
/* Zero each VLVF entry and both of its VLVFB pool-bit registers. */
4116 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
4117 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
4118 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4119 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
4122 return IXGBE_SUCCESS;
4126 * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
4127 * @hw: pointer to hardware structure
4129 * Contains the logic to identify if we need to verify link for the
4132 static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
4135 /* Does FW say we need the fix */
4136 if (!hw->need_crosstalk_fix)
/* Only consider SFP+ PHYs i.e. media type fiber */
4140 switch (hw->mac.ops.get_media_type(hw)) {
4141 case ixgbe_media_type_fiber:
4142 case ixgbe_media_type_fiber_qsfp:
4152 * ixgbe_check_mac_link_generic - Determine link and speed status
4153 * @hw: pointer to hardware structure
4154 * @speed: pointer to link speed
4155 * @link_up: true when link is up
4156 * @link_up_wait_to_complete: bool used to wait for link up or not
4158 * Reads the links register to determine if link is up and the current speed
4160 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4161 bool *link_up, bool link_up_wait_to_complete)
4163 u32 links_reg, links_orig;
4166 DEBUGFUNC("ixgbe_check_mac_link_generic");
4168 /* If Crosstalk fix enabled do the sanity check of making sure
4169 * the SFP+ cage is full.
4171 if (ixgbe_need_crosstalk_fix(hw)) {
/* The ESDP bit used to detect module presence differs per MAC. */
4174 switch (hw->mac.type) {
4175 case ixgbe_mac_82599EB:
4176 sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4179 case ixgbe_mac_X550EM_x:
4180 case ixgbe_mac_X550EM_a:
4181 sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4185 /* sanity check - No SFP+ devices here */
4186 sfp_cage_full = false;
/* Empty cage: report link down / unknown speed without touching LINKS. */
4190 if (!sfp_cage_full) {
4192 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4193 return IXGBE_SUCCESS;
4197 /* clear the old state */
4198 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
4200 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4202 if (links_orig != links_reg) {
4203 DEBUGOUT2("LINKS changed from %08X to %08X\n",
4204 links_orig, links_reg);
/* Either poll for link-up (bounded by max_link_up_time) or sample once. */
4207 if (link_up_wait_to_complete) {
4208 for (i = 0; i < hw->mac.max_link_up_time; i++) {
4209 if (links_reg & IXGBE_LINKS_UP) {
4216 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4219 if (links_reg & IXGBE_LINKS_UP)
/* Decode the speed field; NON_STD on X550+ remaps 10G->2.5G, 100M->5G. */
4225 switch (links_reg & IXGBE_LINKS_SPEED_82599) {
4226 case IXGBE_LINKS_SPEED_10G_82599:
4227 *speed = IXGBE_LINK_SPEED_10GB_FULL;
4228 if (hw->mac.type >= ixgbe_mac_X550) {
4229 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4230 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
4233 case IXGBE_LINKS_SPEED_1G_82599:
4234 *speed = IXGBE_LINK_SPEED_1GB_FULL;
4236 case IXGBE_LINKS_SPEED_100_82599:
4237 *speed = IXGBE_LINK_SPEED_100_FULL;
4238 if (hw->mac.type == ixgbe_mac_X550) {
4239 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4240 *speed = IXGBE_LINK_SPEED_5GB_FULL;
4243 case IXGBE_LINKS_SPEED_10_X550EM_A:
4244 *speed = IXGBE_LINK_SPEED_UNKNOWN;
/* 10 Mb/s is only valid on the X550EM_A 1G copper devices. */
4245 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
4246 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
4247 *speed = IXGBE_LINK_SPEED_10_FULL;
4251 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4254 return IXGBE_SUCCESS;
4258 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
4260 * @hw: pointer to hardware structure
4261 * @wwnn_prefix: the alternative WWNN prefix
4262 * @wwpn_prefix: the alternative WWPN prefix
4264 * This function will read the EEPROM from the alternative SAN MAC address
4265 * block to check the support for the alternative WWNN/WWPN prefix support.
4267 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
4271 u16 alt_san_mac_blk_offset;
4273 DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
4275 /* clear output first */
/* 0xFFFF doubles as "prefix unavailable" for callers. */
4276 *wwnn_prefix = 0xFFFF;
4277 *wwpn_prefix = 0xFFFF;
4279 /* check if alternative SAN MAC is supported */
4280 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
4281 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
4282 goto wwn_prefix_err;
/* 0 / 0xFFFF pointer means the alternative SAN MAC block is absent. */
4284 if ((alt_san_mac_blk_offset == 0) ||
4285 (alt_san_mac_blk_offset == 0xFFFF))
4286 goto wwn_prefix_out;
4288 /* check capability in alternative san mac address block */
4289 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
4290 if (hw->eeprom.ops.read(hw, offset, &caps))
4291 goto wwn_prefix_err;
4292 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
4293 goto wwn_prefix_out;
4295 /* get the corresponding prefix for WWNN/WWPN */
4296 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
4297 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
4298 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4299 "eeprom read at offset %d failed", offset);
4302 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
4303 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
4304 goto wwn_prefix_err;
4307 return IXGBE_SUCCESS;
/* Read failures are logged but still reported as success; callers see
 * the 0xFFFF "unavailable" sentinels set above. */
4310 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4311 "eeprom read at offset %d failed", offset);
4312 return IXGBE_SUCCESS;
4316 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4317 * @hw: pointer to hardware structure
4318 * @bs: the fcoe boot status
4320 * This function will read the FCOE boot status from the iSCSI FCOE block
4322 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4324 u16 offset, caps, flags;
4327 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4329 /* clear output first */
4330 *bs = ixgbe_fcoe_bootstatus_unavailable;
4332 /* check if FCOE IBA block is present */
4333 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4334 status = hw->eeprom.ops.read(hw, offset, &caps);
4335 if (status != IXGBE_SUCCESS)
4338 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4341 /* check if iSCSI FCOE block is populated */
4342 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4343 if (status != IXGBE_SUCCESS)
/* 0 / 0xFFFF pointer values mean the iSCSI FCOE block does not exist. */
4346 if ((offset == 0) || (offset == 0xFFFF))
4349 /* read fcoe flags in iSCSI FCOE block */
4350 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4351 status = hw->eeprom.ops.read(hw, offset, &flags);
4352 if (status != IXGBE_SUCCESS)
/* Translate the ENABLE flag into the enabled/disabled boot status. */
4355 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4356 *bs = ixgbe_fcoe_bootstatus_enabled;
4358 *bs = ixgbe_fcoe_bootstatus_disabled;
4365 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4366 * @hw: pointer to hardware structure
4367 * @enable: enable or disable switch for MAC anti-spoofing
4368 * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
4371 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
/* Each PFVFSPOOF register covers 8 VFs: vf>>3 picks the register,
 * vf%8 picks the MAC anti-spoof bit within it. */
4373 int vf_target_reg = vf >> 3;
4374 int vf_target_shift = vf % 8;
/* 82598 has no PFVFSPOOF registers; silently no-op on that MAC. */
4377 if (hw->mac.type == ixgbe_mac_82598EB)
4380 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4382 pfvfspoof |= (1 << vf_target_shift);
4384 pfvfspoof &= ~(1 << vf_target_shift);
4385 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4389 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4390 * @hw: pointer to hardware structure
4391 * @enable: enable or disable switch for VLAN anti-spoofing
4392 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4395 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
/* Same PFVFSPOOF layout as the MAC variant, but the VLAN anti-spoof
 * bits start at IXGBE_SPOOF_VLANAS_SHIFT. */
4397 int vf_target_reg = vf >> 3;
4398 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
/* 82598 has no PFVFSPOOF registers; silently no-op on that MAC. */
4401 if (hw->mac.type == ixgbe_mac_82598EB)
4404 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4406 pfvfspoof |= (1 << vf_target_shift);
4408 pfvfspoof &= ~(1 << vf_target_shift);
4409 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4413 * ixgbe_get_device_caps_generic - Get additional device capabilities
4414 * @hw: pointer to hardware structure
4415 * @device_caps: the EEPROM word with the extra device capabilities
4417 * This function will read the EEPROM location for the device capabilities,
4418 * and return the word through device_caps.
4420 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4422 DEBUGFUNC("ixgbe_get_device_caps_generic");
/* Read result is intentionally not checked; caller gets whatever the
 * EEPROM op left in *device_caps. */
4424 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4426 return IXGBE_SUCCESS;
4430 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4431 * @hw: pointer to hardware structure
4434 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4439 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4441 /* Enable relaxed ordering */
/* Set the descriptor write-back relaxed-ordering bit per Tx queue. */
4442 for (i = 0; i < hw->mac.max_tx_queues; i++) {
4443 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4444 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4445 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
/* Rx queues get both data and header write relaxed ordering. */
4448 for (i = 0; i < hw->mac.max_rx_queues; i++) {
4449 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4450 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4451 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4452 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4458 * ixgbe_calculate_checksum - Calculate checksum for buffer
4459 * @buffer: pointer to EEPROM
4460 * @length: size of EEPROM to calculate a checksum for
4461 * Calculates the checksum for some buffer on a specified length. The
4462 * checksum calculated is returned.
4464 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4469 DEBUGFUNC("ixgbe_calculate_checksum");
4474 for (i = 0; i < length; i++)
/* Two's-complement checksum: the sum of all bytes plus the returned
 * value is 0 (mod 256). */
4477 return (u8) (0 - sum);
4481 * ixgbe_hic_unlocked - Issue command to manageability block unlocked
4482 * @hw: pointer to the HW structure
4483 * @buffer: command to write and where the return status will be placed
4484 * @length: length of buffer, must be multiple of 4 bytes
4485 * @timeout: time in ms to wait for command completion
4487 * Communicates with the manageability block. On success return IXGBE_SUCCESS
4488 * else returns semaphore error when encountering an error acquiring
4489 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4491 * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
4494 s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
4500 DEBUGFUNC("ixgbe_hic_unlocked");
4502 if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4503 DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4504 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4507 /* Set bit 9 of FWSTS clearing FW reset indication */
4508 fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
4509 IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
4511 /* Check that the host interface is enabled. */
4512 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4513 if (!(hicr & IXGBE_HICR_EN)) {
4514 DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
4515 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4518 /* Calculate length in DWORDs. We must be DWORD aligned */
4519 if (length % sizeof(u32)) {
4520 DEBUGOUT("Buffer length failure, not aligned to dword");
4521 return IXGBE_ERR_INVALID_ARGUMENT;
4524 dword_len = length >> 2;
4526 /* The device driver writes the relevant command block
4527 * into the ram area.
4529 for (i = 0; i < dword_len; i++)
4530 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4531 i, IXGBE_CPU_TO_LE32(buffer[i]));
4533 /* Setting this bit tells the ARC that a new command is pending. */
4534 IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
/* Poll for the ARC to clear HICR_C; each iteration is one ms of the
 * caller-supplied timeout (delay not visible in this extract). */
4536 for (i = 0; i < timeout; i++) {
4537 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4538 if (!(hicr & IXGBE_HICR_C))
4543 /* Check command completion */
/* timeout == 0 means "don't wait"; otherwise i == timeout is expiry.
 * HICR_SV must be set for the response status to be valid. */
4544 if ((timeout && i == timeout) ||
4545 !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
4546 ERROR_REPORT1(IXGBE_ERROR_CAUTION,
4547 "Command has failed with no status valid.\n");
4548 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4551 return IXGBE_SUCCESS;
4555 * ixgbe_host_interface_command - Issue command to manageability block
4556 * @hw: pointer to the HW structure
4557 * @buffer: contains the command to write and where the return status will
4559 * @length: length of buffer, must be multiple of 4 bytes
4560 * @timeout: time in ms to wait for command completion
4561 * @return_data: read and return data from the buffer (true) or not (false)
4562 * Needed because FW structures are big endian and decoding of
4563 * these fields can be 8 bit or 16 bit based on command. Decoding
4564 * is not easily understood without making a table of commands.
4565 * So we will leave this up to the caller to read back the data
4568 * Communicates with the manageability block. On success return IXGBE_SUCCESS
4569 * else returns semaphore error when encountering an error acquiring
4570 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4572 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
4573 u32 length, u32 timeout, bool return_data)
4575 u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
4581 DEBUGFUNC("ixgbe_host_interface_command");
4583 if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4584 DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4585 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4588 /* Take management host interface semaphore */
4589 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
/* Actual command issue is done by the unlocked helper while we hold
 * the SW_MNG semaphore. */
4593 status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
4600 /* Calculate length in DWORDs */
4601 dword_len = hdr_size >> 2;
4603 /* first pull in the header so we know the buffer length */
4604 for (bi = 0; bi < dword_len; bi++) {
4605 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4606 IXGBE_LE32_TO_CPUS(&buffer[bi]);
4609 /* If there is any thing in data position pull it in */
4610 buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
/* Guard against the FW reply overflowing the caller's buffer. */
4614 if (length < buf_len + hdr_size) {
4615 DEBUGOUT("Buffer not large enough for reply message.\n");
4616 status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4620 /* Calculate length in DWORDs, add 3 for odd lengths */
4621 dword_len = (buf_len + 3) >> 2;
4623 /* Pull in the rest of the buffer (bi is where we left off) */
4624 for (; bi <= dword_len; bi++) {
4625 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4626 IXGBE_LE32_TO_CPUS(&buffer[bi]);
/* Always release the semaphore, success or failure. */
4630 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4636 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4637 * @hw: pointer to the HW structure
4638 * @maj: driver version major number
4639 * @min: driver version minor number
4640 * @build: driver version build number
4641 * @sub: driver version sub build number
4643 * Sends driver version number to firmware through the manageability
4644 * block. On success return IXGBE_SUCCESS
4645 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4646 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4648 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4649 u8 build, u8 sub, u16 len,
4650 const char *driver_ver)
4652 struct ixgbe_hic_drv_info fw_cmd;
4654 s32 ret_val = IXGBE_SUCCESS;
4656 DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
/* len/driver_ver are unused in the generic implementation; other MAC
 * variants may use them. */
4657 UNREFERENCED_2PARAMETER(len, driver_ver);
/* Build the DRIVER_INFO command for the host interface. */
4659 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4660 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4661 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4662 fw_cmd.port_num = (u8)hw->bus.func;
4663 fw_cmd.ver_maj = maj;
4664 fw_cmd.ver_min = min;
4665 fw_cmd.ver_build = build;
4666 fw_cmd.ver_sub = sub;
/* Checksum field must be zero while the checksum is computed. */
4667 fw_cmd.hdr.checksum = 0;
4668 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4669 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4673 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4674 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4676 IXGBE_HI_COMMAND_TIMEOUT,
4678 if (ret_val != IXGBE_SUCCESS)
/* Command delivered; map FW response status onto driver status. */
4681 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4682 FW_CEM_RESP_STATUS_SUCCESS)
4683 ret_val = IXGBE_SUCCESS;
4685 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4694 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4695 * @hw: pointer to hardware structure
4696 * @num_pb: number of packet buffers to allocate
4697 * @headroom: reserve n KB of headroom
4698 * @strategy: packet buffer allocation strategy
4700 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4703 u32 pbsize = hw->mac.rx_pb_size;
4705 u32 rxpktsize, txpktsize, txpbthresh;
4707 /* Reserve headroom */
4713 /* Divide remaining packet buffer space amongst the number of packet
4714 * buffers requested using supplied strategy.
4717 case PBA_STRATEGY_WEIGHTED:
4718 /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4719 * buffer with 5/8 of the packet buffer space.
4721 rxpktsize = (pbsize * 5) / (num_pb * 4);
4722 pbsize -= rxpktsize * (num_pb / 2);
4723 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4724 for (; i < (num_pb / 2); i++)
4725 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4726 /* Fall through to configure remaining packet buffers */
4727 case PBA_STRATEGY_EQUAL:
/* Split what's left of pbsize evenly over the remaining buffers. */
4728 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4729 for (; i < num_pb; i++)
4730 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4736 /* Only support an equally distributed Tx packet buffer strategy. */
4737 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4738 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4739 for (i = 0; i < num_pb; i++) {
4740 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4741 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4744 /* Clear unused TCs, if any, to zero buffer size*/
4745 for (; i < IXGBE_MAX_PB; i++) {
4746 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4747 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4748 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4753 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4754 * @hw: pointer to the hardware structure
4756 * The 82599 and x540 MACs can experience issues if TX work is still pending
4757 * when a reset occurs. This function prevents this by flushing the PCIe
4758 * buffers on the system.
4760 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4762 u32 gcr_ext, hlreg0, i, poll;
4766 * If double reset is not requested then all transactions should
4767 * already be clear and as such there is no work to do
4769 if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4773 * Set loopback enable to prevent any transmits from being sent
4774 * should the link come up. This assumes that the RXCTRL.RXEN bit
4775 * has already been cleared.
4777 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4778 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4780 /* Wait for a last completion before clearing buffers */
4781 IXGBE_WRITE_FLUSH(hw);
4785 * Before proceeding, make sure that the PCIe block does not have
4786 * transactions pending.
/* Poll PCI device status until no transactions remain pending, up to
 * the limit computed from the PCIe completion timeout. */
4788 poll = ixgbe_pcie_timeout_poll(hw);
4789 for (i = 0; i < poll; i++) {
4791 value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
4792 if (IXGBE_REMOVED(hw->hw_addr))
4794 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
4799 /* initiate cleaning flow for buffers in the PCIe transaction layer */
4800 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4801 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4802 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4804 /* Flush all writes and allow 20usec for all transactions to clear */
4805 IXGBE_WRITE_FLUSH(hw);
4808 /* restore previous register values */
4809 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4810 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
/* I2C register addresses of the EMC thermal-sensor temperature data,
 * indexed by the ETS record's sensor-index field (index 0 = internal diode).
 */
STATIC const u8 ixgbe_emc_temp_data[4] = {
	IXGBE_EMC_INTERNAL_DATA,
	IXGBE_EMC_DIODE1_DATA,
	IXGBE_EMC_DIODE2_DATA,
	IXGBE_EMC_DIODE3_DATA
/* I2C register addresses of the matching EMC thermal-limit registers,
 * indexed identically to ixgbe_emc_temp_data.
 */
STATIC const u8 ixgbe_emc_therm_limit[4] = {
	IXGBE_EMC_INTERNAL_THERM_LIMIT,
	IXGBE_EMC_DIODE1_THERM_LIMIT,
	IXGBE_EMC_DIODE2_THERM_LIMIT,
	IXGBE_EMC_DIODE3_THERM_LIMIT
4827 * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
4828 * @hw: pointer to hardware structure
4829 * @data: pointer to the thermal sensor data structure
4831 * Returns the thermal sensor data structure
4833 s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
4835 s32 status = IXGBE_SUCCESS;
4843 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
4845 DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic");
4847 /* Only support thermal sensors attached to 82599 physical port 0 */
4848 if ((hw->mac.type != ixgbe_mac_82599EB) ||
4849 (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
4850 status = IXGBE_NOT_IMPLEMENTED;
4854 status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
4858 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) {
4859 status = IXGBE_NOT_IMPLEMENTED;
4863 status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
4867 if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
4868 != IXGBE_ETS_TYPE_EMC) {
4869 status = IXGBE_NOT_IMPLEMENTED;
4873 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
4874 if (num_sensors > IXGBE_MAX_SENSORS)
4875 num_sensors = IXGBE_MAX_SENSORS;
4877 for (i = 0; i < num_sensors; i++) {
4878 status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
4883 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
4884 IXGBE_ETS_DATA_INDEX_SHIFT);
4885 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
4886 IXGBE_ETS_DATA_LOC_SHIFT);
4888 if (sensor_location != 0) {
4889 status = hw->phy.ops.read_i2c_byte(hw,
4890 ixgbe_emc_temp_data[sensor_index],
4891 IXGBE_I2C_THERMAL_SENSOR_ADDR,
4892 &data->sensor[i].temp);
4902 * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
4903 * @hw: pointer to hardware structure
4905 * Inits the thermal sensor thresholds according to the NVM map
4906 * and save off the threshold and location values into mac.thermal_sensor_data
4908 s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
4910 s32 status = IXGBE_SUCCESS;
4915 u8 low_thresh_delta;
4921 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
4923 DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic");
4925 memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
4927 /* Only support thermal sensors attached to 82599 physical port 0 */
4928 if ((hw->mac.type != ixgbe_mac_82599EB) ||
4929 (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
4930 return IXGBE_NOT_IMPLEMENTED;
4932 offset = IXGBE_ETS_CFG;
4933 if (hw->eeprom.ops.read(hw, offset, &ets_offset))
4935 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
4936 return IXGBE_NOT_IMPLEMENTED;
4938 offset = ets_offset;
4939 if (hw->eeprom.ops.read(hw, offset, &ets_cfg))
4941 if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
4942 != IXGBE_ETS_TYPE_EMC)
4943 return IXGBE_NOT_IMPLEMENTED;
4945 low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
4946 IXGBE_ETS_LTHRES_DELTA_SHIFT);
4947 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
4949 for (i = 0; i < num_sensors; i++) {
4950 offset = ets_offset + 1 + i;
4951 if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) {
4952 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4953 "eeprom read at offset %d failed",
4957 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
4958 IXGBE_ETS_DATA_INDEX_SHIFT);
4959 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
4960 IXGBE_ETS_DATA_LOC_SHIFT);
4961 therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
4963 hw->phy.ops.write_i2c_byte(hw,
4964 ixgbe_emc_therm_limit[sensor_index],
4965 IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
4967 if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) {
4968 data->sensor[i].location = sensor_location;
4969 data->sensor[i].caution_thresh = therm_limit;
4970 data->sensor[i].max_op_thresh = therm_limit -
4977 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4978 "eeprom read at offset %d failed", offset);
4979 return IXGBE_NOT_IMPLEMENTED;
4984 * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
4985 * @hw: pointer to hardware structure
4986 * @map: pointer to u8 arr for returning map
4988 * Read the rtrup2tc HW register and resolve its content into map
4990 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
4994 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
4995 for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
4996 map[i] = IXGBE_RTRUP2TC_UP_MASK &
4997 (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
5001 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
5006 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5007 if (rxctrl & IXGBE_RXCTRL_RXEN) {
5008 if (hw->mac.type != ixgbe_mac_82598EB) {
5009 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5010 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
5011 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
5012 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5013 hw->mac.set_lben = true;
5015 hw->mac.set_lben = false;
5018 rxctrl &= ~IXGBE_RXCTRL_RXEN;
5019 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
5023 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
5028 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5029 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
5031 if (hw->mac.type != ixgbe_mac_82598EB) {
5032 if (hw->mac.set_lben) {
5033 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5034 pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
5035 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5036 hw->mac.set_lben = false;
5042 * ixgbe_mng_present - returns true when management capability is present
5043 * @hw: pointer to hardware structure
5045 bool ixgbe_mng_present(struct ixgbe_hw *hw)
5049 if (hw->mac.type < ixgbe_mac_82599EB)
5052 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5053 fwsm &= IXGBE_FWSM_MODE_MASK;
5054 return fwsm == IXGBE_FWSM_FW_MODE_PT;
5058 * ixgbe_mng_enabled - Is the manageability engine enabled?
5059 * @hw: pointer to hardware structure
5061 * Returns true if the manageability engine is enabled.
5063 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
5065 u32 fwsm, manc, factps;
5067 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5068 if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
5071 manc = IXGBE_READ_REG(hw, IXGBE_MANC);
5072 if (!(manc & IXGBE_MANC_RCV_TCO_EN))
5075 if (hw->mac.type <= ixgbe_mac_X540) {
5076 factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
5077 if (factps & IXGBE_FACTPS_MNGCG)
/**
 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg_wait_to_complete: true when waiting for completion is needed
 *
 * Set the link speed in the MAC and/or PHY register and restarts link.
 *
 * Tries each requested speed from highest (10G) to lowest (1G) in software,
 * because 10Gb fiber does not autonegotiate speed; if nothing links, falls
 * back to retrying the highest speed attempted.
 */
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed,
					  bool autoneg_wait_to_complete)
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 status = IXGBE_SUCCESS;
	bool autoneg, link_up = false;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
	if (status != IXGBE_SUCCESS)

	speed &= link_speed;

	/* Try each speed one by one, highest priority first. We do this in
	 * software because 10Gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_10GB_FULL);
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects MAC link speed */
			DEBUGOUT("Unexpected media type.\n");

		/* Allow module to change analog characteristics (1G->10G) */

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_10GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the controller to acquire link. Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted. 82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
			if (status != IXGBE_SUCCESS)

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* Set the module link speed */
		switch (hw->phy.media_type) {
		case ixgbe_media_type_fiber:
			ixgbe_set_rate_select_speed(hw,
						    IXGBE_LINK_SPEED_1GB_FULL);
		case ixgbe_media_type_fiber_qsfp:
			/* QSFP module automatically detects link speed */
			DEBUGOUT("Unexpected media type.\n");

		/* Allow module to change analog characteristics (10G->1G) */

		status = ixgbe_setup_mac_link(hw,
					      IXGBE_LINK_SPEED_1GB_FULL,
					      autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)

		/* Flap the Tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, false);
		if (status != IXGBE_SUCCESS)

	/* We didn't get link. Configure back to the highest speed we tried,
	 * (if there was more than one). We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
							       autoneg_wait_to_complete);

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
5230 * ixgbe_set_soft_rate_select_speed - Set module link speed
5231 * @hw: pointer to hardware structure
5232 * @speed: link speed to set
5234 * Set module link speed via the soft rate select.
5236 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
5237 ixgbe_link_speed speed)
5243 case IXGBE_LINK_SPEED_10GB_FULL:
5244 /* one bit mask same as setting on */
5245 rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
5247 case IXGBE_LINK_SPEED_1GB_FULL:
5248 rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
5251 DEBUGOUT("Invalid fixed module speed\n");
5256 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5257 IXGBE_I2C_EEPROM_DEV_ADDR2,
5260 DEBUGOUT("Failed to read Rx Rate Select RS0\n");
5264 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5266 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5267 IXGBE_I2C_EEPROM_DEV_ADDR2,
5270 DEBUGOUT("Failed to write Rx Rate Select RS0\n");
5275 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5276 IXGBE_I2C_EEPROM_DEV_ADDR2,
5279 DEBUGOUT("Failed to read Rx Rate Select RS1\n");
5283 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5285 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5286 IXGBE_I2C_EEPROM_DEV_ADDR2,
5289 DEBUGOUT("Failed to write Rx Rate Select RS1\n");