/*******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
34 #include "ixgbe_common.h"
35 #include "ixgbe_phy.h"
36 #include "ixgbe_dcb.h"
37 #include "ixgbe_dcb_82599.h"
38 #include "ixgbe_api.h"
40 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
41 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
42 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
43 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
44 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
45 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
47 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
48 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
49 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
50 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw);
52 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
53 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
55 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
56 u16 words, u16 *data);
57 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
58 u16 words, u16 *data);
59 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
63 * ixgbe_init_ops_generic - Inits function ptrs
64 * @hw: pointer to the hardware structure
66 * Initialize the function pointers.
68 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
70 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
71 struct ixgbe_mac_info *mac = &hw->mac;
72 u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
74 DEBUGFUNC("ixgbe_init_ops_generic");
77 eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
78 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
79 if (eec & IXGBE_EEC_PRES) {
80 eeprom->ops.read = ixgbe_read_eerd_generic;
81 eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
83 eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
84 eeprom->ops.read_buffer =
85 ixgbe_read_eeprom_buffer_bit_bang_generic;
87 eeprom->ops.write = ixgbe_write_eeprom_generic;
88 eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
89 eeprom->ops.validate_checksum =
90 ixgbe_validate_eeprom_checksum_generic;
91 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
92 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
95 mac->ops.init_hw = ixgbe_init_hw_generic;
96 mac->ops.reset_hw = NULL;
97 mac->ops.start_hw = ixgbe_start_hw_generic;
98 mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
99 mac->ops.get_media_type = NULL;
100 mac->ops.get_supported_physical_layer = NULL;
101 mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
102 mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
103 mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
104 mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
105 mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
106 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
107 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
108 mac->ops.prot_autoc_read = prot_autoc_read_generic;
109 mac->ops.prot_autoc_write = prot_autoc_write_generic;
112 mac->ops.led_on = ixgbe_led_on_generic;
113 mac->ops.led_off = ixgbe_led_off_generic;
114 mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
115 mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
116 mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;
118 /* RAR, Multicast, VLAN */
119 mac->ops.set_rar = ixgbe_set_rar_generic;
120 mac->ops.clear_rar = ixgbe_clear_rar_generic;
121 mac->ops.insert_mac_addr = NULL;
122 mac->ops.set_vmdq = NULL;
123 mac->ops.clear_vmdq = NULL;
124 mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
125 mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
126 mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
127 mac->ops.enable_mc = ixgbe_enable_mc_generic;
128 mac->ops.disable_mc = ixgbe_disable_mc_generic;
129 mac->ops.clear_vfta = NULL;
130 mac->ops.set_vfta = NULL;
131 mac->ops.set_vlvf = NULL;
132 mac->ops.init_uta_tables = NULL;
133 mac->ops.enable_rx = ixgbe_enable_rx_generic;
134 mac->ops.disable_rx = ixgbe_disable_rx_generic;
137 mac->ops.fc_enable = ixgbe_fc_enable_generic;
138 mac->ops.setup_fc = ixgbe_setup_fc_generic;
139 mac->ops.fc_autoneg = ixgbe_fc_autoneg;
142 mac->ops.get_link_capabilities = NULL;
143 mac->ops.setup_link = NULL;
144 mac->ops.check_link = NULL;
145 mac->ops.dmac_config = NULL;
146 mac->ops.dmac_update_tcs = NULL;
147 mac->ops.dmac_config_tcs = NULL;
149 return IXGBE_SUCCESS;
153 * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
155 * @hw: pointer to hardware structure
157 * This function returns true if the device supports flow control
158 * autonegotiation, and false if it does not.
161 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
163 bool supported = false;
164 ixgbe_link_speed speed;
167 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
169 switch (hw->phy.media_type) {
170 case ixgbe_media_type_fiber_qsfp:
171 case ixgbe_media_type_fiber:
172 /* flow control autoneg black list */
173 switch (hw->device_id) {
174 case IXGBE_DEV_ID_X550EM_A_SFP:
175 case IXGBE_DEV_ID_X550EM_A_SFP_N:
176 case IXGBE_DEV_ID_X550EM_A_QSFP:
177 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
181 hw->mac.ops.check_link(hw, &speed, &link_up, false);
182 /* if link is down, assume supported */
184 supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
191 case ixgbe_media_type_backplane:
192 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
197 case ixgbe_media_type_copper:
198 /* only some copper devices support flow control autoneg */
199 switch (hw->device_id) {
200 case IXGBE_DEV_ID_82599_T3_LOM:
201 case IXGBE_DEV_ID_X540T:
202 case IXGBE_DEV_ID_X540T1:
203 case IXGBE_DEV_ID_X550T:
204 case IXGBE_DEV_ID_X550T1:
205 case IXGBE_DEV_ID_X550EM_X_10G_T:
206 case IXGBE_DEV_ID_X550EM_A_10G_T:
207 case IXGBE_DEV_ID_X550EM_A_1G_T:
208 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
219 ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
220 "Device %x does not support flow control autoneg",
226 * ixgbe_setup_fc_generic - Set up flow control
227 * @hw: pointer to hardware structure
229 * Called at init time to set up flow control.
231 s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
233 s32 ret_val = IXGBE_SUCCESS;
234 u32 reg = 0, reg_bp = 0;
238 DEBUGFUNC("ixgbe_setup_fc_generic");
240 /* Validate the requested mode */
241 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
242 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
243 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
244 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
249 * 10gig parts do not have a word in the EEPROM to determine the
250 * default flow control setting, so we explicitly set it to full.
252 if (hw->fc.requested_mode == ixgbe_fc_default)
253 hw->fc.requested_mode = ixgbe_fc_full;
256 * Set up the 1G and 10G flow control advertisement registers so the
257 * HW will be able to do fc autoneg once the cable is plugged in. If
258 * we link at 10G, the 1G advertisement is harmless and vice versa.
260 switch (hw->phy.media_type) {
261 case ixgbe_media_type_backplane:
262 /* some MAC's need RMW protection on AUTOC */
263 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp);
264 if (ret_val != IXGBE_SUCCESS)
267 /* only backplane uses autoc so fall though */
268 case ixgbe_media_type_fiber_qsfp:
269 case ixgbe_media_type_fiber:
270 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
273 case ixgbe_media_type_copper:
274 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
275 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu);
282 * The possible values of fc.requested_mode are:
283 * 0: Flow control is completely disabled
284 * 1: Rx flow control is enabled (we can receive pause frames,
285 * but not send pause frames).
286 * 2: Tx flow control is enabled (we can send pause frames but
287 * we do not support receiving pause frames).
288 * 3: Both Rx and Tx flow control (symmetric) are enabled.
291 switch (hw->fc.requested_mode) {
293 /* Flow control completely disabled by software override. */
294 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
295 if (hw->phy.media_type == ixgbe_media_type_backplane)
296 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
297 IXGBE_AUTOC_ASM_PAUSE);
298 else if (hw->phy.media_type == ixgbe_media_type_copper)
299 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
301 case ixgbe_fc_tx_pause:
303 * Tx Flow control is enabled, and Rx Flow control is
304 * disabled by software override.
306 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
307 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
308 if (hw->phy.media_type == ixgbe_media_type_backplane) {
309 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
310 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
311 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
312 reg_cu |= IXGBE_TAF_ASM_PAUSE;
313 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
316 case ixgbe_fc_rx_pause:
318 * Rx Flow control is enabled and Tx Flow control is
319 * disabled by software override. Since there really
320 * isn't a way to advertise that we are capable of RX
321 * Pause ONLY, we will advertise that we support both
322 * symmetric and asymmetric Rx PAUSE, as such we fall
323 * through to the fc_full statement. Later, we will
324 * disable the adapter's ability to send PAUSE frames.
327 /* Flow control (both Rx and Tx) is enabled by SW override. */
328 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
329 if (hw->phy.media_type == ixgbe_media_type_backplane)
330 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
331 IXGBE_AUTOC_ASM_PAUSE;
332 else if (hw->phy.media_type == ixgbe_media_type_copper)
333 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
336 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
337 "Flow control param set incorrectly\n");
338 ret_val = IXGBE_ERR_CONFIG;
343 if (hw->mac.type < ixgbe_mac_X540) {
345 * Enable auto-negotiation between the MAC & PHY;
346 * the MAC will advertise clause 37 flow control.
348 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
349 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
351 /* Disable AN timeout */
352 if (hw->fc.strict_ieee)
353 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
355 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
356 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
360 * AUTOC restart handles negotiation of 1G and 10G on backplane
361 * and copper. There is no need to set the PCS1GCTL register.
364 if (hw->phy.media_type == ixgbe_media_type_backplane) {
365 reg_bp |= IXGBE_AUTOC_AN_RESTART;
366 ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
369 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
370 (ixgbe_device_supports_autoneg_fc(hw))) {
371 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
372 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
375 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
381 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
382 * @hw: pointer to hardware structure
384 * Starts the hardware by filling the bus info structure and media type, clears
385 * all on chip counters, initializes receive address registers, multicast
386 * table, VLAN filter table, calls routine to set up link and flow control
387 * settings, and leaves transmit and receive units disabled and uninitialized
389 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
395 DEBUGFUNC("ixgbe_start_hw_generic");
397 /* Set the media type */
398 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
400 /* PHY ops initialization must be done in reset_hw() */
402 /* Clear the VLAN filter table */
403 hw->mac.ops.clear_vfta(hw);
405 /* Clear statistics registers */
406 hw->mac.ops.clear_hw_cntrs(hw);
408 /* Set No Snoop Disable */
409 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
410 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
411 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
412 IXGBE_WRITE_FLUSH(hw);
414 /* Setup flow control */
415 ret_val = ixgbe_setup_fc(hw);
416 if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
417 DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
421 /* Cache bit indicating need for crosstalk fix */
422 switch (hw->mac.type) {
423 case ixgbe_mac_82599EB:
424 case ixgbe_mac_X550EM_x:
425 case ixgbe_mac_X550EM_a:
426 hw->mac.ops.get_device_caps(hw, &device_caps);
427 if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
428 hw->need_crosstalk_fix = false;
430 hw->need_crosstalk_fix = true;
433 hw->need_crosstalk_fix = false;
437 /* Clear adapter stopped flag */
438 hw->adapter_stopped = false;
440 return IXGBE_SUCCESS;
444 * ixgbe_start_hw_gen2 - Init sequence for common device family
445 * @hw: pointer to hw structure
447 * Performs the init sequence common to the second generation
449 * Devices in the second generation:
453 s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
458 /* Clear the rate limiters */
459 for (i = 0; i < hw->mac.max_tx_queues; i++) {
460 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
461 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
463 IXGBE_WRITE_FLUSH(hw);
465 /* Disable relaxed ordering */
466 for (i = 0; i < hw->mac.max_tx_queues; i++) {
467 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
468 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
469 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
472 for (i = 0; i < hw->mac.max_rx_queues; i++) {
473 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
474 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
475 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
476 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
479 return IXGBE_SUCCESS;
483 * ixgbe_init_hw_generic - Generic hardware initialization
484 * @hw: pointer to hardware structure
486 * Initialize the hardware by resetting the hardware, filling the bus info
487 * structure and media type, clears all on chip counters, initializes receive
488 * address registers, multicast table, VLAN filter table, calls routine to set
489 * up link and flow control settings, and leaves transmit and receive units
490 * disabled and uninitialized
492 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
496 DEBUGFUNC("ixgbe_init_hw_generic");
498 /* Reset the hardware */
499 status = hw->mac.ops.reset_hw(hw);
501 if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
503 status = hw->mac.ops.start_hw(hw);
506 /* Initialize the LED link active for LED blink support */
507 hw->mac.ops.init_led_link_act(hw);
509 if (status != IXGBE_SUCCESS)
510 DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
516 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
517 * @hw: pointer to hardware structure
519 * Clears all hardware statistics counters by reading them from the hardware
520 * Statistics counters are clear on read.
522 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
526 DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
528 IXGBE_READ_REG(hw, IXGBE_CRCERRS);
529 IXGBE_READ_REG(hw, IXGBE_ILLERRC);
530 IXGBE_READ_REG(hw, IXGBE_ERRBC);
531 IXGBE_READ_REG(hw, IXGBE_MSPDC);
532 for (i = 0; i < 8; i++)
533 IXGBE_READ_REG(hw, IXGBE_MPC(i));
535 IXGBE_READ_REG(hw, IXGBE_MLFC);
536 IXGBE_READ_REG(hw, IXGBE_MRFC);
537 IXGBE_READ_REG(hw, IXGBE_RLEC);
538 IXGBE_READ_REG(hw, IXGBE_LXONTXC);
539 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
540 if (hw->mac.type >= ixgbe_mac_82599EB) {
541 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
542 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
544 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
545 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
548 for (i = 0; i < 8; i++) {
549 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
550 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
551 if (hw->mac.type >= ixgbe_mac_82599EB) {
552 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
553 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
555 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
556 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
559 if (hw->mac.type >= ixgbe_mac_82599EB)
560 for (i = 0; i < 8; i++)
561 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
562 IXGBE_READ_REG(hw, IXGBE_PRC64);
563 IXGBE_READ_REG(hw, IXGBE_PRC127);
564 IXGBE_READ_REG(hw, IXGBE_PRC255);
565 IXGBE_READ_REG(hw, IXGBE_PRC511);
566 IXGBE_READ_REG(hw, IXGBE_PRC1023);
567 IXGBE_READ_REG(hw, IXGBE_PRC1522);
568 IXGBE_READ_REG(hw, IXGBE_GPRC);
569 IXGBE_READ_REG(hw, IXGBE_BPRC);
570 IXGBE_READ_REG(hw, IXGBE_MPRC);
571 IXGBE_READ_REG(hw, IXGBE_GPTC);
572 IXGBE_READ_REG(hw, IXGBE_GORCL);
573 IXGBE_READ_REG(hw, IXGBE_GORCH);
574 IXGBE_READ_REG(hw, IXGBE_GOTCL);
575 IXGBE_READ_REG(hw, IXGBE_GOTCH);
576 if (hw->mac.type == ixgbe_mac_82598EB)
577 for (i = 0; i < 8; i++)
578 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
579 IXGBE_READ_REG(hw, IXGBE_RUC);
580 IXGBE_READ_REG(hw, IXGBE_RFC);
581 IXGBE_READ_REG(hw, IXGBE_ROC);
582 IXGBE_READ_REG(hw, IXGBE_RJC);
583 IXGBE_READ_REG(hw, IXGBE_MNGPRC);
584 IXGBE_READ_REG(hw, IXGBE_MNGPDC);
585 IXGBE_READ_REG(hw, IXGBE_MNGPTC);
586 IXGBE_READ_REG(hw, IXGBE_TORL);
587 IXGBE_READ_REG(hw, IXGBE_TORH);
588 IXGBE_READ_REG(hw, IXGBE_TPR);
589 IXGBE_READ_REG(hw, IXGBE_TPT);
590 IXGBE_READ_REG(hw, IXGBE_PTC64);
591 IXGBE_READ_REG(hw, IXGBE_PTC127);
592 IXGBE_READ_REG(hw, IXGBE_PTC255);
593 IXGBE_READ_REG(hw, IXGBE_PTC511);
594 IXGBE_READ_REG(hw, IXGBE_PTC1023);
595 IXGBE_READ_REG(hw, IXGBE_PTC1522);
596 IXGBE_READ_REG(hw, IXGBE_MPTC);
597 IXGBE_READ_REG(hw, IXGBE_BPTC);
598 for (i = 0; i < 16; i++) {
599 IXGBE_READ_REG(hw, IXGBE_QPRC(i));
600 IXGBE_READ_REG(hw, IXGBE_QPTC(i));
601 if (hw->mac.type >= ixgbe_mac_82599EB) {
602 IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
603 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
604 IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
605 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
606 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
608 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
609 IXGBE_READ_REG(hw, IXGBE_QBTC(i));
613 if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
615 ixgbe_identify_phy(hw);
616 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
617 IXGBE_MDIO_PCS_DEV_TYPE, &i);
618 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
619 IXGBE_MDIO_PCS_DEV_TYPE, &i);
620 hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
621 IXGBE_MDIO_PCS_DEV_TYPE, &i);
622 hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
623 IXGBE_MDIO_PCS_DEV_TYPE, &i);
626 return IXGBE_SUCCESS;
630 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
631 * @hw: pointer to hardware structure
632 * @pba_num: stores the part number string from the EEPROM
633 * @pba_num_size: part number string buffer length
635 * Reads the part number string from the EEPROM.
637 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
646 DEBUGFUNC("ixgbe_read_pba_string_generic");
648 if (pba_num == NULL) {
649 DEBUGOUT("PBA string buffer was null\n");
650 return IXGBE_ERR_INVALID_ARGUMENT;
653 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
655 DEBUGOUT("NVM Read Error\n");
659 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
661 DEBUGOUT("NVM Read Error\n");
666 * if data is not ptr guard the PBA must be in legacy format which
667 * means pba_ptr is actually our second data word for the PBA number
668 * and we can decode it into an ascii string
670 if (data != IXGBE_PBANUM_PTR_GUARD) {
671 DEBUGOUT("NVM PBA number is not stored as string\n");
673 /* we will need 11 characters to store the PBA */
674 if (pba_num_size < 11) {
675 DEBUGOUT("PBA string buffer too small\n");
676 return IXGBE_ERR_NO_SPACE;
679 /* extract hex string from data and pba_ptr */
680 pba_num[0] = (data >> 12) & 0xF;
681 pba_num[1] = (data >> 8) & 0xF;
682 pba_num[2] = (data >> 4) & 0xF;
683 pba_num[3] = data & 0xF;
684 pba_num[4] = (pba_ptr >> 12) & 0xF;
685 pba_num[5] = (pba_ptr >> 8) & 0xF;
688 pba_num[8] = (pba_ptr >> 4) & 0xF;
689 pba_num[9] = pba_ptr & 0xF;
691 /* put a null character on the end of our string */
694 /* switch all the data but the '-' to hex char */
695 for (offset = 0; offset < 10; offset++) {
696 if (pba_num[offset] < 0xA)
697 pba_num[offset] += '0';
698 else if (pba_num[offset] < 0x10)
699 pba_num[offset] += 'A' - 0xA;
702 return IXGBE_SUCCESS;
705 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
707 DEBUGOUT("NVM Read Error\n");
711 if (length == 0xFFFF || length == 0) {
712 DEBUGOUT("NVM PBA number section invalid length\n");
713 return IXGBE_ERR_PBA_SECTION;
716 /* check if pba_num buffer is big enough */
717 if (pba_num_size < (((u32)length * 2) - 1)) {
718 DEBUGOUT("PBA string buffer too small\n");
719 return IXGBE_ERR_NO_SPACE;
722 /* trim pba length from start of string */
726 for (offset = 0; offset < length; offset++) {
727 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
729 DEBUGOUT("NVM Read Error\n");
732 pba_num[offset * 2] = (u8)(data >> 8);
733 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
735 pba_num[offset * 2] = '\0';
737 return IXGBE_SUCCESS;
741 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
742 * @hw: pointer to hardware structure
743 * @pba_num: stores the part number from the EEPROM
745 * Reads the part number from the EEPROM.
747 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
752 DEBUGFUNC("ixgbe_read_pba_num_generic");
754 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
756 DEBUGOUT("NVM Read Error\n");
758 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
759 DEBUGOUT("NVM Not supported\n");
760 return IXGBE_NOT_IMPLEMENTED;
762 *pba_num = (u32)(data << 16);
764 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
766 DEBUGOUT("NVM Read Error\n");
771 return IXGBE_SUCCESS;
776 * @hw: pointer to the HW structure
777 * @eeprom_buf: optional pointer to EEPROM image
778 * @eeprom_buf_size: size of EEPROM image in words
779 * @max_pba_block_size: PBA block size limit
780 * @pba: pointer to output PBA structure
782 * Reads PBA from EEPROM image when eeprom_buf is not NULL.
783 * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
786 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
787 u32 eeprom_buf_size, u16 max_pba_block_size,
788 struct ixgbe_pba *pba)
794 return IXGBE_ERR_PARAM;
796 if (eeprom_buf == NULL) {
797 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
802 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
803 pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
804 pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
806 return IXGBE_ERR_PARAM;
810 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
811 if (pba->pba_block == NULL)
812 return IXGBE_ERR_PARAM;
814 ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
820 if (pba_block_size > max_pba_block_size)
821 return IXGBE_ERR_PARAM;
823 if (eeprom_buf == NULL) {
824 ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
830 if (eeprom_buf_size > (u32)(pba->word[1] +
832 memcpy(pba->pba_block,
833 &eeprom_buf[pba->word[1]],
834 pba_block_size * sizeof(u16));
836 return IXGBE_ERR_PARAM;
841 return IXGBE_SUCCESS;
845 * ixgbe_write_pba_raw
846 * @hw: pointer to the HW structure
847 * @eeprom_buf: optional pointer to EEPROM image
848 * @eeprom_buf_size: size of EEPROM image in words
849 * @pba: pointer to PBA structure
851 * Writes PBA to EEPROM image when eeprom_buf is not NULL.
852 * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
855 s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
856 u32 eeprom_buf_size, struct ixgbe_pba *pba)
861 return IXGBE_ERR_PARAM;
863 if (eeprom_buf == NULL) {
864 ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
869 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
870 eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
871 eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
873 return IXGBE_ERR_PARAM;
877 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
878 if (pba->pba_block == NULL)
879 return IXGBE_ERR_PARAM;
881 if (eeprom_buf == NULL) {
882 ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
888 if (eeprom_buf_size > (u32)(pba->word[1] +
889 pba->pba_block[0])) {
890 memcpy(&eeprom_buf[pba->word[1]],
892 pba->pba_block[0] * sizeof(u16));
894 return IXGBE_ERR_PARAM;
899 return IXGBE_SUCCESS;
903 * ixgbe_get_pba_block_size
904 * @hw: pointer to the HW structure
905 * @eeprom_buf: optional pointer to EEPROM image
906 * @eeprom_buf_size: size of EEPROM image in words
907 * @pba_data_size: pointer to output variable
909 * Returns the size of the PBA block in words. Function operates on EEPROM
910 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
914 s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
915 u32 eeprom_buf_size, u16 *pba_block_size)
921 DEBUGFUNC("ixgbe_get_pba_block_size");
923 if (eeprom_buf == NULL) {
924 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
929 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
930 pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
931 pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
933 return IXGBE_ERR_PARAM;
937 if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
938 if (eeprom_buf == NULL) {
939 ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
944 if (eeprom_buf_size > pba_word[1])
945 length = eeprom_buf[pba_word[1] + 0];
947 return IXGBE_ERR_PARAM;
950 if (length == 0xFFFF || length == 0)
951 return IXGBE_ERR_PBA_SECTION;
953 /* PBA number in legacy format, there is no PBA Block. */
957 if (pba_block_size != NULL)
958 *pba_block_size = length;
960 return IXGBE_SUCCESS;
964 * ixgbe_get_mac_addr_generic - Generic get MAC address
965 * @hw: pointer to hardware structure
966 * @mac_addr: Adapter MAC address
968 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
969 * A reset of the adapter must be performed prior to calling this function
970 * in order for the MAC address to have been loaded from the EEPROM into RAR0
972 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
978 DEBUGFUNC("ixgbe_get_mac_addr_generic");
980 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
981 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
983 for (i = 0; i < 4; i++)
984 mac_addr[i] = (u8)(rar_low >> (i*8));
986 for (i = 0; i < 2; i++)
987 mac_addr[i+4] = (u8)(rar_high >> (i*8));
989 return IXGBE_SUCCESS;
993 * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
994 * @hw: pointer to hardware structure
995 * @link_status: the link status returned by the PCI config space
997 * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
999 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
1001 struct ixgbe_mac_info *mac = &hw->mac;
1003 if (hw->bus.type == ixgbe_bus_type_unknown)
1004 hw->bus.type = ixgbe_bus_type_pci_express;
1006 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
1007 case IXGBE_PCI_LINK_WIDTH_1:
1008 hw->bus.width = ixgbe_bus_width_pcie_x1;
1010 case IXGBE_PCI_LINK_WIDTH_2:
1011 hw->bus.width = ixgbe_bus_width_pcie_x2;
1013 case IXGBE_PCI_LINK_WIDTH_4:
1014 hw->bus.width = ixgbe_bus_width_pcie_x4;
1016 case IXGBE_PCI_LINK_WIDTH_8:
1017 hw->bus.width = ixgbe_bus_width_pcie_x8;
1020 hw->bus.width = ixgbe_bus_width_unknown;
1024 switch (link_status & IXGBE_PCI_LINK_SPEED) {
1025 case IXGBE_PCI_LINK_SPEED_2500:
1026 hw->bus.speed = ixgbe_bus_speed_2500;
1028 case IXGBE_PCI_LINK_SPEED_5000:
1029 hw->bus.speed = ixgbe_bus_speed_5000;
1031 case IXGBE_PCI_LINK_SPEED_8000:
1032 hw->bus.speed = ixgbe_bus_speed_8000;
1035 hw->bus.speed = ixgbe_bus_speed_unknown;
1039 mac->ops.set_lan_id(hw);
1043 * ixgbe_get_bus_info_generic - Generic set PCI bus info
1044 * @hw: pointer to hardware structure
1046 * Gets the PCI bus info (speed, width, type) then calls helper function to
1047 * store this data within the ixgbe_hw structure.
1049 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1053 DEBUGFUNC("ixgbe_get_bus_info_generic");
1055 /* Get the negotiated link width and speed from PCI config space */
1056 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1058 ixgbe_set_pci_config_data_generic(hw, link_status);
1060 return IXGBE_SUCCESS;
1064 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1065 * @hw: pointer to the HW structure
1067 * Determines the LAN function id by reading memory-mapped registers and swaps
1068 * the port value if requested, and set MAC instance for devices that share
1071 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
1073 struct ixgbe_bus_info *bus = &hw->bus;
1077 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
1079 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
1080 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
1081 bus->lan_id = (u8)bus->func;
1083 /* check for a port swap */
1084 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
1085 if (reg & IXGBE_FACTPS_LFS)
1088 /* Get MAC instance from EEPROM for configuring CS4227 */
1089 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
1090 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
1091 bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
1092 IXGBE_EE_CTRL_4_INST_ID_SHIFT;
1097 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1098 * @hw: pointer to hardware structure
1100 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1101 * disables transmit and receive units. The adapter_stopped flag is used by
1102 * the shared code and drivers to determine if the adapter is in a stopped
1103 * state and should not touch the hardware.
1105 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
1110 DEBUGFUNC("ixgbe_stop_adapter_generic");
1113 * Set the adapter_stopped flag so other driver functions stop touching
1116 hw->adapter_stopped = true;
1118 /* Disable the receive unit */
1119 ixgbe_disable_rx(hw);
1121 /* Clear interrupt mask to stop interrupts from being generated */
1122 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1124 /* Clear any pending interrupts, flush previous writes */
1125 IXGBE_READ_REG(hw, IXGBE_EICR);
1127 /* Disable the transmit unit. Each queue must be disabled. */
1128 for (i = 0; i < hw->mac.max_tx_queues; i++)
1129 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
1131 /* Disable the receive unit by stopping each queue */
1132 for (i = 0; i < hw->mac.max_rx_queues; i++) {
1133 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1134 reg_val &= ~IXGBE_RXDCTL_ENABLE;
1135 reg_val |= IXGBE_RXDCTL_SWFLSH;
1136 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
1139 /* flush all queues disables */
1140 IXGBE_WRITE_FLUSH(hw);
1144 * Prevent the PCI-E bus from hanging by disabling PCI-E master
1145 * access and verify no pending requests
1147 return ixgbe_disable_pcie_master(hw);
1151 * ixgbe_init_led_link_act_generic - Store the LED index link/activity.
1152 * @hw: pointer to hardware structure
1154 * Store the index for the link active LED. This will be used to support
1157 s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
1159 struct ixgbe_mac_info *mac = &hw->mac;
1160 u32 led_reg, led_mode;
1163 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1165 /* Get LED link active from the LEDCTL register */
1166 for (i = 0; i < 4; i++) {
1167 led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);
1169 if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
1170 IXGBE_LED_LINK_ACTIVE) {
1171 mac->led_link_act = i;
1172 return IXGBE_SUCCESS;
1177 * If LEDCTL register does not have the LED link active set, then use
1178 * known MAC defaults.
1180 switch (hw->mac.type) {
1181 case ixgbe_mac_X550EM_a:
1182 case ixgbe_mac_X550EM_x:
1183 mac->led_link_act = 1;
1186 mac->led_link_act = 2;
1188 return IXGBE_SUCCESS;
1192 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1193 * @hw: pointer to hardware structure
1194 * @index: led number to turn on
1196 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1198 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1200 DEBUGFUNC("ixgbe_led_on_generic");
1203 return IXGBE_ERR_PARAM;
1205 /* To turn on the LED, set mode to ON. */
1206 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1207 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1208 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1209 IXGBE_WRITE_FLUSH(hw);
1211 return IXGBE_SUCCESS;
1215 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
1216 * @hw: pointer to hardware structure
1217 * @index: led number to turn off
1219 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1221 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1223 DEBUGFUNC("ixgbe_led_off_generic");
1226 return IXGBE_ERR_PARAM;
1228 /* To turn off the LED, set mode to OFF. */
1229 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1230 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1231 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1232 IXGBE_WRITE_FLUSH(hw);
1234 return IXGBE_SUCCESS;
1238 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1239 * @hw: pointer to hardware structure
1241 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1242 * ixgbe_hw struct in order to set up EEPROM access.
1244 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
1246 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1250 DEBUGFUNC("ixgbe_init_eeprom_params_generic");
1252 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1253 eeprom->type = ixgbe_eeprom_none;
1254 /* Set default semaphore delay to 10ms which is a well
1256 eeprom->semaphore_delay = 10;
1257 /* Clear EEPROM page size, it will be initialized as needed */
1258 eeprom->word_page_size = 0;
1261 * Check for EEPROM present first.
1262 * If not present leave as none
1264 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1265 if (eec & IXGBE_EEC_PRES) {
1266 eeprom->type = ixgbe_eeprom_spi;
1269 * SPI EEPROM is assumed here. This code would need to
1270 * change if a future EEPROM is not SPI.
1272 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1273 IXGBE_EEC_SIZE_SHIFT);
1274 eeprom->word_size = 1 << (eeprom_size +
1275 IXGBE_EEPROM_WORD_SIZE_SHIFT);
1278 if (eec & IXGBE_EEC_ADDR_SIZE)
1279 eeprom->address_bits = 16;
1281 eeprom->address_bits = 8;
1282 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
1283 "%d\n", eeprom->type, eeprom->word_size,
1284 eeprom->address_bits);
1287 return IXGBE_SUCCESS;
1291 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1292 * @hw: pointer to hardware structure
1293 * @offset: offset within the EEPROM to write
1294 * @words: number of word(s)
1295 * @data: 16 bit word(s) to write to EEPROM
1297 * Reads 16 bit word(s) from EEPROM through bit-bang method
1299 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1300 u16 words, u16 *data)
1302 s32 status = IXGBE_SUCCESS;
1305 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1307 hw->eeprom.ops.init_params(hw);
1310 status = IXGBE_ERR_INVALID_ARGUMENT;
1314 if (offset + words > hw->eeprom.word_size) {
1315 status = IXGBE_ERR_EEPROM;
1320 * The EEPROM page size cannot be queried from the chip. We do lazy
1321 * initialization. It is worth to do that when we write large buffer.
1323 if ((hw->eeprom.word_page_size == 0) &&
1324 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1325 ixgbe_detect_eeprom_page_size_generic(hw, offset);
1328 * We cannot hold synchronization semaphores for too long
1329 * to avoid other entity starvation. However it is more efficient
1330 * to read in bursts than synchronizing access for each word.
1332 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1333 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1334 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1335 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1338 if (status != IXGBE_SUCCESS)
1347 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1348 * @hw: pointer to hardware structure
1349 * @offset: offset within the EEPROM to be written to
1350 * @words: number of word(s)
1351 * @data: 16 bit word(s) to be written to the EEPROM
1353 * If ixgbe_eeprom_update_checksum is not called after this function, the
1354 * EEPROM will most likely contain an invalid checksum.
1356 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1357 u16 words, u16 *data)
1363 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
1365 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
1367 /* Prepare the EEPROM for writing */
1368 status = ixgbe_acquire_eeprom(hw);
1370 if (status == IXGBE_SUCCESS) {
1371 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1372 ixgbe_release_eeprom(hw);
1373 status = IXGBE_ERR_EEPROM;
1377 if (status == IXGBE_SUCCESS) {
1378 for (i = 0; i < words; i++) {
1379 ixgbe_standby_eeprom(hw);
1381 /* Send the WRITE ENABLE command (8 bit opcode ) */
1382 ixgbe_shift_out_eeprom_bits(hw,
1383 IXGBE_EEPROM_WREN_OPCODE_SPI,
1384 IXGBE_EEPROM_OPCODE_BITS);
1386 ixgbe_standby_eeprom(hw);
1389 * Some SPI eeproms use the 8th address bit embedded
1392 if ((hw->eeprom.address_bits == 8) &&
1393 ((offset + i) >= 128))
1394 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1396 /* Send the Write command (8-bit opcode + addr) */
1397 ixgbe_shift_out_eeprom_bits(hw, write_opcode,
1398 IXGBE_EEPROM_OPCODE_BITS);
1399 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1400 hw->eeprom.address_bits);
1402 page_size = hw->eeprom.word_page_size;
1404 /* Send the data in burst via SPI*/
1407 word = (word >> 8) | (word << 8);
1408 ixgbe_shift_out_eeprom_bits(hw, word, 16);
1413 /* do not wrap around page */
1414 if (((offset + i) & (page_size - 1)) ==
1417 } while (++i < words);
1419 ixgbe_standby_eeprom(hw);
1422 /* Done with writing - release the EEPROM */
1423 ixgbe_release_eeprom(hw);
1430 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1431 * @hw: pointer to hardware structure
1432 * @offset: offset within the EEPROM to be written to
1433 * @data: 16 bit word to be written to the EEPROM
1435 * If ixgbe_eeprom_update_checksum is not called after this function, the
1436 * EEPROM will most likely contain an invalid checksum.
1438 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1442 DEBUGFUNC("ixgbe_write_eeprom_generic");
1444 hw->eeprom.ops.init_params(hw);
1446 if (offset >= hw->eeprom.word_size) {
1447 status = IXGBE_ERR_EEPROM;
1451 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1458 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1459 * @hw: pointer to hardware structure
1460 * @offset: offset within the EEPROM to be read
1461 * @data: read 16 bit words(s) from EEPROM
1462 * @words: number of word(s)
1464 * Reads 16 bit word(s) from EEPROM through bit-bang method
1466 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1467 u16 words, u16 *data)
1469 s32 status = IXGBE_SUCCESS;
1472 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1474 hw->eeprom.ops.init_params(hw);
1477 status = IXGBE_ERR_INVALID_ARGUMENT;
1481 if (offset + words > hw->eeprom.word_size) {
1482 status = IXGBE_ERR_EEPROM;
1487 * We cannot hold synchronization semaphores for too long
1488 * to avoid other entity starvation. However it is more efficient
1489 * to read in bursts than synchronizing access for each word.
1491 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1492 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1493 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1495 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1498 if (status != IXGBE_SUCCESS)
1507 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1508 * @hw: pointer to hardware structure
1509 * @offset: offset within the EEPROM to be read
1510 * @words: number of word(s)
1511 * @data: read 16 bit word(s) from EEPROM
1513 * Reads 16 bit word(s) from EEPROM through bit-bang method
1515 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1516 u16 words, u16 *data)
1520 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1523 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1525 /* Prepare the EEPROM for reading */
1526 status = ixgbe_acquire_eeprom(hw);
1528 if (status == IXGBE_SUCCESS) {
1529 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1530 ixgbe_release_eeprom(hw);
1531 status = IXGBE_ERR_EEPROM;
1535 if (status == IXGBE_SUCCESS) {
1536 for (i = 0; i < words; i++) {
1537 ixgbe_standby_eeprom(hw);
1539 * Some SPI eeproms use the 8th address bit embedded
1542 if ((hw->eeprom.address_bits == 8) &&
1543 ((offset + i) >= 128))
1544 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1546 /* Send the READ command (opcode + addr) */
1547 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1548 IXGBE_EEPROM_OPCODE_BITS);
1549 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1550 hw->eeprom.address_bits);
1552 /* Read the data. */
1553 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
1554 data[i] = (word_in >> 8) | (word_in << 8);
1557 /* End this read operation */
1558 ixgbe_release_eeprom(hw);
1565 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1566 * @hw: pointer to hardware structure
1567 * @offset: offset within the EEPROM to be read
1568 * @data: read 16 bit value from EEPROM
1570 * Reads 16 bit value from EEPROM through bit-bang method
1572 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1577 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1579 hw->eeprom.ops.init_params(hw);
1581 if (offset >= hw->eeprom.word_size) {
1582 status = IXGBE_ERR_EEPROM;
1586 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1593 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1594 * @hw: pointer to hardware structure
1595 * @offset: offset of word in the EEPROM to read
1596 * @words: number of word(s)
1597 * @data: 16 bit word(s) from the EEPROM
1599 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1601 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1602 u16 words, u16 *data)
1605 s32 status = IXGBE_SUCCESS;
1608 DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1610 hw->eeprom.ops.init_params(hw);
1613 status = IXGBE_ERR_INVALID_ARGUMENT;
1614 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1618 if (offset >= hw->eeprom.word_size) {
1619 status = IXGBE_ERR_EEPROM;
1620 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1624 for (i = 0; i < words; i++) {
1625 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1626 IXGBE_EEPROM_RW_REG_START;
1628 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1629 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1631 if (status == IXGBE_SUCCESS) {
1632 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1633 IXGBE_EEPROM_RW_REG_DATA);
1635 DEBUGOUT("Eeprom read timed out\n");
1644 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1645 * @hw: pointer to hardware structure
1646 * @offset: offset within the EEPROM to be used as a scratch pad
1648 * Discover EEPROM page size by writing marching data at given offset.
1649 * This function is called only when we are writing a new large buffer
1650 * at given offset so the data would be overwritten anyway.
1652 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1655 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1656 s32 status = IXGBE_SUCCESS;
1659 DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1661 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
1664 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1665 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1666 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1667 hw->eeprom.word_page_size = 0;
1668 if (status != IXGBE_SUCCESS)
1671 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1672 if (status != IXGBE_SUCCESS)
1676 * When writing in burst more than the actual page size
1677 * EEPROM address wraps around current page.
1679 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1681 DEBUGOUT1("Detected EEPROM page size = %d words.",
1682 hw->eeprom.word_page_size);
1688 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1689 * @hw: pointer to hardware structure
1690 * @offset: offset of word in the EEPROM to read
1691 * @data: word read from the EEPROM
1693 * Reads a 16 bit word from the EEPROM using the EERD register.
1695 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1697 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1701 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1702 * @hw: pointer to hardware structure
1703 * @offset: offset of word in the EEPROM to write
1704 * @words: number of word(s)
1705 * @data: word(s) write to the EEPROM
1707 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1709 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1710 u16 words, u16 *data)
1713 s32 status = IXGBE_SUCCESS;
1716 DEBUGFUNC("ixgbe_write_eewr_generic");
1718 hw->eeprom.ops.init_params(hw);
1721 status = IXGBE_ERR_INVALID_ARGUMENT;
1722 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1726 if (offset >= hw->eeprom.word_size) {
1727 status = IXGBE_ERR_EEPROM;
1728 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
1732 for (i = 0; i < words; i++) {
1733 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1734 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1735 IXGBE_EEPROM_RW_REG_START;
1737 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1738 if (status != IXGBE_SUCCESS) {
1739 DEBUGOUT("Eeprom write EEWR timed out\n");
1743 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
1745 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1746 if (status != IXGBE_SUCCESS) {
1747 DEBUGOUT("Eeprom write EEWR timed out\n");
1757 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1758 * @hw: pointer to hardware structure
1759 * @offset: offset of word in the EEPROM to write
1760 * @data: word write to the EEPROM
1762 * Write a 16 bit word to the EEPROM using the EEWR register.
1764 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1766 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1770 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1771 * @hw: pointer to hardware structure
1772 * @ee_reg: EEPROM flag for polling
1774 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1775 * read or write is done respectively.
1777 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1781 s32 status = IXGBE_ERR_EEPROM;
1783 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
1785 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1786 if (ee_reg == IXGBE_NVM_POLL_READ)
1787 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1789 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1791 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1792 status = IXGBE_SUCCESS;
1798 if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1799 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1800 "EEPROM read/write done polling timed out");
1806 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1807 * @hw: pointer to hardware structure
1809 * Prepares EEPROM for access using bit-bang method. This function should
1810 * be called before issuing a command to the EEPROM.
1812 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1814 s32 status = IXGBE_SUCCESS;
1818 DEBUGFUNC("ixgbe_acquire_eeprom");
1820 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1822 status = IXGBE_ERR_SWFW_SYNC;
1824 if (status == IXGBE_SUCCESS) {
1825 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1827 /* Request EEPROM Access */
1828 eec |= IXGBE_EEC_REQ;
1829 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1831 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1832 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1833 if (eec & IXGBE_EEC_GNT)
1838 /* Release if grant not acquired */
1839 if (!(eec & IXGBE_EEC_GNT)) {
1840 eec &= ~IXGBE_EEC_REQ;
1841 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1842 DEBUGOUT("Could not acquire EEPROM grant\n");
1844 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1845 status = IXGBE_ERR_EEPROM;
1848 /* Setup EEPROM for Read/Write */
1849 if (status == IXGBE_SUCCESS) {
1850 /* Clear CS and SK */
1851 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1852 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1853 IXGBE_WRITE_FLUSH(hw);
1861 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1862 * @hw: pointer to hardware structure
1864 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1866 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1868 s32 status = IXGBE_ERR_EEPROM;
1873 DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1876 /* Get SMBI software semaphore between device drivers first */
1877 for (i = 0; i < timeout; i++) {
1879 * If the SMBI bit is 0 when we read it, then the bit will be
1880 * set and we have the semaphore
1882 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1883 if (!(swsm & IXGBE_SWSM_SMBI)) {
1884 status = IXGBE_SUCCESS;
1891 DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1894 * this release is particularly important because our attempts
1895 * above to get the semaphore may have succeeded, and if there
1896 * was a timeout, we should unconditionally clear the semaphore
1897 * bits to free the driver to make progress
1899 ixgbe_release_eeprom_semaphore(hw);
1904 * If the SMBI bit is 0 when we read it, then the bit will be
1905 * set and we have the semaphore
1907 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1908 if (!(swsm & IXGBE_SWSM_SMBI))
1909 status = IXGBE_SUCCESS;
1912 /* Now get the semaphore between SW/FW through the SWESMBI bit */
1913 if (status == IXGBE_SUCCESS) {
1914 for (i = 0; i < timeout; i++) {
1915 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1917 /* Set the SW EEPROM semaphore bit to request access */
1918 swsm |= IXGBE_SWSM_SWESMBI;
1919 IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);
1922 * If we set the bit successfully then we got the
1925 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1926 if (swsm & IXGBE_SWSM_SWESMBI)
1933 * Release semaphores and return error if SW EEPROM semaphore
1934 * was not granted because we don't have access to the EEPROM
1937 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1938 "SWESMBI Software EEPROM semaphore not granted.\n");
1939 ixgbe_release_eeprom_semaphore(hw);
1940 status = IXGBE_ERR_EEPROM;
1943 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1944 "Software semaphore SMBI between device drivers "
1952 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1953 * @hw: pointer to hardware structure
1955 * This function clears hardware semaphore bits.
1957 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1961 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
1963 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1965 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1966 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1967 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
1968 IXGBE_WRITE_FLUSH(hw);
1972 * ixgbe_ready_eeprom - Polls for EEPROM ready
1973 * @hw: pointer to hardware structure
1975 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1977 s32 status = IXGBE_SUCCESS;
1981 DEBUGFUNC("ixgbe_ready_eeprom");
1984 * Read "Status Register" repeatedly until the LSB is cleared. The
1985 * EEPROM will signal that the command has been completed by clearing
1986 * bit 0 of the internal status register. If it's not cleared within
1987 * 5 milliseconds, then error out.
1989 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1990 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1991 IXGBE_EEPROM_OPCODE_BITS);
1992 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1993 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
1997 ixgbe_standby_eeprom(hw);
2001 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
2002 * devices (and only 0-5mSec on 5V devices)
2004 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
2005 DEBUGOUT("SPI EEPROM Status error\n");
2006 status = IXGBE_ERR_EEPROM;
2013 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
2014 * @hw: pointer to hardware structure
2016 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
2020 DEBUGFUNC("ixgbe_standby_eeprom");
2022 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2024 /* Toggle CS to flush commands */
2025 eec |= IXGBE_EEC_CS;
2026 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2027 IXGBE_WRITE_FLUSH(hw);
2029 eec &= ~IXGBE_EEC_CS;
2030 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2031 IXGBE_WRITE_FLUSH(hw);
2036 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
2037 * @hw: pointer to hardware structure
2038 * @data: data to send to the EEPROM
2039 * @count: number of bits to shift out
2041 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
2048 DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
2050 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2053 * Mask is used to shift "count" bits of "data" out to the EEPROM
2054 * one bit at a time. Determine the starting bit based on count
2056 mask = 0x01 << (count - 1);
2058 for (i = 0; i < count; i++) {
2060 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
2061 * "1", and then raising and then lowering the clock (the SK
2062 * bit controls the clock input to the EEPROM). A "0" is
2063 * shifted out to the EEPROM by setting "DI" to "0" and then
2064 * raising and then lowering the clock.
2067 eec |= IXGBE_EEC_DI;
2069 eec &= ~IXGBE_EEC_DI;
2071 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2072 IXGBE_WRITE_FLUSH(hw);
2076 ixgbe_raise_eeprom_clk(hw, &eec);
2077 ixgbe_lower_eeprom_clk(hw, &eec);
2080 * Shift mask to signify next bit of data to shift in to the
2086 /* We leave the "DI" bit set to "0" when we leave this routine. */
2087 eec &= ~IXGBE_EEC_DI;
2088 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2089 IXGBE_WRITE_FLUSH(hw);
2093 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
2094 * @hw: pointer to hardware structure
2096 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
2102 DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
2105 * In order to read a register from the EEPROM, we need to shift
2106 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
2107 * the clock input to the EEPROM (setting the SK bit), and then reading
2108 * the value of the "DO" bit. During this "shifting in" process the
2109 * "DI" bit should always be clear.
2111 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2113 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
2115 for (i = 0; i < count; i++) {
2117 ixgbe_raise_eeprom_clk(hw, &eec);
2119 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2121 eec &= ~(IXGBE_EEC_DI);
2122 if (eec & IXGBE_EEC_DO)
2125 ixgbe_lower_eeprom_clk(hw, &eec);
2132 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2133 * @hw: pointer to hardware structure
2134 * @eec: EEC register's current value
2136 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2138 DEBUGFUNC("ixgbe_raise_eeprom_clk");
2141 * Raise the clock input to the EEPROM
2142 * (setting the SK bit), then delay
2144 *eec = *eec | IXGBE_EEC_SK;
2145 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2146 IXGBE_WRITE_FLUSH(hw);
2151 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2152 * @hw: pointer to hardware structure
2153 * @eecd: EECD's current value
2155 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2157 DEBUGFUNC("ixgbe_lower_eeprom_clk");
2160 * Lower the clock input to the EEPROM (clearing the SK bit), then
2163 *eec = *eec & ~IXGBE_EEC_SK;
2164 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2165 IXGBE_WRITE_FLUSH(hw);
2170 * ixgbe_release_eeprom - Release EEPROM, release semaphores
2171 * @hw: pointer to hardware structure
2173 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2177 DEBUGFUNC("ixgbe_release_eeprom");
2179 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2181 eec |= IXGBE_EEC_CS; /* Pull CS high */
2182 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2184 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2185 IXGBE_WRITE_FLUSH(hw);
2189 /* Stop requesting EEPROM access */
2190 eec &= ~IXGBE_EEC_REQ;
2191 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2193 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2195 /* Delay before attempt to obtain semaphore again to allow FW access */
2196 msec_delay(hw->eeprom.semaphore_delay);
2200 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2201 * @hw: pointer to hardware structure
2203 * Returns a negative error code on error, or the 16-bit checksum
2205 s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
2214 DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
2216 /* Include 0x0-0x3F in the checksum */
2217 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
2218 if (hw->eeprom.ops.read(hw, i, &word)) {
2219 DEBUGOUT("EEPROM read failed\n");
2220 return IXGBE_ERR_EEPROM;
2225 /* Include all data from pointers except for the fw pointer */
2226 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
2227 if (hw->eeprom.ops.read(hw, i, &pointer)) {
2228 DEBUGOUT("EEPROM read failed\n");
2229 return IXGBE_ERR_EEPROM;
2232 /* If the pointer seems invalid */
2233 if (pointer == 0xFFFF || pointer == 0)
2236 if (hw->eeprom.ops.read(hw, pointer, &length)) {
2237 DEBUGOUT("EEPROM read failed\n");
2238 return IXGBE_ERR_EEPROM;
2241 if (length == 0xFFFF || length == 0)
2244 for (j = pointer + 1; j <= pointer + length; j++) {
2245 if (hw->eeprom.ops.read(hw, j, &word)) {
2246 DEBUGOUT("EEPROM read failed\n");
2247 return IXGBE_ERR_EEPROM;
2253 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
2255 return (s32)checksum;
2259 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2260 * @hw: pointer to hardware structure
2261 * @checksum_val: calculated checksum
2263 * Performs checksum calculation and validates the EEPROM checksum. If the
2264 * caller does not need checksum_val, the value can be NULL.
2266 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2271 u16 read_checksum = 0;
2273 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2275 /* Read the first word from the EEPROM. If this times out or fails, do
2276 * not continue or we could be in for a very long wait while every
2279 status = hw->eeprom.ops.read(hw, 0, &checksum);
2281 DEBUGOUT("EEPROM read failed\n");
2285 status = hw->eeprom.ops.calc_checksum(hw);
2289 checksum = (u16)(status & 0xffff);
2291 status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2293 DEBUGOUT("EEPROM read failed\n");
2297 /* Verify read checksum from EEPROM is the same as
2298 * calculated checksum
2300 if (read_checksum != checksum)
2301 status = IXGBE_ERR_EEPROM_CHECKSUM;
2303 /* If the user cares, return the calculated checksum */
2305 *checksum_val = checksum;
2311 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2312 * @hw: pointer to hardware structure
2314 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2319 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2321 /* Read the first word from the EEPROM. If this times out or fails, do
2322 * not continue or we could be in for a very long wait while every
2325 status = hw->eeprom.ops.read(hw, 0, &checksum);
2327 DEBUGOUT("EEPROM read failed\n");
2331 status = hw->eeprom.ops.calc_checksum(hw);
2335 checksum = (u16)(status & 0xffff);
2337 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
2343 * ixgbe_validate_mac_addr - Validate MAC address
2344 * @mac_addr: pointer to MAC address.
2346 * Tests a MAC address to ensure it is a valid Individual Address.
2348 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2350 s32 status = IXGBE_SUCCESS;
2352 DEBUGFUNC("ixgbe_validate_mac_addr");
2354 /* Make sure it is not a multicast address */
2355 if (IXGBE_IS_MULTICAST(mac_addr)) {
2356 status = IXGBE_ERR_INVALID_MAC_ADDR;
2357 /* Not a broadcast address */
2358 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
2359 status = IXGBE_ERR_INVALID_MAC_ADDR;
2360 /* Reject the zero address */
2361 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2362 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2363 status = IXGBE_ERR_INVALID_MAC_ADDR;
2369 * ixgbe_set_rar_generic - Set Rx address register
2370 * @hw: pointer to hardware structure
2371 * @index: Receive address register to write
2372 * @addr: Address to put into receive address register
2373 * @vmdq: VMDq "set" or "pool" index
2374 * @enable_addr: set flag that address is active
2376 * Puts an ethernet address into a receive address register.
2378 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2381 u32 rar_low, rar_high;
2382 u32 rar_entries = hw->mac.num_rar_entries;
2384 DEBUGFUNC("ixgbe_set_rar_generic");
2386 /* Make sure we are using a valid rar index range */
2387 if (index >= rar_entries) {
2388 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2389 "RAR index %d is out of range.\n", index);
2390 return IXGBE_ERR_INVALID_ARGUMENT;
2393 /* setup VMDq pool selection before this RAR gets enabled */
2394 hw->mac.ops.set_vmdq(hw, index, vmdq);
2397 * HW expects these in little endian so we reverse the byte
2398 * order from network order (big endian) to little endian
2400 rar_low = ((u32)addr[0] |
2401 ((u32)addr[1] << 8) |
2402 ((u32)addr[2] << 16) |
2403 ((u32)addr[3] << 24));
2405 * Some parts put the VMDq setting in the extra RAH bits,
2406 * so save everything except the lower 16 bits that hold part
2407 * of the address and the address valid bit.
2409 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2410 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2411 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
2413 if (enable_addr != 0)
2414 rar_high |= IXGBE_RAH_AV;
2416 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2417 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2419 return IXGBE_SUCCESS;
2423 * ixgbe_clear_rar_generic - Remove Rx address register
2424 * @hw: pointer to hardware structure
2425 * @index: Receive address register to write
2427 * Clears an ethernet address from a receive address register.
2429 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2432 u32 rar_entries = hw->mac.num_rar_entries;
2434 DEBUGFUNC("ixgbe_clear_rar_generic");
2436 /* Make sure we are using a valid rar index range */
2437 if (index >= rar_entries) {
2438 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2439 "RAR index %d is out of range.\n", index);
2440 return IXGBE_ERR_INVALID_ARGUMENT;
2444 * Some parts put the VMDq setting in the extra RAH bits,
2445 * so save everything except the lower 16 bits that hold part
2446 * of the address and the address valid bit.
2448 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2449 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2451 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2452 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2454 /* clear VMDq pool/queue selection for this RAR */
2455 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2457 return IXGBE_SUCCESS;
2461 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2462 * @hw: pointer to hardware structure
2464 * Places the MAC address in receive address register 0 and clears the rest
2465 * of the receive address registers. Clears the multicast table. Assumes
2466 * the receiver is in reset when the routine is called.
2468 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
2471 u32 rar_entries = hw->mac.num_rar_entries;
2473 DEBUGFUNC("ixgbe_init_rx_addrs_generic");
2476 * If the current mac address is valid, assume it is a software override
2477 * to the permanent address.
2478 * Otherwise, use the permanent address from the eeprom.
2480 if (ixgbe_validate_mac_addr(hw->mac.addr) ==
2481 IXGBE_ERR_INVALID_MAC_ADDR) {
2482 /* Get the MAC address from the RAR0 for later reference */
2483 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2485 DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
2486 hw->mac.addr[0], hw->mac.addr[1],
2488 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2489 hw->mac.addr[4], hw->mac.addr[5]);
2491 /* Setup the receive address. */
2492 DEBUGOUT("Overriding MAC Address in RAR[0]\n");
2493 DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
2494 hw->mac.addr[0], hw->mac.addr[1],
2496 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2497 hw->mac.addr[4], hw->mac.addr[5]);
/* RAR[0] always holds the primary MAC address, marked valid via IXGBE_RAH_AV */
2499 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2502 /* clear VMDq pool/queue selection for RAR 0 */
2503 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2505 hw->addr_ctrl.overflow_promisc = 0;
/* Only RAR[0] is in use after init */
2507 hw->addr_ctrl.rar_used_count = 1;
2509 /* Zero out the other receive addresses. */
2510 DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2511 for (i = 1; i < rar_entries; i++) {
2512 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2513 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
/* Reset multicast state and program the hash filter type into MCSTCTRL */
2517 hw->addr_ctrl.mta_in_use = 0;
2518 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2520 DEBUGOUT(" Clearing MTA\n");
2521 for (i = 0; i < hw->mac.mcft_size; i++)
2522 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2524 ixgbe_init_uta_tables(hw);
2526 return IXGBE_SUCCESS;
2530 * ixgbe_add_uc_addr - Adds a secondary unicast address.
2531 * @hw: pointer to hardware structure
2532 * @addr: new address
2534 * Adds it to unused receive address register or goes into promiscuous mode.
2536 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2538 u32 rar_entries = hw->mac.num_rar_entries;
2541 DEBUGFUNC("ixgbe_add_uc_addr");
2543 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2544 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2547 * Place this address in the RAR if there is room,
2548 * else put the controller into promiscuous mode
2550 if (hw->addr_ctrl.rar_used_count < rar_entries) {
/* Next free RAR slot; RAR[0] is reserved for the primary MAC address */
2551 rar = hw->addr_ctrl.rar_used_count;
2552 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2553 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2554 hw->addr_ctrl.rar_used_count++;
/* RARs exhausted: count the overflow so the caller can enable promisc */
2556 hw->addr_ctrl.overflow_promisc++;
2559 DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2563 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2564 * @hw: pointer to hardware structure
2565 * @addr_list: the list of new addresses
2566 * @addr_count: number of addresses
2567 * @next: iterator function to walk the address list
2569 * The given list replaces any existing list. Clears the secondary addrs from
2570 * receive address registers. Uses unused receive address registers for the
2571 * first secondary addresses, and falls back to promiscuous mode as needed.
2573 * Drivers using secondary unicast addresses must set user_set_promisc when
2574 * manually putting the device into promiscuous mode.
2576 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2577 u32 addr_count, ixgbe_mc_addr_itr next)
/* Remember whether we were already in overflow promisc before the update */
2581 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2586 DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2589 * Clear accounting of old secondary address list,
2590 * don't count RAR[0]
2592 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2593 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2594 hw->addr_ctrl.overflow_promisc = 0;
2596 /* Zero out the other receive addresses */
2597 DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2598 for (i = 0; i < uc_addr_in_use; i++) {
2599 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2600 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2603 /* Add the new addresses */
2604 for (i = 0; i < addr_count; i++) {
2605 DEBUGOUT(" Adding the secondary addresses:\n");
/* The iterator yields one MAC address and its VMDq pool per call */
2606 addr = next(hw, &addr_list, &vmdq);
2607 ixgbe_add_uc_addr(hw, addr, vmdq);
2610 if (hw->addr_ctrl.overflow_promisc) {
2611 /* enable promisc if not already in overflow or set by user */
2612 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2613 DEBUGOUT(" Entering address overflow promisc mode\n");
2614 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2615 fctrl |= IXGBE_FCTRL_UPE;
2616 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2619 /* only disable if set by overflow, not by user */
2620 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2621 DEBUGOUT(" Leaving address overflow promisc mode\n");
2622 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2623 fctrl &= ~IXGBE_FCTRL_UPE;
2624 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2628 DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2629 return IXGBE_SUCCESS;
2633 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
2634 * @hw: pointer to hardware structure
2635 * @mc_addr: the multicast address
2637 * Extracts the 12 bits, from a multicast address, to determine which
2638 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
2639 * incoming rx multicast addresses, to determine the bit-vector to check in
2640 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
2641 * by the MO field of the MCSTCTRL. The MO field is set during initialization
2642 * to mc_filter_type.
2644 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
2648 DEBUGFUNC("ixgbe_mta_vector");
/* mc_addr[4]/mc_addr[5] are the two most-significant bytes of the MAC;
 * each filter type selects a different 12-bit window from them. */
2650 switch (hw->mac.mc_filter_type) {
2651 case 0: /* use bits [47:36] of the address */
2652 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
2654 case 1: /* use bits [46:35] of the address */
2655 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
2657 case 2: /* use bits [45:34] of the address */
2658 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
2660 case 3: /* use bits [43:32] of the address */
2661 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
2663 default: /* Invalid mc_filter_type */
2664 DEBUGOUT("MC filter type param set incorrectly\n");
2669 /* vector can only be 12-bits or boundary will be exceeded */
2675 * ixgbe_set_mta - Set bit-vector in multicast table
2676 * @hw: pointer to hardware structure
2677 * @hash_value: Multicast address hash value
2679 * Sets the bit-vector in the multicast table.
2681 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2687 DEBUGFUNC("ixgbe_set_mta");
2689 hw->addr_ctrl.mta_in_use++;
2691 vector = ixgbe_mta_vector(hw, mc_addr);
2692 DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2695 * The MTA is a register array of 128 32-bit registers. It is treated
2696 * like an array of 4096 bits. We want to set bit
2697 * BitArray[vector_value]. So we figure out what register the bit is
2698 * in, read it, OR in the new bit, then write back the new value. The
2699 * register is determined by the upper 7 bits of the vector value and
2700 * the bit within that register are determined by the lower 5 bits of
/* vector[11:5] selects the MTA register, vector[4:0] the bit within it */
2703 vector_reg = (vector >> 5) & 0x7F;
2704 vector_bit = vector & 0x1F;
/* Update the shadow copy only; caller flushes mta_shadow to hardware */
2705 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2709 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2710 * @hw: pointer to hardware structure
2711 * @mc_addr_list: the list of new multicast addresses
2712 * @mc_addr_count: number of addresses
2713 * @next: iterator function to walk the multicast address list
2714 * @clear: flag, when set clears the table beforehand
2716 * When the clear flag is set, the given list replaces any existing list.
2717 * Hashes the given addresses into the multicast table.
2719 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2720 u32 mc_addr_count, ixgbe_mc_addr_itr next,
2726 DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2729 * Set the new number of MC addresses that we are being requested to
2732 hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2733 hw->addr_ctrl.mta_in_use = 0;
2735 /* Clear mta_shadow */
2737 DEBUGOUT(" Clearing MTA\n");
2738 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2741 /* Update mta_shadow */
2742 for (i = 0; i < mc_addr_count; i++) {
2743 DEBUGOUT(" Adding the multicast addresses:\n");
/* Hash each address from the iterator into the shadow table */
2744 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
/* Flush the shadow table to the hardware MTA register array */
2748 for (i = 0; i < hw->mac.mcft_size; i++)
2749 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2750 hw->mac.mta_shadow[i]);
/* Enable the multicast filter only if at least one entry is in use */
2752 if (hw->addr_ctrl.mta_in_use > 0)
2753 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2754 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2756 DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2757 return IXGBE_SUCCESS;
2761 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2762 * @hw: pointer to hardware structure
2764 * Enables multicast address in RAR and the use of the multicast hash table.
2766 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2768 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2770 DEBUGFUNC("ixgbe_enable_mc_generic");
/* Re-enable the hash filter (MFE) only if the MTA has active entries */
2772 if (a->mta_in_use > 0)
2773 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2774 hw->mac.mc_filter_type);
2776 return IXGBE_SUCCESS;
2780 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2781 * @hw: pointer to hardware structure
2783 * Disables multicast address in RAR and the use of the multicast hash table.
2785 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2787 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2789 DEBUGFUNC("ixgbe_disable_mc_generic");
/* Writing MCSTCTRL without the MFE bit turns the hash filter off while
 * preserving the configured filter type for a later re-enable */
2791 if (a->mta_in_use > 0)
2792 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2794 return IXGBE_SUCCESS;
2798 * ixgbe_fc_enable_generic - Enable flow control
2799 * @hw: pointer to hardware structure
2801 * Enable flow control according to the current settings.
2803 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
2805 s32 ret_val = IXGBE_SUCCESS;
2806 u32 mflcn_reg, fccfg_reg;
2811 DEBUGFUNC("ixgbe_fc_enable_generic");
2813 /* Validate the water mark configuration */
2814 if (!hw->fc.pause_time) {
2815 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2819 /* Low water mark of zero causes XOFF floods */
2820 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2821 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2822 hw->fc.high_water[i]) {
/* low water must be non-zero and strictly below high water */
2823 if (!hw->fc.low_water[i] ||
2824 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
2825 DEBUGOUT("Invalid water mark configuration\n");
2826 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2832 /* Negotiate the fc mode to use */
2833 hw->mac.ops.fc_autoneg(hw);
2835 /* Disable any previous flow control settings */
2836 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
2837 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
2839 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
2840 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
2843 * The possible values of fc.current_mode are:
2844 * 0: Flow control is completely disabled
2845 * 1: Rx flow control is enabled (we can receive pause frames,
2846 * but not send pause frames).
2847 * 2: Tx flow control is enabled (we can send pause frames but
2848 * we do not support receiving pause frames).
2849 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2852 switch (hw->fc.current_mode) {
2855 * Flow control is disabled by software override or autoneg.
2856 * The code below will actually disable it in the HW.
2859 case ixgbe_fc_rx_pause:
2861 * Rx Flow control is enabled and Tx Flow control is
2862 * disabled by software override. Since there really
2863 * isn't a way to advertise that we are capable of RX
2864 * Pause ONLY, we will advertise that we support both
2865 * symmetric and asymmetric Rx PAUSE. Later, we will
2866 * disable the adapter's ability to send PAUSE frames.
2868 mflcn_reg |= IXGBE_MFLCN_RFCE;
2870 case ixgbe_fc_tx_pause:
2872 * Tx Flow control is enabled, and Rx Flow control is
2873 * disabled by software override.
2875 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2878 /* Flow control (both Rx and Tx) is enabled by SW override. */
2879 mflcn_reg |= IXGBE_MFLCN_RFCE;
2880 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
2883 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
2884 "Flow control param set incorrectly\n");
2885 ret_val = IXGBE_ERR_CONFIG;
2890 /* Set 802.3x based flow control settings. */
2891 mflcn_reg |= IXGBE_MFLCN_DPF;
2892 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2893 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2896 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2897 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
2898 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2899 hw->fc.high_water[i]) {
/* Water marks are shifted left 10 bits to convert to register units
 * — presumably KB granularity; confirm against the 82599 datasheet */
2900 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
2901 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2902 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2904 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2906 * In order to prevent Tx hangs when the internal Tx
2907 * switch is enabled we must set the high water mark
2908 * to the Rx packet buffer size - 24KB. This allows
2909 * the Tx switch to function even under heavy Rx
2912 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
2915 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2918 /* Configure pause time (2 TCs per register) */
/* Replicate the 16-bit pause time into both halves of each FCTTV register */
2919 reg = hw->fc.pause_time * 0x00010001;
2920 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
2921 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2923 /* Configure flow control refresh threshold value */
2924 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2931 * ixgbe_negotiate_fc - Negotiate flow control
2932 * @hw: pointer to hardware structure
2933 * @adv_reg: flow control advertised settings
2934 * @lp_reg: link partner's flow control settings
2935 * @adv_sym: symmetric pause bit in advertisement
2936 * @adv_asm: asymmetric pause bit in advertisement
2937 * @lp_sym: symmetric pause bit in link partner advertisement
2938 * @lp_asm: asymmetric pause bit in link partner advertisement
2940 * Find the intersection between advertised settings and link partner's
2941 * advertised settings
2943 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2944 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2946 if ((!(adv_reg)) || (!(lp_reg))) {
2947 ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2948 "Local or link partner's advertised flow control "
2949 "settings are NULL. Local: %x, link partner: %x\n",
2951 return IXGBE_ERR_FC_NOT_NEGOTIATED;
/* Pause resolution follows the IEEE 802.3 Annex 28B truth table:
 * both sides symmetric -> full (or rx-only if that is what was requested) */
2954 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2956 * Now we need to check if the user selected Rx ONLY
2957 * of pause frames. In this case, we had to advertise
2958 * FULL flow control because we could not advertise RX
2959 * ONLY. Hence, we must now check to see if we need to
2960 * turn OFF the TRANSMISSION of PAUSE frames.
2962 if (hw->fc.requested_mode == ixgbe_fc_full) {
2963 hw->fc.current_mode = ixgbe_fc_full;
2964 DEBUGOUT("Flow Control = FULL.\n");
2966 hw->fc.current_mode = ixgbe_fc_rx_pause;
2967 DEBUGOUT("Flow Control=RX PAUSE frames only\n");
/* local: asym only, partner: sym+asym -> we may send pause, not honor it */
2969 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2970 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2971 hw->fc.current_mode = ixgbe_fc_tx_pause;
2972 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
/* local: sym+asym, partner: asym only -> we honor pause, do not send it */
2973 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2974 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2975 hw->fc.current_mode = ixgbe_fc_rx_pause;
2976 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2978 hw->fc.current_mode = ixgbe_fc_none;
2979 DEBUGOUT("Flow Control = NONE.\n");
2981 return IXGBE_SUCCESS;
2985 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2986 * @hw: pointer to hardware structure
2988 * Enable flow control according on 1 gig fiber.
2990 STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2992 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2993 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2996 * On multispeed fiber at 1g, bail out if
2997 * - link is up but AN did not complete, or if
2998 * - link is up and AN completed but timed out
3001 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
3002 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
3003 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
3004 DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
/* Read our 1G PCS advertisement and link partner ability, then resolve
 * the pause mode; both registers share the same SYM/ASM bit layout */
3008 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
3009 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
3011 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
3012 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
3013 IXGBE_PCS1GANA_ASM_PAUSE,
3014 IXGBE_PCS1GANA_SYM_PAUSE,
3015 IXGBE_PCS1GANA_ASM_PAUSE);
3022 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
3023 * @hw: pointer to hardware structure
3025 * Enable flow control according to IEEE clause 37.
3027 STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
3029 u32 links2, anlp1_reg, autoc_reg, links;
3030 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3033 * On backplane, bail out if
3034 * - backplane autoneg was not completed, or if
3035 * - we are 82599 and link partner is not AN enabled
3037 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
3038 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
3039 DEBUGOUT("Auto-Negotiation did not complete\n");
/* 82599 only: LINKS2 reports whether the partner supports autoneg */
3043 if (hw->mac.type == ixgbe_mac_82599EB) {
3044 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
3045 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
3046 DEBUGOUT("Link partner is not AN enabled\n");
3051 * Read the 10g AN autoc and LP ability registers and resolve
3052 * local flow control settings accordingly
3054 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3055 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
3057 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
3058 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
3059 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
3066 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
3067 * @hw: pointer to hardware structure
3069 * Enable flow control according to IEEE clause 37.
3071 STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
3073 u16 technology_ability_reg = 0;
3074 u16 lp_technology_ability_reg = 0;
/* Fetch our advertisement and the link partner's ability over MDIO.
 * NOTE(review): read_reg return values are ignored here; on PHY read
 * failure both registers stay 0 and negotiate_fc reports not-negotiated. */
3076 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
3077 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3078 &technology_ability_reg);
3079 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
3080 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3081 &lp_technology_ability_reg);
3083 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
3084 (u32)lp_technology_ability_reg,
3085 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
3086 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
3090 * ixgbe_fc_autoneg - Configure flow control
3091 * @hw: pointer to hardware structure
3093 * Compares our advertised flow control capabilities to those advertised by
3094 * our link partner, and determines the proper flow control mode to use.
3096 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
3098 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3099 ixgbe_link_speed speed;
3102 DEBUGFUNC("ixgbe_fc_autoneg");
3105 * AN should have completed when the cable was plugged in.
3106 * Look for reasons to bail out. Bail out if:
3107 * - FC autoneg is disabled, or if
3110 if (hw->fc.disable_fc_autoneg) {
3111 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3112 "Flow control autoneg is disabled");
3116 hw->mac.ops.check_link(hw, &speed, &link_up, false);
3118 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
/* Dispatch to the media-specific negotiation routine */
3122 switch (hw->phy.media_type) {
3123 /* Autoneg flow control on fiber adapters */
3124 case ixgbe_media_type_fiber_qsfp:
3125 case ixgbe_media_type_fiber:
/* Fiber FC autoneg is only defined at 1G; 10G fiber has no AN */
3126 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
3127 ret_val = ixgbe_fc_autoneg_fiber(hw);
3130 /* Autoneg flow control on backplane adapters */
3131 case ixgbe_media_type_backplane:
3132 ret_val = ixgbe_fc_autoneg_backplane(hw);
3135 /* Autoneg flow control on copper adapters */
3136 case ixgbe_media_type_copper:
3137 if (ixgbe_device_supports_autoneg_fc(hw))
3138 ret_val = ixgbe_fc_autoneg_copper(hw);
/* On any failure fall back to the mode the user originally requested */
3146 if (ret_val == IXGBE_SUCCESS) {
3147 hw->fc.fc_was_autonegged = true;
3149 hw->fc.fc_was_autonegged = false;
3150 hw->fc.current_mode = hw->fc.requested_mode;
3155 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3156 * @hw: pointer to hardware structure
3158 * System-wide timeout range is encoded in PCIe Device Control2 register.
3160 * Add 10% to specified maximum and return the number of times to poll for
3161 * completion timeout, in units of 100 microsec. Never return less than
3162 * 800 = 80 millisec.
3164 STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
/* Read the Completion Timeout Value field from PCIe Device Control 2 */
3169 devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3170 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
/* Each encoding names a range; use the range maximum in 100us units */
3173 case IXGBE_PCIDEVCTRL2_65_130ms:
3174 pollcnt = 1300; /* 130 millisec */
3176 case IXGBE_PCIDEVCTRL2_260_520ms:
3177 pollcnt = 5200; /* 520 millisec */
3179 case IXGBE_PCIDEVCTRL2_1_2s:
3180 pollcnt = 20000; /* 2 sec */
3182 case IXGBE_PCIDEVCTRL2_4_8s:
3183 pollcnt = 80000; /* 8 sec */
3185 case IXGBE_PCIDEVCTRL2_17_34s:
3186 pollcnt = 34000; /* 34 sec */
3188 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
3189 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
3190 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
3191 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
3193 pollcnt = 800; /* 80 millisec minimum */
3197 /* add 10% to spec maximum */
3198 return (pollcnt * 11) / 10;
3202 * ixgbe_disable_pcie_master - Disable PCI-express master access
3203 * @hw: pointer to hardware structure
3205 * Disables PCI-Express master access and verifies there are no pending
3206 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
3207 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
3208 * is returned signifying master requests disabled.
3210 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
3212 s32 status = IXGBE_SUCCESS;
3216 DEBUGFUNC("ixgbe_disable_pcie_master");
3218 /* Always set this bit to ensure any future transactions are blocked */
3219 IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
3221 /* Exit if master requests are blocked */
3222 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
3223 IXGBE_REMOVED(hw->hw_addr))
3226 /* Poll for master request bit to clear */
3227 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
3229 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
3234 * Two consecutive resets are required via CTRL.RST per datasheet
3235 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
3236 * of this need. The first reset prevents new master requests from
3237 * being issued by our device. We then must wait 1usec or more for any
3238 * remaining completions from the PCIe bus to trickle in, and then reset
3239 * again to clear out any effects they may have had on our device.
3241 DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
3242 hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
3244 if (hw->mac.type >= ixgbe_mac_X550)
3248 * Before proceeding, make sure that the PCIe block does not have
3249 * transactions pending.
/* Poll PCIe Device Status for the Transactions Pending bit, bounded by
 * the system-configured completion timeout (plus 10%) */
3251 poll = ixgbe_pcie_timeout_poll(hw);
3252 for (i = 0; i < poll; i++) {
3254 value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
3255 if (IXGBE_REMOVED(hw->hw_addr))
3257 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
3261 ERROR_REPORT1(IXGBE_ERROR_POLLING,
3262 "PCIe transaction pending bit also did not clear.\n");
3263 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
3270 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
3271 * @hw: pointer to hardware structure
3272 * @mask: Mask to specify which semaphore to acquire
3274 * Acquires the SWFW semaphore through the GSSR register for the specified
3275 * function (CSR, PHY0, PHY1, EEPROM, Flash)
3277 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
/* FW-owned bits sit 5 positions above the corresponding SW bits in GSSR */
3281 u32 fwmask = mask << 5;
3285 DEBUGFUNC("ixgbe_acquire_swfw_sync");
3287 for (i = 0; i < timeout; i++) {
3289 * SW NVM semaphore bit is used for access to all
3290 * SW_FW_SYNC bits (not just NVM)
3292 if (ixgbe_get_eeprom_semaphore(hw))
3293 return IXGBE_ERR_SWFW_SYNC;
3295 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
/* Resource free if neither FW nor SW owns it; claim it and release
 * the protecting EEPROM semaphore */
3296 if (!(gssr & (fwmask | swmask))) {
3298 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3299 ixgbe_release_eeprom_semaphore(hw);
3300 return IXGBE_SUCCESS;
3302 /* Resource is currently in use by FW or SW */
3303 ixgbe_release_eeprom_semaphore(hw);
3308 /* If time expired clear the bits holding the lock and retry */
3309 if (gssr & (fwmask | swmask))
3310 ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
3313 return IXGBE_ERR_SWFW_SYNC;
3317 * ixgbe_release_swfw_sync - Release SWFW semaphore
3318 * @hw: pointer to hardware structure
3319 * @mask: Mask to specify which semaphore to release
3321 * Releases the SWFW semaphore through the GSSR register for the specified
3322 * function (CSR, PHY0, PHY1, EEPROM, Flash)
3324 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3329 DEBUGFUNC("ixgbe_release_swfw_sync");
/* GSSR updates must happen under the EEPROM (SMBI/SWESMBI) semaphore */
3331 ixgbe_get_eeprom_semaphore(hw);
3333 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3335 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3337 ixgbe_release_eeprom_semaphore(hw);
3341 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3342 * @hw: pointer to hardware structure
3344 * Stops the receive data path and waits for the HW to internally empty
3345 * the Rx security block
3347 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3349 #define IXGBE_MAX_SECRX_POLL 40
3354 DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
/* Request Rx-path disable, then poll SECRXSTAT until the security
 * block reports it has drained (SECRX_RDY) */
3357 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3358 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3359 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3360 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3361 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3362 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3365 /* Use interrupt-safe sleep just in case */
3369 /* For informational purposes only */
3370 if (i >= IXGBE_MAX_SECRX_POLL)
3371 DEBUGOUT("Rx unit being enabled before security "
3372 "path fully disabled. Continuing with init.\n");
3374 return IXGBE_SUCCESS;
3378 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
3379 * @hw: pointer to hardware structure
3380 * @reg_val: Value we read from AUTOC
3382 * The default case requires no protection so just to the register read.
3384 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
/* No SW/FW lock needed on generic MACs; plain register read */
3387 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3388 return IXGBE_SUCCESS;
3392 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
3393 * @hw: pointer to hardware structure
3394 * @reg_val: value to write to AUTOC
3395 * @locked: bool to indicate whether the SW/FW lock was already taken by
3398 * The default case requires no protection so just to the register write.
3400 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
/* Lock state is irrelevant for generic MACs; plain register write */
3402 UNREFERENCED_1PARAMETER(locked);
3404 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
3405 return IXGBE_SUCCESS;
3409 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3410 * @hw: pointer to hardware structure
3412 * Enables the receive data path.
3414 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3418 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
/* Clear the Rx-disable request set by ixgbe_disable_sec_rx_path_generic
 * and flush so the write posts before the caller proceeds */
3420 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3421 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3422 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3423 IXGBE_WRITE_FLUSH(hw);
3425 return IXGBE_SUCCESS;
3429 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3430 * @hw: pointer to hardware structure
3431 * @regval: register value to write to RXCTRL
3433 * Enables the Rx DMA unit
3435 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3437 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
/* Only the RXEN bit of regval is honored; the enable/disable helpers
 * perform the actual (MAC-specific) RXCTRL sequencing */
3439 if (regval & IXGBE_RXCTRL_RXEN)
3440 ixgbe_enable_rx(hw);
3442 ixgbe_disable_rx(hw);
3444 return IXGBE_SUCCESS;
3448 * ixgbe_blink_led_start_generic - Blink LED based on index.
3449 * @hw: pointer to hardware structure
3450 * @index: led number to blink
3452 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
3454 ixgbe_link_speed speed = 0;
3457 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3458 s32 ret_val = IXGBE_SUCCESS;
3459 bool locked = false;
3461 DEBUGFUNC("ixgbe_blink_led_start_generic");
3464 return IXGBE_ERR_PARAM;
3467 * Link must be up to auto-blink the LEDs;
3468 * Force it if link is down.
3470 hw->mac.ops.check_link(hw, &speed, &link_up, false);
/* Force link up (FLU) via the protected AUTOC accessors so the LED
 * blink logic has an active link to drive */
3473 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3474 if (ret_val != IXGBE_SUCCESS)
3477 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3478 autoc_reg |= IXGBE_AUTOC_FLU;
3480 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3481 if (ret_val != IXGBE_SUCCESS)
3484 IXGBE_WRITE_FLUSH(hw);
/* Switch this LED's mode field to blink */
3488 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3489 led_reg |= IXGBE_LED_BLINK(index);
3490 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3491 IXGBE_WRITE_FLUSH(hw);
3498 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3499 * @hw: pointer to hardware structure
3500 * @index: led number to stop blinking
3502 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
3505 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3506 s32 ret_val = IXGBE_SUCCESS;
3507 bool locked = false;
3509 DEBUGFUNC("ixgbe_blink_led_stop_generic");
3512 return IXGBE_ERR_PARAM;
/* Undo the forced-link-up (FLU) set by ixgbe_blink_led_start_generic */
3515 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3516 if (ret_val != IXGBE_SUCCESS)
3519 autoc_reg &= ~IXGBE_AUTOC_FLU;
3520 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3522 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3523 if (ret_val != IXGBE_SUCCESS)
/* Restore the LED to link-activity mode instead of blink */
3526 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3527 led_reg &= ~IXGBE_LED_BLINK(index);
3528 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3529 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3530 IXGBE_WRITE_FLUSH(hw);
3537 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3538 * @hw: pointer to hardware structure
3539 * @san_mac_offset: SAN MAC address offset
3541 * This function will read the EEPROM location for the SAN MAC address
3542 * pointer, and returns the value at that location. This is used in both
3543 * get and set mac_addr routines.
3545 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3546 u16 *san_mac_offset)
3550 DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3553 * First read the EEPROM pointer to see if the MAC addresses are
/* A read failure is reported but the (possibly stale) ret_val/offset
 * handling is completed by the caller */
3556 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3559 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3560 "eeprom at offset %d failed",
3561 IXGBE_SAN_MAC_ADDR_PTR);
3568 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3569 * @hw: pointer to hardware structure
3570 * @san_mac_addr: SAN MAC address
3572 * Reads the SAN MAC address from the EEPROM, if it's available. This is
3573 * per-port, so set_lan_id() must be called before reading the addresses.
3574 * set_lan_id() is called by identify_sfp(), but this cannot be relied
3575 * upon for non-SFP connections, so we must call it here.
3577 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3579 u16 san_mac_data, san_mac_offset;
3583 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3586 * First read the EEPROM pointer to see if the MAC addresses are
3587 * available. If they're not, no point in calling set_lan_id() here.
3589 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
/* 0 and 0xFFFF both mean "no SAN address programmed" in the EEPROM */
3590 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3591 goto san_mac_addr_out;
3593 /* make sure we know which port we need to program */
3594 hw->mac.ops.set_lan_id(hw);
3595 /* apply the port offset to the address offset */
3596 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3597 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
/* Address is stored as three 16-bit little-endian EEPROM words */
3598 for (i = 0; i < 3; i++) {
3599 ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3602 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3603 "eeprom read at offset %d failed",
3605 goto san_mac_addr_out;
3607 san_mac_addr[i * 2] = (u8)(san_mac_data);
3608 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3611 return IXGBE_SUCCESS;
3615 * No addresses available in this EEPROM. It's not an
3616 * error though, so just wipe the local address and return.
3618 for (i = 0; i < 6; i++)
3619 san_mac_addr[i] = 0xFF;
3620 return IXGBE_SUCCESS;
3624 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3625 * @hw: pointer to hardware structure
3626 * @san_mac_addr: SAN MAC address
3628 * Write a SAN MAC address to the EEPROM.
3630 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3633 u16 san_mac_data, san_mac_offset;
3636 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3638 /* Look for SAN mac address pointer. If not defined, return */
3639 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3640 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3641 return IXGBE_ERR_NO_SAN_ADDR_PTR;
3643 /* Make sure we know which port we need to write */
3644 hw->mac.ops.set_lan_id(hw);
3645 /* Apply the port offset to the address offset */
3646 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3647 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3649 for (i = 0; i < 3; i++) {
/* pack two address bytes into one little-endian EEPROM word */
3650 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3651 san_mac_data |= (u16)(san_mac_addr[i * 2]);
/* NOTE(review): write status is not checked here — confirm against the
 * full source whether that is intentional.
 */
3652 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3656 return IXGBE_SUCCESS;
3660 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3661 * @hw: pointer to hardware structure
3663 * Read PCIe configuration space, and get the MSI-X vector count from
3664 * the capabilities table.
3666 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3672 switch (hw->mac.type) {
3673 case ixgbe_mac_82598EB:
3674 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3675 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3677 case ixgbe_mac_82599EB:
3678 case ixgbe_mac_X540:
3679 case ixgbe_mac_X550:
3680 case ixgbe_mac_X550EM_x:
3681 case ixgbe_mac_X550EM_a:
3682 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3683 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3689 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3690 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
/* a surprise-removed adapter yields a bogus PCIe read; guard against it */
3691 if (IXGBE_REMOVED(hw->hw_addr))
3693 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3695 /* MSI-X count is zero-based in HW */
/* clamp to the per-MAC maximum supported vector count */
3698 if (msix_count > max_msix_count)
3699 msix_count = max_msix_count;
3705 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3706 * @hw: pointer to hardware structure
3707 * @addr: Address to put into receive address register
3708 * @vmdq: VMDq pool to assign
3710 * Puts an ethernet address into a receive address register, or
3711 * finds the rar that it is already in; adds to the pool list
3713 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3715 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3716 u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3718 u32 rar_low, rar_high;
3719 u32 addr_low, addr_high;
3721 DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3723 /* swap bytes for HW little endian */
3724 addr_low = addr[0] | (addr[1] << 8)
3727 addr_high = addr[4] | (addr[5] << 8);
3730 * Either find the mac_id in rar or find the first empty space.
3731 * rar_highwater points to just after the highest currently used
3732 * rar in order to shorten the search. It grows when we add a new
3735 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3736 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
/* remember the first disabled slot (AV bit clear) seen on the way */
3738 if (((IXGBE_RAH_AV & rar_high) == 0)
3739 && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3740 first_empty_rar = rar;
/* RAH holds the top 16 address bits; only read RAL on a RAH match */
3741 } else if ((rar_high & 0xFFFF) == addr_high) {
3742 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3743 if (rar_low == addr_low)
3744 break; /* found it already in the rars */
3748 if (rar < hw->mac.rar_highwater) {
3749 /* already there so just add to the pool bits */
3750 ixgbe_set_vmdq(hw, rar, vmdq);
3751 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3752 /* stick it into first empty RAR slot we found */
3753 rar = first_empty_rar;
3754 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3755 } else if (rar == hw->mac.rar_highwater) {
3756 /* add it to the top of the list and inc the highwater mark */
3757 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3758 hw->mac.rar_highwater++;
3759 } else if (rar >= hw->mac.num_rar_entries) {
3760 return IXGBE_ERR_INVALID_MAC_ADDR;
3764 * If we found rar[0], make sure the default pool bit (we use pool 0)
3765 * remains cleared to be sure default pool packets will get delivered
3768 ixgbe_clear_vmdq(hw, rar, 0);
3774 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3775 * @hw: pointer to hardware struct
3776 * @rar: receive address register index to disassociate
3777 * @vmdq: VMDq pool index to remove from the rar
3779 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3781 u32 mpsar_lo, mpsar_hi;
3782 u32 rar_entries = hw->mac.num_rar_entries;
3784 DEBUGFUNC("ixgbe_clear_vmdq_generic");
3786 /* Make sure we are using a valid rar index range */
3787 if (rar >= rar_entries) {
3788 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3789 "RAR index %d is out of range.\n", rar);
3790 return IXGBE_ERR_INVALID_ARGUMENT;
/* MPSAR_LO covers pools 0-31, MPSAR_HI pools 32-63 */
3793 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3794 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3796 if (IXGBE_REMOVED(hw->hw_addr))
3799 if (!mpsar_lo && !mpsar_hi)
3802 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3804 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3808 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3811 } else if (vmdq < 32) {
3812 mpsar_lo &= ~(1 << vmdq);
3813 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3815 mpsar_hi &= ~(1 << (vmdq - 32));
3816 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3819 /* was that the last pool using this rar? */
/* never auto-clear RAR 0 (default MAC) or the SAN MAC RAR */
3820 if (mpsar_lo == 0 && mpsar_hi == 0 &&
3821 rar != 0 && rar != hw->mac.san_mac_rar_index)
3822 hw->mac.ops.clear_rar(hw, rar);
3824 return IXGBE_SUCCESS;
3828 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3829 * @hw: pointer to hardware struct
3830 * @rar: receive address register index to associate with a VMDq index
3831 * @vmdq: VMDq pool index
3833 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3836 u32 rar_entries = hw->mac.num_rar_entries;
3838 DEBUGFUNC("ixgbe_set_vmdq_generic");
3840 /* Make sure we are using a valid rar index range */
3841 if (rar >= rar_entries) {
3842 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3843 "RAR index %d is out of range.\n", rar);
3844 return IXGBE_ERR_INVALID_ARGUMENT;
/* set the pool bit: MPSAR_LO for pools 0-31, MPSAR_HI for 32-63 */
3848 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3850 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3852 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3853 mpsar |= 1 << (vmdq - 32);
3854 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3856 return IXGBE_SUCCESS;
3860 * This function should only be involved in the IOV mode.
3861 * In IOV mode, Default pool is next pool after the number of
3862 * VFs advertized and not 0.
3863 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3865 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3866 * @hw: pointer to hardware struct
3867 * @vmdq: VMDq pool index
3869 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3871 u32 rar = hw->mac.san_mac_rar_index;
3873 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
/* exactly one pool bit is set; the other MPSAR dword is cleared */
3876 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3877 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3879 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3880 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3883 return IXGBE_SUCCESS;
3887 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3888 * @hw: pointer to hardware structure
3890 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3894 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3895 DEBUGOUT(" Clearing UTA\n");
/* the UTA spans 128 32-bit registers; zero them all */
3897 for (i = 0; i < 128; i++)
3898 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3900 return IXGBE_SUCCESS;
3904 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3905 * @hw: pointer to hardware structure
3906 * @vlan: VLAN id to write to VLAN filter
3908 * return the VLVF index where this VLAN id should be placed
3911 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
3913 s32 regindex, first_empty_slot;
3916 /* short cut the special case */
3920 /* if vlvf_bypass is set we don't want to use an empty slot, we
3921 * will simply bypass the VLVF if there are no entries present in the
3922 * VLVF that contain our VLAN
3924 first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
3926 /* add VLAN enable bit for comparison */
3927 vlan |= IXGBE_VLVF_VIEN;
3929 /* Search for the vlan id in the VLVF entries. Save off the first empty
3930 * slot found along the way.
3932 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
3934 for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
3935 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
/* an all-zero entry is unused; record the first one encountered */
3938 if (!first_empty_slot && !bits)
3939 first_empty_slot = regindex;
3942 /* If we are here then we didn't find the VLAN. Return first empty
3943 * slot we found during our search, else error.
3945 if (!first_empty_slot)
3946 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");
/* first_empty_slot may itself be IXGBE_ERR_NO_SPACE when vlvf_bypass */
3948 return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
3952 * ixgbe_set_vfta_generic - Set VLAN filter table
3953 * @hw: pointer to hardware structure
3954 * @vlan: VLAN id to write to VLAN filter
3955 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
3956 * @vlan_on: boolean flag to turn on/off VLAN
3957 * @vlvf_bypass: boolean flag indicating updating default pool is okay
3959 * Turn on/off specified VLAN in the VLAN filter table.
3961 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3962 bool vlan_on, bool vlvf_bypass)
3964 u32 regidx, vfta_delta, vfta;
3967 DEBUGFUNC("ixgbe_set_vfta_generic");
/* VLAN ids are 12-bit and pool indices fit in 6 bits */
3969 if (vlan > 4095 || vind > 63)
3970 return IXGBE_ERR_PARAM;
3973 * this is a 2 part operation - first the VFTA, then the
3974 * VLVF and VLVFB if VT Mode is set
3975 * We don't write the VFTA until we know the VLVF part succeeded.
3979 * The VFTA is a bitstring made up of 128 32-bit registers
3980 * that enable the particular VLAN id, much like the MTA:
3981 * bits[11-5]: which register
3982 * bits[4-0]: which bit in the register
3985 vfta_delta = 1 << (vlan % 32);
3986 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
3989 * vfta_delta represents the difference between the current value
3990 * of vfta and the value we want in the register. Since the diff
3991 * is an XOR mask we can just update the vfta using an XOR
3993 vfta_delta &= vlan_on ? ~vfta : vfta;
3997 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3999 ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
4001 if (ret_val != IXGBE_SUCCESS) {
4008 /* Update VFTA now that we are ready for traffic */
4010 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
4012 return IXGBE_SUCCESS;
4016 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
4017 * @hw: pointer to hardware structure
4018 * @vlan: VLAN id to write to VLAN filter
4019 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
4020 * @vlan_on: boolean flag to turn on/off VLAN in VLVF
4021 * @vfta_delta: pointer to the difference between the current value of VFTA
4022 * and the desired value
4023 * @vfta: the desired value of the VFTA
4024 * @vlvf_bypass: boolean flag indicating updating default pool is okay
4026 * Turn on/off specified bit in VLVF table.
4028 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
4029 bool vlan_on, u32 *vfta_delta, u32 vfta,
4035 DEBUGFUNC("ixgbe_set_vlvf_generic");
4037 if (vlan > 4095 || vind > 63)
4038 return IXGBE_ERR_PARAM;
4040 /* If VT Mode is set
4042 * make sure the vlan is in VLVF
4043 * set the vind bit in the matching VLVFB
4045 * clear the pool bit and possibly the vind
/* VLVF/VLVFB are only consulted when virtualization is enabled */
4047 if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
4048 return IXGBE_SUCCESS;
4050 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
/* each VLVF entry owns two VLVFB dwords (pools 0-31 and 32-63) */
4054 bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
4056 /* set the pool bit */
4057 bits |= 1 << (vind % 32);
4061 /* clear the pool bit */
4062 bits ^= 1 << (vind % 32);
/* "1 - vind / 32" selects the sibling VLVFB dword of this entry */
4065 !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
4066 /* Clear VFTA first, then disable VLVF. Otherwise
4067 * we run the risk of stray packets leaking into
4068 * the PF via the default pool
4071 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
4073 /* disable VLVF and clear remaining bit from pool */
4074 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
4075 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
4077 return IXGBE_SUCCESS;
4080 /* If there are still bits set in the VLVFB registers
4081 * for the VLAN ID indicated we need to see if the
4082 * caller is requesting that we clear the VFTA entry bit.
4083 * If the caller has requested that we clear the VFTA
4084 * entry bit but there are still pools/VFs using this VLAN
4085 * ID entry then ignore the request. We're not worried
4086 * about the case where we're turning the VFTA VLAN ID
4087 * entry bit on, only when requested to turn it off as
4088 * there may be multiple pools and/or VFs using the
4089 * VLAN ID entry. In that case we cannot clear the
4090 * VFTA bit until all pools/VFs using that VLAN ID have also
4091 * been cleared. This will be indicated by "bits" being
4097 /* record pool change and enable VLAN ID if not already enabled */
4098 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
4099 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
4101 return IXGBE_SUCCESS;
4105 * ixgbe_clear_vfta_generic - Clear VLAN filter table
4106 * @hw: pointer to hardware structure
4108 * Clears the VLAN filer table, and the VMDq index associated with the filter
4110 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
4114 DEBUGFUNC("ixgbe_clear_vfta_generic");
4116 for (offset = 0; offset < hw->mac.vft_size; offset++)
4117 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
/* clear each VLVF entry along with both of its VLVFB pool dwords */
4119 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
4120 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
4121 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4122 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
4125 return IXGBE_SUCCESS;
4129 * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
4130 * @hw: pointer to hardware structure
4132 * Contains the logic to identify if we need to verify link for the
4135 static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
4138 /* Does FW say we need the fix */
4139 if (!hw->need_crosstalk_fix)
4142 /* Only consider SFP+ PHYs i.e. media type fiber */
4143 switch (hw->mac.ops.get_media_type(hw)) {
4144 case ixgbe_media_type_fiber:
4145 case ixgbe_media_type_fiber_qsfp:
4155 * ixgbe_check_mac_link_generic - Determine link and speed status
4156 * @hw: pointer to hardware structure
4157 * @speed: pointer to link speed
4158 * @link_up: true when link is up
4159 * @link_up_wait_to_complete: bool used to wait for link up or not
4161 * Reads the links register to determine if link is up and the current speed
4163 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4164 bool *link_up, bool link_up_wait_to_complete)
4166 u32 links_reg, links_orig;
4169 DEBUGFUNC("ixgbe_check_mac_link_generic");
4171 /* If Crosstalk fix enabled do the sanity check of making sure
4172 * the SFP+ cage is full.
4174 if (ixgbe_need_crosstalk_fix(hw)) {
/* the cage-present ESDP bit differs per MAC family */
4177 switch (hw->mac.type) {
4178 case ixgbe_mac_82599EB:
4179 sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4182 case ixgbe_mac_X550EM_x:
4183 case ixgbe_mac_X550EM_a:
4184 sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4188 /* sanity check - No SFP+ devices here */
4189 sfp_cage_full = false;
/* empty cage: report link down without touching the LINKS register */
4193 if (!sfp_cage_full) {
4195 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4196 return IXGBE_SUCCESS;
4200 /* clear the old state */
4201 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
4203 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4205 if (links_orig != links_reg) {
4206 DEBUGOUT2("LINKS changed from %08X to %08X\n",
4207 links_orig, links_reg);
4210 if (link_up_wait_to_complete) {
/* poll up to max_link_up_time iterations for the UP bit */
4211 for (i = 0; i < hw->mac.max_link_up_time; i++) {
4212 if (links_reg & IXGBE_LINKS_UP) {
4219 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4222 if (links_reg & IXGBE_LINKS_UP)
4228 switch (links_reg & IXGBE_LINKS_SPEED_82599) {
4229 case IXGBE_LINKS_SPEED_10G_82599:
4230 *speed = IXGBE_LINK_SPEED_10GB_FULL;
/* X550+: the NON_STD bit reinterprets 10G as 2.5G */
4231 if (hw->mac.type >= ixgbe_mac_X550) {
4232 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4233 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
4236 case IXGBE_LINKS_SPEED_1G_82599:
4237 *speed = IXGBE_LINK_SPEED_1GB_FULL;
4239 case IXGBE_LINKS_SPEED_100_82599:
4240 *speed = IXGBE_LINK_SPEED_100_FULL;
/* X550: the NON_STD bit reinterprets 100M as 5G */
4241 if (hw->mac.type == ixgbe_mac_X550) {
4242 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4243 *speed = IXGBE_LINK_SPEED_5GB_FULL;
4246 case IXGBE_LINKS_SPEED_10_X550EM_A:
4247 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4248 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
4249 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
4250 *speed = IXGBE_LINK_SPEED_10_FULL;
4254 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4257 return IXGBE_SUCCESS;
4261 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
4263 * @hw: pointer to hardware structure
4264 * @wwnn_prefix: the alternative WWNN prefix
4265 * @wwpn_prefix: the alternative WWPN prefix
4267 * This function will read the EEPROM from the alternative SAN MAC address
4268 * block to check the support for the alternative WWNN/WWPN prefix support.
4270 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
4274 u16 alt_san_mac_blk_offset;
4276 DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
4278 /* clear output first */
4279 *wwnn_prefix = 0xFFFF;
4280 *wwpn_prefix = 0xFFFF;
4282 /* check if alternative SAN MAC is supported */
4283 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
4284 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
4285 goto wwn_prefix_err;
/* 0 or 0xFFFF pointer means the alt SAN MAC block is absent */
4287 if ((alt_san_mac_blk_offset == 0) ||
4288 (alt_san_mac_blk_offset == 0xFFFF))
4289 goto wwn_prefix_out;
4291 /* check capability in alternative san mac address block */
4292 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
4293 if (hw->eeprom.ops.read(hw, offset, &caps))
4294 goto wwn_prefix_err;
4295 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
4296 goto wwn_prefix_out;
4298 /* get the corresponding prefix for WWNN/WWPN */
4299 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
4300 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
4301 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4302 "eeprom read at offset %d failed", offset);
4305 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
4306 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
4307 goto wwn_prefix_err;
4310 return IXGBE_SUCCESS;
/* NOTE(review): EEPROM read failures are reported but still return
 * IXGBE_SUCCESS with 0xFFFF prefixes — confirm this is intentional.
 */
4313 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4314 "eeprom read at offset %d failed", offset);
4315 return IXGBE_SUCCESS;
4319 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4320 * @hw: pointer to hardware structure
4321 * @bs: the fcoe boot status
4323 * This function will read the FCOE boot status from the iSCSI FCOE block
4325 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4327 u16 offset, caps, flags;
4330 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4332 /* clear output first */
4333 *bs = ixgbe_fcoe_bootstatus_unavailable;
4335 /* check if FCOE IBA block is present */
4336 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4337 status = hw->eeprom.ops.read(hw, offset, &caps);
4338 if (status != IXGBE_SUCCESS)
4341 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4344 /* check if iSCSI FCOE block is populated */
4345 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4346 if (status != IXGBE_SUCCESS)
/* 0 or 0xFFFF means the iSCSI FCOE block pointer is unprogrammed */
4349 if ((offset == 0) || (offset == 0xFFFF))
4352 /* read fcoe flags in iSCSI FCOE block */
4353 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4354 status = hw->eeprom.ops.read(hw, offset, &flags);
4355 if (status != IXGBE_SUCCESS)
4358 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4359 *bs = ixgbe_fcoe_bootstatus_enabled;
4361 *bs = ixgbe_fcoe_bootstatus_disabled;
4368 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4369 * @hw: pointer to hardware structure
4370 * @enable: enable or disable switch for MAC anti-spoofing
4371 * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
/* each PFVFSPOOF register holds 8 VFs: register = vf/8, bit = vf%8 */
4374 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4376 int vf_target_reg = vf >> 3;
4377 int vf_target_shift = vf % 8;
/* 82598 has no PFVFSPOOF registers, so bail out early */
4380 if (hw->mac.type == ixgbe_mac_82598EB)
4383 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4385 pfvfspoof |= (1 << vf_target_shift);
4387 pfvfspoof &= ~(1 << vf_target_shift);
4388 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4392 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4393 * @hw: pointer to hardware structure
4394 * @enable: enable or disable switch for VLAN anti-spoofing
4395 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
/* same layout as MAC anti-spoofing, offset into the VLANAS bit field */
4398 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4400 int vf_target_reg = vf >> 3;
4401 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
/* 82598 has no PFVFSPOOF registers, so bail out early */
4404 if (hw->mac.type == ixgbe_mac_82598EB)
4407 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4409 pfvfspoof |= (1 << vf_target_shift);
4411 pfvfspoof &= ~(1 << vf_target_shift);
4412 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4416 * ixgbe_get_device_caps_generic - Get additional device capabilities
4417 * @hw: pointer to hardware structure
4418 * @device_caps: the EEPROM word with the extra device capabilities
4420 * This function will read the EEPROM location for the device capabilities,
4421 * and return the word through device_caps.
4423 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4425 DEBUGFUNC("ixgbe_get_device_caps_generic");
/* NOTE(review): the EEPROM read status is not checked — on failure
 * *device_caps may be stale; confirm against the full source.
 */
4427 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4429 return IXGBE_SUCCESS;
4433 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4434 * @hw: pointer to hardware structure
4437 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4442 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4444 /* Enable relaxed ordering */
/* Tx: set descriptor write-back relaxed ordering per queue */
4445 for (i = 0; i < hw->mac.max_tx_queues; i++) {
4446 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4447 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4448 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
/* Rx: set data and header write relaxed ordering per queue */
4451 for (i = 0; i < hw->mac.max_rx_queues; i++) {
4452 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4453 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4454 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4455 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4461 * ixgbe_calculate_checksum - Calculate checksum for buffer
4462 * @buffer: pointer to EEPROM
4463 * @length: size of EEPROM to calculate a checksum for
4464 * Calculates the checksum for some buffer on a specified length. The
4465 * checksum calculated is returned.
4467 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4472 DEBUGFUNC("ixgbe_calculate_checksum");
4477 for (i = 0; i < length; i++)
/* two's-complement checksum: byte sum plus checksum equals zero mod 256 */
4480 return (u8) (0 - sum);
4484 * ixgbe_hic_unlocked - Issue command to manageability block unlocked
4485 * @hw: pointer to the HW structure
4486 * @buffer: command to write and where the return status will be placed
4487 * @length: length of buffer, must be multiple of 4 bytes
4488 * @timeout: time in ms to wait for command completion
4490 * Communicates with the manageability block. On success return IXGBE_SUCCESS
4491 * else returns semaphore error when encountering an error acquiring
4492 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4494 * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
4497 s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
4503 DEBUGFUNC("ixgbe_hic_unlocked");
4505 if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4506 DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4507 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4510 /* Set bit 9 of FWSTS clearing FW reset indication */
4511 fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
4512 IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
4514 /* Check that the host interface is enabled. */
4515 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4516 if (!(hicr & IXGBE_HICR_EN)) {
4517 DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
4518 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4521 /* Calculate length in DWORDs. We must be DWORD aligned */
4522 if (length % sizeof(u32)) {
4523 DEBUGOUT("Buffer length failure, not aligned to dword");
4524 return IXGBE_ERR_INVALID_ARGUMENT;
4527 dword_len = length >> 2;
4529 /* The device driver writes the relevant command block
4530 * into the ram area.
4532 for (i = 0; i < dword_len; i++)
4533 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4534 i, IXGBE_CPU_TO_LE32(buffer[i]));
4536 /* Setting this bit tells the ARC that a new command is pending. */
4537 IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C)
4539 for (i = 0; i < timeout; i++) {
4540 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
/* firmware clears HICR_C when it has consumed the command */
4541 if (!(hicr & IXGBE_HICR_C))
4546 /* Check command completion */
/* timeout == 0 means "don't wait"; otherwise i == timeout means expiry */
4547 if ((timeout && i == timeout) ||
4548 !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
4549 ERROR_REPORT1(IXGBE_ERROR_CAUTION,
4550 "Command has failed with no status valid.\n");
4551 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4554 return IXGBE_SUCCESS;
4558 * ixgbe_host_interface_command - Issue command to manageability block
4559 * @hw: pointer to the HW structure
4560 * @buffer: contains the command to write and where the return status will
4562 * @length: length of buffer, must be multiple of 4 bytes
4563 * @timeout: time in ms to wait for command completion
4564 * @return_data: read and return data from the buffer (true) or not (false)
4565 * Needed because FW structures are big endian and decoding of
4566 * these fields can be 8 bit or 16 bit based on command. Decoding
4567 * is not easily understood without making a table of commands.
4568 * So we will leave this up to the caller to read back the data
4571 * Communicates with the manageability block. On success return IXGBE_SUCCESS
4572 * else returns semaphore error when encountering an error acquiring
4573 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4575 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
4576 u32 length, u32 timeout, bool return_data)
4578 u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
4584 DEBUGFUNC("ixgbe_host_interface_command");
4586 if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4587 DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4588 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4591 /* Take management host interface semaphore */
4592 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
/* the actual command exchange is done under the semaphore */
4596 status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
4603 /* Calculate length in DWORDs */
4604 dword_len = hdr_size >> 2;
4606 /* first pull in the header so we know the buffer length */
4607 for (bi = 0; bi < dword_len; bi++) {
4608 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4609 IXGBE_LE32_TO_CPUS(&buffer[bi]);
4612 /* If there is any thing in data position pull it in */
4613 buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
4617 if (length < buf_len + hdr_size) {
4618 DEBUGOUT("Buffer not large enough for reply message.\n");
4619 status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4623 /* Calculate length in DWORDs, add 3 for odd lengths */
4624 dword_len = (buf_len + 3) >> 2;
4626 /* Pull in the rest of the buffer (bi is where we left off) */
4627 for (; bi <= dword_len; bi++) {
4628 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4629 IXGBE_LE32_TO_CPUS(&buffer[bi]);
/* always release the semaphore, success or failure */
4633 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4639 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4640 * @hw: pointer to the HW structure
4641 * @maj: driver version major number
4642 * @min: driver version minor number
4643 * @build: driver version build number
4644 * @sub: driver version sub build number
4646 * Sends driver version number to firmware through the manageability
4647 * block. On success return IXGBE_SUCCESS
4648 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4649 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4651 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4652 u8 build, u8 sub, u16 len,
4653 const char *driver_ver)
4655 struct ixgbe_hic_drv_info fw_cmd;
4657 s32 ret_val = IXGBE_SUCCESS;
4659 DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4660 UNREFERENCED_2PARAMETER(len, driver_ver);
4662 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4663 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4664 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4665 fw_cmd.port_num = (u8)hw->bus.func;
4666 fw_cmd.ver_maj = maj;
4667 fw_cmd.ver_min = min;
4668 fw_cmd.ver_build = build;
4669 fw_cmd.ver_sub = sub;
/* checksum is computed over the header+payload with the field zeroed */
4670 fw_cmd.hdr.checksum = 0;
4671 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4672 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
/* retry the host interface command up to FW_CEM_MAX_RETRIES times */
4676 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4677 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4679 IXGBE_HI_COMMAND_TIMEOUT,
4681 if (ret_val != IXGBE_SUCCESS)
4684 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4685 FW_CEM_RESP_STATUS_SUCCESS)
4686 ret_val = IXGBE_SUCCESS;
4688 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4697 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4698 * @hw: pointer to hardware structure
4699 * @num_pb: number of packet buffers to allocate
4700 * @headroom: reserve n KB of headroom
4701 * @strategy: packet buffer allocation strategy
4703 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4706 u32 pbsize = hw->mac.rx_pb_size;
4708 u32 rxpktsize, txpktsize, txpbthresh;
4710 /* Reserve headroom */
4716 /* Divide remaining packet buffer space amongst the number of packet
4717 * buffers requested using supplied strategy.
4720 case PBA_STRATEGY_WEIGHTED:
4721 /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4722 * buffer with 5/8 of the packet buffer space.
4724 rxpktsize = (pbsize * 5) / (num_pb * 4);
4725 pbsize -= rxpktsize * (num_pb / 2);
4726 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4727 for (; i < (num_pb / 2); i++)
4728 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4729 /* Fall through to configure remaining packet buffers */
4730 case PBA_STRATEGY_EQUAL:
/* split whatever is left evenly over the remaining buffers */
4731 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4732 for (; i < num_pb; i++)
4733 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4739 /* Only support an equally distributed Tx packet buffer strategy. */
4740 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4741 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4742 for (i = 0; i < num_pb; i++) {
4743 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4744 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4747 /* Clear unused TCs, if any, to zero buffer size*/
4748 for (; i < IXGBE_MAX_PB; i++) {
4749 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4750 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4751 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4756 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4757 * @hw: pointer to the hardware structure
4759 * The 82599 and x540 MACs can experience issues if TX work is still pending
4760 * when a reset occurs. This function prevents this by flushing the PCIe
4761 * buffers on the system.
4763 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4765 u32 gcr_ext, hlreg0, i, poll;
4769 * If double reset is not requested then all transactions should
4770 * already be clear and as such there is no work to do
4772 if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4776 * Set loopback enable to prevent any transmits from being sent
4777 * should the link come up. This assumes that the RXCTRL.RXEN bit
4778 * has already been cleared.
4780 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4781 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4783 /* Wait for a last completion before clearing buffers */
4784 IXGBE_WRITE_FLUSH(hw);
4788 * Before proceeding, make sure that the PCIe block does not have
4789 * transactions pending.
4791 poll = ixgbe_pcie_timeout_poll(hw);
4792 for (i = 0; i < poll; i++) {
4794 value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
/* abort polling if the device was surprise-removed */
4795 if (IXGBE_REMOVED(hw->hw_addr))
4797 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
4802 /* initiate cleaning flow for buffers in the PCIe transaction layer */
4803 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT)
4804 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4805 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4807 /* Flush all writes and allow 20usec for all transactions to clear */
4808 IXGBE_WRITE_FLUSH(hw);
4811 /* restore previous register values */
4812 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4813 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
/* EMC temperature-data register addresses (internal sensor plus three
 * external diodes), indexed by the ETS record's DATA_INDEX field.
 */
4816 STATIC const u8 ixgbe_emc_temp_data[4] = {
4817 IXGBE_EMC_INTERNAL_DATA,
4818 IXGBE_EMC_DIODE1_DATA,
4819 IXGBE_EMC_DIODE2_DATA,
4820 IXGBE_EMC_DIODE3_DATA
/* EMC thermal-limit register addresses, parallel to ixgbe_emc_temp_data[]
 * and indexed by the same ETS DATA_INDEX field.
 */
4822 STATIC const u8 ixgbe_emc_therm_limit[4] = {
4823 IXGBE_EMC_INTERNAL_THERM_LIMIT,
4824 IXGBE_EMC_DIODE1_THERM_LIMIT,
4825 IXGBE_EMC_DIODE2_THERM_LIMIT,
4826 IXGBE_EMC_DIODE3_THERM_LIMIT
4830 * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
4831 * @hw: pointer to hardware structure
4832 * (results are written to hw->mac.thermal_sensor_data; the function takes no @data argument)
4834 * Returns the thermal sensor data structure
4836 s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
4838 s32 status = IXGBE_SUCCESS;
4846 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
4848 DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic");
4850 /* Only support thermal sensors attached to 82599 physical port 0 */
4851 if ((hw->mac.type != ixgbe_mac_82599EB) ||
4852 (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
4853 status = IXGBE_NOT_IMPLEMENTED;
/* Read the EEPROM pointer word that locates the thermal-sensor table. */
4857 status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
4861 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) { /* unprogrammed/blank pointer */
4862 status = IXGBE_NOT_IMPLEMENTED;
4866 status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
4870 if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
4871 != IXGBE_ETS_TYPE_EMC) { /* only EMC-type sensor tables are handled */
4872 status = IXGBE_NOT_IMPLEMENTED;
4876 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
4877 if (num_sensors > IXGBE_MAX_SENSORS) /* clamp to sensor[] capacity */
4878 num_sensors = IXGBE_MAX_SENSORS;
/* One EEPROM record per sensor follows the config word. */
4880 for (i = 0; i < num_sensors; i++) {
4881 status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
4886 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
4887 IXGBE_ETS_DATA_INDEX_SHIFT);
4888 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
4889 IXGBE_ETS_DATA_LOC_SHIFT);
/* Records with location 0 are skipped (presumably "not populated").
 * NOTE(review): sensor_index indexes the 4-entry ixgbe_emc_temp_data[]
 * without an explicit bounds check -- confirm IXGBE_ETS_DATA_INDEX_MASK
 * limits it to 0-3 even for malformed EEPROM content.
 */
4891 if (sensor_location != 0) {
4892 status = hw->phy.ops.read_i2c_byte(hw,
4893 ixgbe_emc_temp_data[sensor_index],
4894 IXGBE_I2C_THERMAL_SENSOR_ADDR,
4895 &data->sensor[i].temp);
4905 * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
4906 * @hw: pointer to hardware structure
4908 * Inits the thermal sensor thresholds according to the NVM map
4909 * and save off the threshold and location values into mac.thermal_sensor_data
4911 s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
4913 s32 status = IXGBE_SUCCESS;
4918 u8 low_thresh_delta;
4924 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
4926 DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic");
/* Start from a clean slate; unpopulated sensors stay all-zero. */
4928 memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
4930 /* Only support thermal sensors attached to 82599 physical port 0 */
4931 if ((hw->mac.type != ixgbe_mac_82599EB) ||
4932 (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
4933 return IXGBE_NOT_IMPLEMENTED;
/* Locate the thermal-sensor table via its EEPROM pointer word. */
4935 offset = IXGBE_ETS_CFG;
4936 if (hw->eeprom.ops.read(hw, offset, &ets_offset))
4938 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
4939 return IXGBE_NOT_IMPLEMENTED; /* unprogrammed/blank pointer */
4941 offset = ets_offset;
4942 if (hw->eeprom.ops.read(hw, offset, &ets_cfg))
4944 if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
4945 != IXGBE_ETS_TYPE_EMC)
4946 return IXGBE_NOT_IMPLEMENTED; /* only EMC-type sensor tables are handled */
4948 low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
4949 IXGBE_ETS_LTHRES_DELTA_SHIFT);
/* NOTE(review): unlike ixgbe_get_thermal_sensor_data_generic(),
 * num_sensors is not clamped here; the sensor[] bound is only applied
 * per-iteration below, and sensor_index indexes the 4-entry
 * ixgbe_emc_therm_limit[] with no check -- confirm the INDEX mask
 * limits it to 0-3.
 */
4950 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
4952 for (i = 0; i < num_sensors; i++) {
4953 offset = ets_offset + 1 + i;
4954 if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) {
4955 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4956 "eeprom read at offset %d failed",
4960 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
4961 IXGBE_ETS_DATA_INDEX_SHIFT);
4962 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
4963 IXGBE_ETS_DATA_LOC_SHIFT);
4964 therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
/* Program the high (caution) threshold into the external EMC device. */
4966 hw->phy.ops.write_i2c_byte(hw,
4967 ixgbe_emc_therm_limit[sensor_index],
4968 IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
/* Cache location and thresholds for in-range, populated sensors only. */
4970 if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) {
4971 data->sensor[i].location = sensor_location;
4972 data->sensor[i].caution_thresh = therm_limit;
4973 data->sensor[i].max_op_thresh = therm_limit -
4980 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4981 "eeprom read at offset %d failed", offset);
4982 return IXGBE_NOT_IMPLEMENTED;
4987 * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
4988 * @hw: pointer to hardware structure
4989 * @map: pointer to u8 arr for returning map
4991 * Read the rtrup2tc HW register and resolve its content into map
4993 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
4997 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
/* Each user priority (UP) occupies an IXGBE_RTRUP2TC_UP_SHIFT-bit field
 * of RTRUP2TC; extract the TC number for every UP into
 * map[0..IXGBE_DCB_MAX_USER_PRIORITY-1].
 */
4998 for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
4999 map[i] = IXGBE_RTRUP2TC_UP_MASK &
5000 (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
/**
 * ixgbe_disable_rx_generic - Disable the Rx unit
 * @hw: pointer to hardware structure
 *
 * Clears RXCTRL.RXEN if it is currently set. On MACs other than 82598,
 * PFDTXGSWC.VT_LBEN (VT loopback) is cleared first and the fact that it
 * was set is remembered in hw->mac.set_lben so ixgbe_enable_rx_generic()
 * can restore it.
 */
5004 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
5009 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5010 if (rxctrl & IXGBE_RXCTRL_RXEN) {
5011 if (hw->mac.type != ixgbe_mac_82598EB) {
5012 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5013 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
5014 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
5015 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5016 hw->mac.set_lben = true; /* remember to re-enable on ixgbe_enable_rx_generic() */
5018 hw->mac.set_lben = false;
5021 rxctrl &= ~IXGBE_RXCTRL_RXEN;
5022 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
/**
 * ixgbe_enable_rx_generic - Enable the Rx unit
 * @hw: pointer to hardware structure
 *
 * Sets RXCTRL.RXEN. On MACs other than 82598, if a prior Rx disable
 * recorded hw->mac.set_lben, PFDTXGSWC.VT_LBEN (VT loopback) is set
 * again and the flag cleared.
 */
5026 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
5031 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5032 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
5034 if (hw->mac.type != ixgbe_mac_82598EB) {
5035 if (hw->mac.set_lben) {
5036 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5037 pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
5038 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5039 hw->mac.set_lben = false; /* loopback state restored; clear the marker */
5045 * ixgbe_mng_present - returns true when management capability is present
5046 * @hw: pointer to hardware structure
5048 bool ixgbe_mng_present(struct ixgbe_hw *hw)
/* MACs older than 82599 are excluded from this FWSM-based check. */
5052 if (hw->mac.type < ixgbe_mac_82599EB)
5055 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5056 fwsm &= IXGBE_FWSM_MODE_MASK;
5057 return fwsm == IXGBE_FWSM_FW_MODE_PT; /* present iff firmware is in pass-through mode */
5061 * ixgbe_mng_enabled - Is the manageability engine enabled?
5062 * @hw: pointer to hardware structure
5064 * Returns true if the manageability engine is enabled.
5066 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
5068 u32 fwsm, manc, factps;
/* Firmware must report pass-through mode in FWSM... */
5070 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5071 if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
/* ...and MANC must have receive-TCO enabled... */
5074 manc = IXGBE_READ_REG(hw, IXGBE_MANC);
5075 if (!(manc & IXGBE_MANC_RCV_TCO_EN))
/* ...and, on X540 and earlier, FACTPS.MNGCG must be clear. */
5078 if (hw->mac.type <= ixgbe_mac_X540) {
5079 factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
5080 if (factps & IXGBE_FACTPS_MNGCG)
5088 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
5089 * @hw: pointer to hardware structure
5090 * @speed: new link speed
5091 * @autoneg_wait_to_complete: true when waiting for completion is needed
5093 * Set the link speed in the MAC and/or PHY register and restarts link.
 * Tries 10G first, then 1G, checking for link after each attempt; on
 * total failure it re-runs itself once at the single highest speed tried.
 * Returns an s32 status (IXGBE_SUCCESS or an error from the helpers).
5095 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
5096 ixgbe_link_speed speed,
5097 bool autoneg_wait_to_complete)
5099 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
5100 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
5101 s32 status = IXGBE_SUCCESS;
5104 bool autoneg, link_up = false;
5106 DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
5108 /* Mask off requested but non-supported speeds */
5109 status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
5110 if (status != IXGBE_SUCCESS)
5113 speed &= link_speed; /* keep only speeds the device reports as supported */
5115 /* Try each speed one by one, highest priority first. We do this in
5116 * software because 10Gb fiber doesn't support speed autonegotiation.
5118 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
5120 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
5122 /* Set the module link speed */
5123 switch (hw->phy.media_type) {
5124 case ixgbe_media_type_fiber:
5125 ixgbe_set_rate_select_speed(hw,
5126 IXGBE_LINK_SPEED_10GB_FULL);
5128 case ixgbe_media_type_fiber_qsfp:
5129 /* QSFP module automatically detects MAC link speed */
5132 DEBUGOUT("Unexpected media type.\n");
5136 /* Allow module to change analog characteristics (1G->10G) */
5139 status = ixgbe_setup_mac_link(hw,
5140 IXGBE_LINK_SPEED_10GB_FULL,
5141 autoneg_wait_to_complete);
5142 if (status != IXGBE_SUCCESS)
5145 /* Flap the Tx laser if it has not already been done */
5146 ixgbe_flap_tx_laser(hw);
5148 /* Wait for the controller to acquire link. Per IEEE 802.3ap,
5149 * Section 73.10.2, we may have to wait up to 500ms if KR is
5150 * attempted. 82599 uses the same timing for 10g SFI.
5152 for (i = 0; i < 5; i++) {
5153 /* Wait for the link partner to also set speed */
5156 /* If we have link, just jump out */
5157 status = ixgbe_check_link(hw, &link_speed,
5159 if (status != IXGBE_SUCCESS)
/* 10G failed (or was not requested); fall back to trying 1G. */
5167 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
5169 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
5170 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
5172 /* Set the module link speed */
5173 switch (hw->phy.media_type) {
5174 case ixgbe_media_type_fiber:
5175 ixgbe_set_rate_select_speed(hw,
5176 IXGBE_LINK_SPEED_1GB_FULL);
5178 case ixgbe_media_type_fiber_qsfp:
5179 /* QSFP module automatically detects link speed */
5182 DEBUGOUT("Unexpected media type.\n");
5186 /* Allow module to change analog characteristics (10G->1G) */
5189 status = ixgbe_setup_mac_link(hw,
5190 IXGBE_LINK_SPEED_1GB_FULL,
5191 autoneg_wait_to_complete);
5192 if (status != IXGBE_SUCCESS)
5195 /* Flap the Tx laser if it has not already been done */
5196 ixgbe_flap_tx_laser(hw);
5198 /* Wait for the link partner to also set speed */
5201 /* If we have link, just jump out */
5202 status = ixgbe_check_link(hw, &link_speed, &link_up, false);
5203 if (status != IXGBE_SUCCESS)
5210 /* We didn't get link. Configure back to the highest speed we tried,
5211 * (if there was more than one). We call ourselves back with just the
5212 * single highest speed that the user requested.
/* NOTE(review): single-level recursion -- the retry passes only one
 * speed bit, so the recursive call cannot recurse again down this path.
 */
5215 status = ixgbe_setup_mac_link_multispeed_fiber(hw,
5217 autoneg_wait_to_complete);
5220 /* Set autoneg_advertised value based on input link speed */
5221 hw->phy.autoneg_advertised = 0;
5223 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
5224 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
5226 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
5227 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
5233 * ixgbe_set_soft_rate_select_speed - Set module link speed
5234 * @hw: pointer to hardware structure
5235 * @speed: link speed to set
5237 * Set module link speed via the soft rate select.
5239 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
5240 ixgbe_link_speed speed)
5246 case IXGBE_LINK_SPEED_10GB_FULL:
5247 /* one bit mask same as setting on */
5248 rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
5250 case IXGBE_LINK_SPEED_1GB_FULL:
5251 rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
5254 DEBUGOUT("Invalid fixed module speed\n");
5259 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5260 IXGBE_I2C_EEPROM_DEV_ADDR2,
5263 DEBUGOUT("Failed to read Rx Rate Select RS0\n");
5267 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5269 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5270 IXGBE_I2C_EEPROM_DEV_ADDR2,
5273 DEBUGOUT("Failed to write Rx Rate Select RS0\n");
5278 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5279 IXGBE_I2C_EEPROM_DEV_ADDR2,
5282 DEBUGOUT("Failed to read Rx Rate Select RS1\n");
5286 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5288 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5289 IXGBE_I2C_EEPROM_DEV_ADDR2,
5292 DEBUGOUT("Failed to write Rx Rate Select RS1\n");