/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2001-2020 Intel Corporation
 */

#include "ixgbe_common.h"
#include "ixgbe_phy.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_api.h"
11 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
12 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
13 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
14 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
15 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
16 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
18 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
19 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
20 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
21 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw);
23 STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
24 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
26 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
27 u16 words, u16 *data);
28 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
29 u16 words, u16 *data);
30 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
34 * ixgbe_init_ops_generic - Inits function ptrs
35 * @hw: pointer to the hardware structure
37 * Initialize the function pointers.
39 s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
41 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
42 struct ixgbe_mac_info *mac = &hw->mac;
43 u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
45 DEBUGFUNC("ixgbe_init_ops_generic");
48 eeprom->ops.init_params = ixgbe_init_eeprom_params_generic;
49 /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
50 if (eec & IXGBE_EEC_PRES) {
51 eeprom->ops.read = ixgbe_read_eerd_generic;
52 eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic;
54 eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic;
55 eeprom->ops.read_buffer =
56 ixgbe_read_eeprom_buffer_bit_bang_generic;
58 eeprom->ops.write = ixgbe_write_eeprom_generic;
59 eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic;
60 eeprom->ops.validate_checksum =
61 ixgbe_validate_eeprom_checksum_generic;
62 eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic;
63 eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic;
66 mac->ops.init_hw = ixgbe_init_hw_generic;
67 mac->ops.reset_hw = NULL;
68 mac->ops.start_hw = ixgbe_start_hw_generic;
69 mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic;
70 mac->ops.get_media_type = NULL;
71 mac->ops.get_supported_physical_layer = NULL;
72 mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic;
73 mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic;
74 mac->ops.stop_adapter = ixgbe_stop_adapter_generic;
75 mac->ops.get_bus_info = ixgbe_get_bus_info_generic;
76 mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie;
77 mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
78 mac->ops.release_swfw_sync = ixgbe_release_swfw_sync;
79 mac->ops.prot_autoc_read = prot_autoc_read_generic;
80 mac->ops.prot_autoc_write = prot_autoc_write_generic;
83 mac->ops.led_on = ixgbe_led_on_generic;
84 mac->ops.led_off = ixgbe_led_off_generic;
85 mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
86 mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
87 mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;
89 /* RAR, Multicast, VLAN */
90 mac->ops.set_rar = ixgbe_set_rar_generic;
91 mac->ops.clear_rar = ixgbe_clear_rar_generic;
92 mac->ops.insert_mac_addr = NULL;
93 mac->ops.set_vmdq = NULL;
94 mac->ops.clear_vmdq = NULL;
95 mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic;
96 mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic;
97 mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic;
98 mac->ops.enable_mc = ixgbe_enable_mc_generic;
99 mac->ops.disable_mc = ixgbe_disable_mc_generic;
100 mac->ops.clear_vfta = NULL;
101 mac->ops.set_vfta = NULL;
102 mac->ops.set_vlvf = NULL;
103 mac->ops.init_uta_tables = NULL;
104 mac->ops.enable_rx = ixgbe_enable_rx_generic;
105 mac->ops.disable_rx = ixgbe_disable_rx_generic;
108 mac->ops.fc_enable = ixgbe_fc_enable_generic;
109 mac->ops.setup_fc = ixgbe_setup_fc_generic;
110 mac->ops.fc_autoneg = ixgbe_fc_autoneg;
113 mac->ops.get_link_capabilities = NULL;
114 mac->ops.setup_link = NULL;
115 mac->ops.check_link = NULL;
116 mac->ops.dmac_config = NULL;
117 mac->ops.dmac_update_tcs = NULL;
118 mac->ops.dmac_config_tcs = NULL;
120 return IXGBE_SUCCESS;
124 * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation
126 * @hw: pointer to hardware structure
128 * This function returns true if the device supports flow control
129 * autonegotiation, and false if it does not.
132 bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
134 bool supported = false;
135 ixgbe_link_speed speed;
138 DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
140 switch (hw->phy.media_type) {
141 case ixgbe_media_type_fiber_qsfp:
142 case ixgbe_media_type_fiber:
143 /* flow control autoneg black list */
144 switch (hw->device_id) {
145 case IXGBE_DEV_ID_X550EM_A_SFP:
146 case IXGBE_DEV_ID_X550EM_A_SFP_N:
147 case IXGBE_DEV_ID_X550EM_A_QSFP:
148 case IXGBE_DEV_ID_X550EM_A_QSFP_N:
152 hw->mac.ops.check_link(hw, &speed, &link_up, false);
153 /* if link is down, assume supported */
155 supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
162 case ixgbe_media_type_backplane:
163 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
168 case ixgbe_media_type_copper:
169 /* only some copper devices support flow control autoneg */
170 switch (hw->device_id) {
171 case IXGBE_DEV_ID_82599_T3_LOM:
172 case IXGBE_DEV_ID_X540T:
173 case IXGBE_DEV_ID_X540T1:
174 case IXGBE_DEV_ID_X550T:
175 case IXGBE_DEV_ID_X550T1:
176 case IXGBE_DEV_ID_X550EM_X_10G_T:
177 case IXGBE_DEV_ID_X550EM_A_10G_T:
178 case IXGBE_DEV_ID_X550EM_A_1G_T:
179 case IXGBE_DEV_ID_X550EM_A_1G_T_L:
190 ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
191 "Device %x does not support flow control autoneg",
197 * ixgbe_setup_fc_generic - Set up flow control
198 * @hw: pointer to hardware structure
200 * Called at init time to set up flow control.
202 s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
204 s32 ret_val = IXGBE_SUCCESS;
205 u32 reg = 0, reg_bp = 0;
209 DEBUGFUNC("ixgbe_setup_fc_generic");
211 /* Validate the requested mode */
212 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
213 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
214 "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
215 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
220 * 10gig parts do not have a word in the EEPROM to determine the
221 * default flow control setting, so we explicitly set it to full.
223 if (hw->fc.requested_mode == ixgbe_fc_default)
224 hw->fc.requested_mode = ixgbe_fc_full;
227 * Set up the 1G and 10G flow control advertisement registers so the
228 * HW will be able to do fc autoneg once the cable is plugged in. If
229 * we link at 10G, the 1G advertisement is harmless and vice versa.
231 switch (hw->phy.media_type) {
232 case ixgbe_media_type_backplane:
233 /* some MAC's need RMW protection on AUTOC */
234 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp);
235 if (ret_val != IXGBE_SUCCESS)
238 /* fall through - only backplane uses autoc */
239 case ixgbe_media_type_fiber_qsfp:
240 case ixgbe_media_type_fiber:
241 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
244 case ixgbe_media_type_copper:
245 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
246 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu);
253 * The possible values of fc.requested_mode are:
254 * 0: Flow control is completely disabled
255 * 1: Rx flow control is enabled (we can receive pause frames,
256 * but not send pause frames).
257 * 2: Tx flow control is enabled (we can send pause frames but
258 * we do not support receiving pause frames).
259 * 3: Both Rx and Tx flow control (symmetric) are enabled.
262 switch (hw->fc.requested_mode) {
264 /* Flow control completely disabled by software override. */
265 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
266 if (hw->phy.media_type == ixgbe_media_type_backplane)
267 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
268 IXGBE_AUTOC_ASM_PAUSE);
269 else if (hw->phy.media_type == ixgbe_media_type_copper)
270 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
272 case ixgbe_fc_tx_pause:
274 * Tx Flow control is enabled, and Rx Flow control is
275 * disabled by software override.
277 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
278 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
279 if (hw->phy.media_type == ixgbe_media_type_backplane) {
280 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
281 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
282 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
283 reg_cu |= IXGBE_TAF_ASM_PAUSE;
284 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
287 case ixgbe_fc_rx_pause:
289 * Rx Flow control is enabled and Tx Flow control is
290 * disabled by software override. Since there really
291 * isn't a way to advertise that we are capable of RX
292 * Pause ONLY, we will advertise that we support both
293 * symmetric and asymmetric Rx PAUSE, as such we fall
294 * through to the fc_full statement. Later, we will
295 * disable the adapter's ability to send PAUSE frames.
298 /* Flow control (both Rx and Tx) is enabled by SW override. */
299 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
300 if (hw->phy.media_type == ixgbe_media_type_backplane)
301 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
302 IXGBE_AUTOC_ASM_PAUSE;
303 else if (hw->phy.media_type == ixgbe_media_type_copper)
304 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
307 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
308 "Flow control param set incorrectly\n");
309 ret_val = IXGBE_ERR_CONFIG;
314 if (hw->mac.type < ixgbe_mac_X540) {
316 * Enable auto-negotiation between the MAC & PHY;
317 * the MAC will advertise clause 37 flow control.
319 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
320 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
322 /* Disable AN timeout */
323 if (hw->fc.strict_ieee)
324 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
326 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
327 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
331 * AUTOC restart handles negotiation of 1G and 10G on backplane
332 * and copper. There is no need to set the PCS1GCTL register.
335 if (hw->phy.media_type == ixgbe_media_type_backplane) {
336 reg_bp |= IXGBE_AUTOC_AN_RESTART;
337 ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
340 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
341 (ixgbe_device_supports_autoneg_fc(hw))) {
342 hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
343 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
346 DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
352 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
353 * @hw: pointer to hardware structure
355 * Starts the hardware by filling the bus info structure and media type, clears
356 * all on chip counters, initializes receive address registers, multicast
357 * table, VLAN filter table, calls routine to set up link and flow control
358 * settings, and leaves transmit and receive units disabled and uninitialized
360 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
366 DEBUGFUNC("ixgbe_start_hw_generic");
368 /* Set the media type */
369 hw->phy.media_type = hw->mac.ops.get_media_type(hw);
371 /* PHY ops initialization must be done in reset_hw() */
373 /* Clear the VLAN filter table */
374 hw->mac.ops.clear_vfta(hw);
376 /* Clear statistics registers */
377 hw->mac.ops.clear_hw_cntrs(hw);
379 /* Set No Snoop Disable */
380 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
381 ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
382 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
383 IXGBE_WRITE_FLUSH(hw);
385 /* Setup flow control */
386 ret_val = ixgbe_setup_fc(hw);
387 if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
388 DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
392 /* Cache bit indicating need for crosstalk fix */
393 switch (hw->mac.type) {
394 case ixgbe_mac_82599EB:
395 case ixgbe_mac_X550EM_x:
396 case ixgbe_mac_X550EM_a:
397 hw->mac.ops.get_device_caps(hw, &device_caps);
398 if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
399 hw->need_crosstalk_fix = false;
401 hw->need_crosstalk_fix = true;
404 hw->need_crosstalk_fix = false;
408 /* Clear adapter stopped flag */
409 hw->adapter_stopped = false;
411 return IXGBE_SUCCESS;
415 * ixgbe_start_hw_gen2 - Init sequence for common device family
416 * @hw: pointer to hw structure
418 * Performs the init sequence common to the second generation
420 * Devices in the second generation:
424 void ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
429 /* Clear the rate limiters */
430 for (i = 0; i < hw->mac.max_tx_queues; i++) {
431 IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
432 IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
434 IXGBE_WRITE_FLUSH(hw);
436 /* Disable relaxed ordering */
437 for (i = 0; i < hw->mac.max_tx_queues; i++) {
438 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
439 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
440 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
443 for (i = 0; i < hw->mac.max_rx_queues; i++) {
444 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
445 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
446 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
447 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
452 * ixgbe_init_hw_generic - Generic hardware initialization
453 * @hw: pointer to hardware structure
455 * Initialize the hardware by resetting the hardware, filling the bus info
456 * structure and media type, clears all on chip counters, initializes receive
457 * address registers, multicast table, VLAN filter table, calls routine to set
458 * up link and flow control settings, and leaves transmit and receive units
459 * disabled and uninitialized
461 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
465 DEBUGFUNC("ixgbe_init_hw_generic");
467 /* Reset the hardware */
468 status = hw->mac.ops.reset_hw(hw);
470 if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
472 status = hw->mac.ops.start_hw(hw);
475 /* Initialize the LED link active for LED blink support */
476 if (hw->mac.ops.init_led_link_act)
477 hw->mac.ops.init_led_link_act(hw);
479 if (status != IXGBE_SUCCESS)
480 DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
486 * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
487 * @hw: pointer to hardware structure
489 * Clears all hardware statistics counters by reading them from the hardware
490 * Statistics counters are clear on read.
492 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
496 DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
498 IXGBE_READ_REG(hw, IXGBE_CRCERRS);
499 IXGBE_READ_REG(hw, IXGBE_ILLERRC);
500 IXGBE_READ_REG(hw, IXGBE_ERRBC);
501 IXGBE_READ_REG(hw, IXGBE_MSPDC);
502 for (i = 0; i < 8; i++)
503 IXGBE_READ_REG(hw, IXGBE_MPC(i));
505 IXGBE_READ_REG(hw, IXGBE_MLFC);
506 IXGBE_READ_REG(hw, IXGBE_MRFC);
507 IXGBE_READ_REG(hw, IXGBE_RLEC);
508 IXGBE_READ_REG(hw, IXGBE_LXONTXC);
509 IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
510 if (hw->mac.type >= ixgbe_mac_82599EB) {
511 IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
512 IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
514 IXGBE_READ_REG(hw, IXGBE_LXONRXC);
515 IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
518 for (i = 0; i < 8; i++) {
519 IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
520 IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
521 if (hw->mac.type >= ixgbe_mac_82599EB) {
522 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
523 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
525 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
526 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
529 if (hw->mac.type >= ixgbe_mac_82599EB)
530 for (i = 0; i < 8; i++)
531 IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
532 IXGBE_READ_REG(hw, IXGBE_PRC64);
533 IXGBE_READ_REG(hw, IXGBE_PRC127);
534 IXGBE_READ_REG(hw, IXGBE_PRC255);
535 IXGBE_READ_REG(hw, IXGBE_PRC511);
536 IXGBE_READ_REG(hw, IXGBE_PRC1023);
537 IXGBE_READ_REG(hw, IXGBE_PRC1522);
538 IXGBE_READ_REG(hw, IXGBE_GPRC);
539 IXGBE_READ_REG(hw, IXGBE_BPRC);
540 IXGBE_READ_REG(hw, IXGBE_MPRC);
541 IXGBE_READ_REG(hw, IXGBE_GPTC);
542 IXGBE_READ_REG(hw, IXGBE_GORCL);
543 IXGBE_READ_REG(hw, IXGBE_GORCH);
544 IXGBE_READ_REG(hw, IXGBE_GOTCL);
545 IXGBE_READ_REG(hw, IXGBE_GOTCH);
546 if (hw->mac.type == ixgbe_mac_82598EB)
547 for (i = 0; i < 8; i++)
548 IXGBE_READ_REG(hw, IXGBE_RNBC(i));
549 IXGBE_READ_REG(hw, IXGBE_RUC);
550 IXGBE_READ_REG(hw, IXGBE_RFC);
551 IXGBE_READ_REG(hw, IXGBE_ROC);
552 IXGBE_READ_REG(hw, IXGBE_RJC);
553 IXGBE_READ_REG(hw, IXGBE_MNGPRC);
554 IXGBE_READ_REG(hw, IXGBE_MNGPDC);
555 IXGBE_READ_REG(hw, IXGBE_MNGPTC);
556 IXGBE_READ_REG(hw, IXGBE_TORL);
557 IXGBE_READ_REG(hw, IXGBE_TORH);
558 IXGBE_READ_REG(hw, IXGBE_TPR);
559 IXGBE_READ_REG(hw, IXGBE_TPT);
560 IXGBE_READ_REG(hw, IXGBE_PTC64);
561 IXGBE_READ_REG(hw, IXGBE_PTC127);
562 IXGBE_READ_REG(hw, IXGBE_PTC255);
563 IXGBE_READ_REG(hw, IXGBE_PTC511);
564 IXGBE_READ_REG(hw, IXGBE_PTC1023);
565 IXGBE_READ_REG(hw, IXGBE_PTC1522);
566 IXGBE_READ_REG(hw, IXGBE_MPTC);
567 IXGBE_READ_REG(hw, IXGBE_BPTC);
568 for (i = 0; i < 16; i++) {
569 IXGBE_READ_REG(hw, IXGBE_QPRC(i));
570 IXGBE_READ_REG(hw, IXGBE_QPTC(i));
571 if (hw->mac.type >= ixgbe_mac_82599EB) {
572 IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
573 IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
574 IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
575 IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
576 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
578 IXGBE_READ_REG(hw, IXGBE_QBRC(i));
579 IXGBE_READ_REG(hw, IXGBE_QBTC(i));
583 if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
585 ixgbe_identify_phy(hw);
586 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
587 IXGBE_MDIO_PCS_DEV_TYPE, &i);
588 hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH,
589 IXGBE_MDIO_PCS_DEV_TYPE, &i);
590 hw->phy.ops.read_reg(hw, IXGBE_LDPCECL,
591 IXGBE_MDIO_PCS_DEV_TYPE, &i);
592 hw->phy.ops.read_reg(hw, IXGBE_LDPCECH,
593 IXGBE_MDIO_PCS_DEV_TYPE, &i);
596 return IXGBE_SUCCESS;
600 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
601 * @hw: pointer to hardware structure
602 * @pba_num: stores the part number string from the EEPROM
603 * @pba_num_size: part number string buffer length
605 * Reads the part number string from the EEPROM.
607 s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
616 DEBUGFUNC("ixgbe_read_pba_string_generic");
618 if (pba_num == NULL) {
619 DEBUGOUT("PBA string buffer was null\n");
620 return IXGBE_ERR_INVALID_ARGUMENT;
623 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
625 DEBUGOUT("NVM Read Error\n");
629 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
631 DEBUGOUT("NVM Read Error\n");
636 * if data is not ptr guard the PBA must be in legacy format which
637 * means pba_ptr is actually our second data word for the PBA number
638 * and we can decode it into an ascii string
640 if (data != IXGBE_PBANUM_PTR_GUARD) {
641 DEBUGOUT("NVM PBA number is not stored as string\n");
643 /* we will need 11 characters to store the PBA */
644 if (pba_num_size < 11) {
645 DEBUGOUT("PBA string buffer too small\n");
646 return IXGBE_ERR_NO_SPACE;
649 /* extract hex string from data and pba_ptr */
650 pba_num[0] = (data >> 12) & 0xF;
651 pba_num[1] = (data >> 8) & 0xF;
652 pba_num[2] = (data >> 4) & 0xF;
653 pba_num[3] = data & 0xF;
654 pba_num[4] = (pba_ptr >> 12) & 0xF;
655 pba_num[5] = (pba_ptr >> 8) & 0xF;
658 pba_num[8] = (pba_ptr >> 4) & 0xF;
659 pba_num[9] = pba_ptr & 0xF;
661 /* put a null character on the end of our string */
664 /* switch all the data but the '-' to hex char */
665 for (offset = 0; offset < 10; offset++) {
666 if (pba_num[offset] < 0xA)
667 pba_num[offset] += '0';
668 else if (pba_num[offset] < 0x10)
669 pba_num[offset] += 'A' - 0xA;
672 return IXGBE_SUCCESS;
675 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
677 DEBUGOUT("NVM Read Error\n");
681 if (length == 0xFFFF || length == 0) {
682 DEBUGOUT("NVM PBA number section invalid length\n");
683 return IXGBE_ERR_PBA_SECTION;
686 /* check if pba_num buffer is big enough */
687 if (pba_num_size < (((u32)length * 2) - 1)) {
688 DEBUGOUT("PBA string buffer too small\n");
689 return IXGBE_ERR_NO_SPACE;
692 /* trim pba length from start of string */
696 for (offset = 0; offset < length; offset++) {
697 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
699 DEBUGOUT("NVM Read Error\n");
702 pba_num[offset * 2] = (u8)(data >> 8);
703 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
705 pba_num[offset * 2] = '\0';
707 return IXGBE_SUCCESS;
711 * ixgbe_read_pba_num_generic - Reads part number from EEPROM
712 * @hw: pointer to hardware structure
713 * @pba_num: stores the part number from the EEPROM
715 * Reads the part number from the EEPROM.
717 s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
722 DEBUGFUNC("ixgbe_read_pba_num_generic");
724 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
726 DEBUGOUT("NVM Read Error\n");
728 } else if (data == IXGBE_PBANUM_PTR_GUARD) {
729 DEBUGOUT("NVM Not supported\n");
730 return IXGBE_NOT_IMPLEMENTED;
732 *pba_num = (u32)(data << 16);
734 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
736 DEBUGOUT("NVM Read Error\n");
739 *pba_num |= (u32)data;
741 return IXGBE_SUCCESS;
746 * @hw: pointer to the HW structure
747 * @eeprom_buf: optional pointer to EEPROM image
748 * @eeprom_buf_size: size of EEPROM image in words
749 * @max_pba_block_size: PBA block size limit
750 * @pba: pointer to output PBA structure
752 * Reads PBA from EEPROM image when eeprom_buf is not NULL.
753 * Reads PBA from physical EEPROM device when eeprom_buf is NULL.
756 s32 ixgbe_read_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
757 u32 eeprom_buf_size, u16 max_pba_block_size,
758 struct ixgbe_pba *pba)
764 return IXGBE_ERR_PARAM;
766 if (eeprom_buf == NULL) {
767 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
772 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
773 pba->word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
774 pba->word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
776 return IXGBE_ERR_PARAM;
780 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
781 if (pba->pba_block == NULL)
782 return IXGBE_ERR_PARAM;
784 ret_val = ixgbe_get_pba_block_size(hw, eeprom_buf,
790 if (pba_block_size > max_pba_block_size)
791 return IXGBE_ERR_PARAM;
793 if (eeprom_buf == NULL) {
794 ret_val = hw->eeprom.ops.read_buffer(hw, pba->word[1],
800 if (eeprom_buf_size > (u32)(pba->word[1] +
802 memcpy(pba->pba_block,
803 &eeprom_buf[pba->word[1]],
804 pba_block_size * sizeof(u16));
806 return IXGBE_ERR_PARAM;
811 return IXGBE_SUCCESS;
815 * ixgbe_write_pba_raw
816 * @hw: pointer to the HW structure
817 * @eeprom_buf: optional pointer to EEPROM image
818 * @eeprom_buf_size: size of EEPROM image in words
819 * @pba: pointer to PBA structure
821 * Writes PBA to EEPROM image when eeprom_buf is not NULL.
822 * Writes PBA to physical EEPROM device when eeprom_buf is NULL.
825 s32 ixgbe_write_pba_raw(struct ixgbe_hw *hw, u16 *eeprom_buf,
826 u32 eeprom_buf_size, struct ixgbe_pba *pba)
831 return IXGBE_ERR_PARAM;
833 if (eeprom_buf == NULL) {
834 ret_val = hw->eeprom.ops.write_buffer(hw, IXGBE_PBANUM0_PTR, 2,
839 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
840 eeprom_buf[IXGBE_PBANUM0_PTR] = pba->word[0];
841 eeprom_buf[IXGBE_PBANUM1_PTR] = pba->word[1];
843 return IXGBE_ERR_PARAM;
847 if (pba->word[0] == IXGBE_PBANUM_PTR_GUARD) {
848 if (pba->pba_block == NULL)
849 return IXGBE_ERR_PARAM;
851 if (eeprom_buf == NULL) {
852 ret_val = hw->eeprom.ops.write_buffer(hw, pba->word[1],
858 if (eeprom_buf_size > (u32)(pba->word[1] +
859 pba->pba_block[0])) {
860 memcpy(&eeprom_buf[pba->word[1]],
862 pba->pba_block[0] * sizeof(u16));
864 return IXGBE_ERR_PARAM;
869 return IXGBE_SUCCESS;
873 * ixgbe_get_pba_block_size
874 * @hw: pointer to the HW structure
875 * @eeprom_buf: optional pointer to EEPROM image
876 * @eeprom_buf_size: size of EEPROM image in words
877 * @pba_data_size: pointer to output variable
879 * Returns the size of the PBA block in words. Function operates on EEPROM
880 * image if the eeprom_buf pointer is not NULL otherwise it accesses physical
884 s32 ixgbe_get_pba_block_size(struct ixgbe_hw *hw, u16 *eeprom_buf,
885 u32 eeprom_buf_size, u16 *pba_block_size)
891 DEBUGFUNC("ixgbe_get_pba_block_size");
893 if (eeprom_buf == NULL) {
894 ret_val = hw->eeprom.ops.read_buffer(hw, IXGBE_PBANUM0_PTR, 2,
899 if (eeprom_buf_size > IXGBE_PBANUM1_PTR) {
900 pba_word[0] = eeprom_buf[IXGBE_PBANUM0_PTR];
901 pba_word[1] = eeprom_buf[IXGBE_PBANUM1_PTR];
903 return IXGBE_ERR_PARAM;
907 if (pba_word[0] == IXGBE_PBANUM_PTR_GUARD) {
908 if (eeprom_buf == NULL) {
909 ret_val = hw->eeprom.ops.read(hw, pba_word[1] + 0,
914 if (eeprom_buf_size > pba_word[1])
915 length = eeprom_buf[pba_word[1] + 0];
917 return IXGBE_ERR_PARAM;
920 if (length == 0xFFFF || length == 0)
921 return IXGBE_ERR_PBA_SECTION;
923 /* PBA number in legacy format, there is no PBA Block. */
927 if (pba_block_size != NULL)
928 *pba_block_size = length;
930 return IXGBE_SUCCESS;
934 * ixgbe_get_mac_addr_generic - Generic get MAC address
935 * @hw: pointer to hardware structure
936 * @mac_addr: Adapter MAC address
938 * Reads the adapter's MAC address from first Receive Address Register (RAR0)
939 * A reset of the adapter must be performed prior to calling this function
940 * in order for the MAC address to have been loaded from the EEPROM into RAR0
942 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
948 DEBUGFUNC("ixgbe_get_mac_addr_generic");
950 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
951 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
953 for (i = 0; i < 4; i++)
954 mac_addr[i] = (u8)(rar_low >> (i*8));
956 for (i = 0; i < 2; i++)
957 mac_addr[i+4] = (u8)(rar_high >> (i*8));
959 return IXGBE_SUCCESS;
963 * ixgbe_set_pci_config_data_generic - Generic store PCI bus info
964 * @hw: pointer to hardware structure
965 * @link_status: the link status returned by the PCI config space
967 * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure
969 void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
971 struct ixgbe_mac_info *mac = &hw->mac;
973 if (hw->bus.type == ixgbe_bus_type_unknown)
974 hw->bus.type = ixgbe_bus_type_pci_express;
976 switch (link_status & IXGBE_PCI_LINK_WIDTH) {
977 case IXGBE_PCI_LINK_WIDTH_1:
978 hw->bus.width = ixgbe_bus_width_pcie_x1;
980 case IXGBE_PCI_LINK_WIDTH_2:
981 hw->bus.width = ixgbe_bus_width_pcie_x2;
983 case IXGBE_PCI_LINK_WIDTH_4:
984 hw->bus.width = ixgbe_bus_width_pcie_x4;
986 case IXGBE_PCI_LINK_WIDTH_8:
987 hw->bus.width = ixgbe_bus_width_pcie_x8;
990 hw->bus.width = ixgbe_bus_width_unknown;
994 switch (link_status & IXGBE_PCI_LINK_SPEED) {
995 case IXGBE_PCI_LINK_SPEED_2500:
996 hw->bus.speed = ixgbe_bus_speed_2500;
998 case IXGBE_PCI_LINK_SPEED_5000:
999 hw->bus.speed = ixgbe_bus_speed_5000;
1001 case IXGBE_PCI_LINK_SPEED_8000:
1002 hw->bus.speed = ixgbe_bus_speed_8000;
1005 hw->bus.speed = ixgbe_bus_speed_unknown;
1009 mac->ops.set_lan_id(hw);
1013 * ixgbe_get_bus_info_generic - Generic set PCI bus info
1014 * @hw: pointer to hardware structure
1016 * Gets the PCI bus info (speed, width, type) then calls helper function to
1017 * store this data within the ixgbe_hw structure.
1019 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
1023 DEBUGFUNC("ixgbe_get_bus_info_generic");
1025 /* Get the negotiated link width and speed from PCI config space */
1026 link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
1028 ixgbe_set_pci_config_data_generic(hw, link_status);
1030 return IXGBE_SUCCESS;
1034 * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
1035 * @hw: pointer to the HW structure
1037 * Determines the LAN function id by reading memory-mapped registers and swaps
1038 * the port value if requested, and set MAC instance for devices that share
1041 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
1043 struct ixgbe_bus_info *bus = &hw->bus;
1047 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
1049 reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
1050 bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
1051 bus->lan_id = (u8)bus->func;
1053 /* check for a port swap */
1054 reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
1055 if (reg & IXGBE_FACTPS_LFS)
1058 /* Get MAC instance from EEPROM for configuring CS4227 */
1059 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
1060 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
1061 bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
1062 IXGBE_EE_CTRL_4_INST_ID_SHIFT;
1067 * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
1068 * @hw: pointer to hardware structure
1070 * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
1071 * disables transmit and receive units. The adapter_stopped flag is used by
1072 * the shared code and drivers to determine if the adapter is in a stopped
1073 * state and should not touch the hardware.
1075 s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
1080 DEBUGFUNC("ixgbe_stop_adapter_generic");
1083 * Set the adapter_stopped flag so other driver functions stop touching
1086 hw->adapter_stopped = true;
1088 /* Disable the receive unit */
1089 ixgbe_disable_rx(hw);
1091 /* Clear interrupt mask to stop interrupts from being generated */
1092 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1094 /* Clear any pending interrupts, flush previous writes */
1095 IXGBE_READ_REG(hw, IXGBE_EICR);
1097 /* Disable the transmit unit. Each queue must be disabled. */
1098 for (i = 0; i < hw->mac.max_tx_queues; i++)
1099 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
1101 /* Disable the receive unit by stopping each queue */
1102 for (i = 0; i < hw->mac.max_rx_queues; i++) {
1103 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
1104 reg_val &= ~IXGBE_RXDCTL_ENABLE;
1105 reg_val |= IXGBE_RXDCTL_SWFLSH;
1106 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
1109 /* flush all queues disables */
1110 IXGBE_WRITE_FLUSH(hw);
1114 * Prevent the PCI-E bus from hanging by disabling PCI-E master
1115 * access and verify no pending requests
1117 return ixgbe_disable_pcie_master(hw);
1121 * ixgbe_init_led_link_act_generic - Store the LED index link/activity.
1122 * @hw: pointer to hardware structure
1124 * Store the index for the link active LED. This will be used to support
1127 s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
1129 struct ixgbe_mac_info *mac = &hw->mac;
1130 u32 led_reg, led_mode;
1133 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1135 /* Get LED link active from the LEDCTL register */
1136 for (i = 0; i < 4; i++) {
1137 led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);
1139 if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
1140 IXGBE_LED_LINK_ACTIVE) {
1141 mac->led_link_act = i;
1142 return IXGBE_SUCCESS;
1147 * If LEDCTL register does not have the LED link active set, then use
1148 * known MAC defaults.
1150 switch (hw->mac.type) {
1151 case ixgbe_mac_X550EM_a:
1152 case ixgbe_mac_X550EM_x:
1153 mac->led_link_act = 1;
1156 mac->led_link_act = 2;
1158 return IXGBE_SUCCESS;
1162 * ixgbe_led_on_generic - Turns on the software controllable LEDs.
1163 * @hw: pointer to hardware structure
1164 * @index: led number to turn on
1166 s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
1168 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1170 DEBUGFUNC("ixgbe_led_on_generic");
1173 return IXGBE_ERR_PARAM;
1175 /* To turn on the LED, set mode to ON. */
1176 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1177 led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
1178 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
1179 IXGBE_WRITE_FLUSH(hw);
1181 return IXGBE_SUCCESS;
1185 * ixgbe_led_off_generic - Turns off the software controllable LEDs.
1186 * @hw: pointer to hardware structure
1187 * @index: led number to turn off
1189 s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
1191 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1193 DEBUGFUNC("ixgbe_led_off_generic");
/* NOTE(review): the guard condition is not visible in this extract;
 * presumably an out-of-range LED index check — confirm in full source. */
1196 return IXGBE_ERR_PARAM;
1198 /* To turn off the LED, set mode to OFF. */
1199 led_reg &= ~IXGBE_LED_MODE_MASK(index);
1200 led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
1201 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
/* Flush so the posted LEDCTL write reaches hardware immediately. */
1202 IXGBE_WRITE_FLUSH(hw);
1204 return IXGBE_SUCCESS;
1208 * ixgbe_init_eeprom_params_generic - Initialize EEPROM params
1209 * @hw: pointer to hardware structure
1211 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
1212 * ixgbe_hw struct in order to set up EEPROM access.
1214 s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
1216 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
1220 DEBUGFUNC("ixgbe_init_eeprom_params_generic");
/* Parameters are computed only once; subsequent calls are no-ops
 * because the type is no longer "uninitialized". */
1222 if (eeprom->type == ixgbe_eeprom_uninitialized) {
1223 eeprom->type = ixgbe_eeprom_none;
1224 /* Set default semaphore delay to 10ms which is a well
1226 eeprom->semaphore_delay = 10;
1227 /* Clear EEPROM page size, it will be initialized as needed */
1228 eeprom->word_page_size = 0;
1231 * Check for EEPROM present first.
1232 * If not present leave as none
1234 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1235 if (eec & IXGBE_EEC_PRES) {
1236 eeprom->type = ixgbe_eeprom_spi;
1239 * SPI EEPROM is assumed here. This code would need to
1240 * change if a future EEPROM is not SPI.
/* word_size = 2^(EEC size field + base shift). */
1242 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
1243 IXGBE_EEC_SIZE_SHIFT);
1244 eeprom->word_size = 1 << (eeprom_size +
1245 IXGBE_EEPROM_WORD_SIZE_SHIFT);
/* EEC_ADDR_SIZE set => 16-bit SPI addressing, else 8-bit. */
1248 if (eec & IXGBE_EEC_ADDR_SIZE)
1249 eeprom->address_bits = 16;
1251 eeprom->address_bits = 8;
1252 DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
1253 "%d\n", eeprom->type, eeprom->word_size,
1254 eeprom->address_bits);
1257 return IXGBE_SUCCESS;
1261 * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
1262 * @hw: pointer to hardware structure
1263 * @offset: offset within the EEPROM to write
1264 * @words: number of word(s)
1265 * @data: 16 bit word(s) to write to EEPROM
1267 * Writes 16 bit word(s) to EEPROM through bit-bang method
1269 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1270 u16 words, u16 *data)
1272 s32 status = IXGBE_SUCCESS;
1275 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
1277 hw->eeprom.ops.init_params(hw);
/* NOTE(review): condition guarding this is not visible in this
 * extract; presumably words == 0 — confirm in full source. */
1280 status = IXGBE_ERR_INVALID_ARGUMENT;
/* Reject writes that would run past the end of the EEPROM. */
1284 if (offset + words > hw->eeprom.word_size) {
1285 status = IXGBE_ERR_EEPROM;
1290 * The EEPROM page size cannot be queried from the chip. We do lazy
1291 * initialization. It is worth to do that when we write large buffer.
1293 if ((hw->eeprom.word_page_size == 0) &&
1294 (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
1295 ixgbe_detect_eeprom_page_size_generic(hw, offset);
1298 * We cannot hold synchronization semaphores for too long
1299 * to avoid other entity starvation. However it is more efficient
1300 * to read in bursts than synchronizing access for each word.
/* Split into bursts of at most IXGBE_EEPROM_RD_BUFFER_MAX_COUNT words
 * so the HW semaphore is released between bursts. */
1302 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1303 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1304 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1305 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
1308 if (status != IXGBE_SUCCESS)
1317 * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
1318 * @hw: pointer to hardware structure
1319 * @offset: offset within the EEPROM to be written to
1320 * @words: number of word(s)
1321 * @data: 16 bit word(s) to be written to the EEPROM
1323 * If ixgbe_eeprom_update_checksum is not called after this function, the
1324 * EEPROM will most likely contain an invalid checksum.
1326 STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1327 u16 words, u16 *data)
1333 u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
1335 DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
1337 /* Prepare the EEPROM for writing */
/* Take SW/FW sync + EEC grant before driving the SPI pins. */
1338 status = ixgbe_acquire_eeprom(hw);
1340 if (status == IXGBE_SUCCESS) {
1341 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1342 ixgbe_release_eeprom(hw);
1343 status = IXGBE_ERR_EEPROM;
1347 if (status == IXGBE_SUCCESS) {
1348 for (i = 0; i < words; i++) {
1349 ixgbe_standby_eeprom(hw);
1351 /* Send the WRITE ENABLE command (8 bit opcode ) */
1352 ixgbe_shift_out_eeprom_bits(hw,
1353 IXGBE_EEPROM_WREN_OPCODE_SPI,
1354 IXGBE_EEPROM_OPCODE_BITS);
1356 ixgbe_standby_eeprom(hw);
1359 * Some SPI eeproms use the 8th address bit embedded
/* 8-bit-address parts encode A8 in the opcode for words >= 128. */
1362 if ((hw->eeprom.address_bits == 8) &&
1363 ((offset + i) >= 128))
1364 write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1366 /* Send the Write command (8-bit opcode + addr) */
1367 ixgbe_shift_out_eeprom_bits(hw, write_opcode,
1368 IXGBE_EEPROM_OPCODE_BITS);
/* Byte address on the wire = word offset * 2. */
1369 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1370 hw->eeprom.address_bits);
1372 page_size = hw->eeprom.word_page_size;
1374 /* Send the data in burst via SPI*/
/* Swap to the byte order the SPI device expects on the wire. */
1377 word = (word >> 8) | (word << 8);
1378 ixgbe_shift_out_eeprom_bits(hw, word, 16);
1383 /* do not wrap around page */
/* page_size is a power of two, so the mask detects a page boundary. */
1384 if (((offset + i) & (page_size - 1)) ==
1387 } while (++i < words);
1389 ixgbe_standby_eeprom(hw);
1392 /* Done with writing - release the EEPROM */
1393 ixgbe_release_eeprom(hw);
1400 * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
1401 * @hw: pointer to hardware structure
1402 * @offset: offset within the EEPROM to be written to
1403 * @data: 16 bit word to be written to the EEPROM
1405 * If ixgbe_eeprom_update_checksum is not called after this function, the
1406 * EEPROM will most likely contain an invalid checksum.
/* Single-word convenience wrapper over the bit-bang buffer write. */
1408 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1412 DEBUGFUNC("ixgbe_write_eeprom_generic");
1414 hw->eeprom.ops.init_params(hw);
/* Reject writes past the end of the EEPROM. */
1416 if (offset >= hw->eeprom.word_size) {
1417 status = IXGBE_ERR_EEPROM;
1421 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
1428 * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
1429 * @hw: pointer to hardware structure
1430 * @offset: offset within the EEPROM to be read
1431 * @data: read 16 bit words(s) from EEPROM
1432 * @words: number of word(s)
1434 * Reads 16 bit word(s) from EEPROM through bit-bang method
1436 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1437 u16 words, u16 *data)
1439 s32 status = IXGBE_SUCCESS;
1442 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
1444 hw->eeprom.ops.init_params(hw);
/* NOTE(review): condition guarding this is not visible in this
 * extract; presumably words == 0 — confirm in full source. */
1447 status = IXGBE_ERR_INVALID_ARGUMENT;
/* Reject reads that would run past the end of the EEPROM. */
1451 if (offset + words > hw->eeprom.word_size) {
1452 status = IXGBE_ERR_EEPROM;
1457 * We cannot hold synchronization semaphores for too long
1458 * to avoid other entity starvation. However it is more efficient
1459 * to read in bursts than synchronizing access for each word.
1461 for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
1462 count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
1463 IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
1465 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
1468 if (status != IXGBE_SUCCESS)
1477 * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
1478 * @hw: pointer to hardware structure
1479 * @offset: offset within the EEPROM to be read
1480 * @words: number of word(s)
1481 * @data: read 16 bit word(s) from EEPROM
1483 * Reads 16 bit word(s) from EEPROM through bit-bang method
1485 STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
1486 u16 words, u16 *data)
1490 u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
1493 DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
1495 /* Prepare the EEPROM for reading */
/* Take SW/FW sync + EEC grant before driving the SPI pins. */
1496 status = ixgbe_acquire_eeprom(hw);
1498 if (status == IXGBE_SUCCESS) {
1499 if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
1500 ixgbe_release_eeprom(hw);
1501 status = IXGBE_ERR_EEPROM;
1505 if (status == IXGBE_SUCCESS) {
1506 for (i = 0; i < words; i++) {
1507 ixgbe_standby_eeprom(hw);
1509 * Some SPI eeproms use the 8th address bit embedded
/* 8-bit-address parts encode A8 in the opcode for words >= 128. */
1512 if ((hw->eeprom.address_bits == 8) &&
1513 ((offset + i) >= 128))
1514 read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
1516 /* Send the READ command (opcode + addr) */
1517 ixgbe_shift_out_eeprom_bits(hw, read_opcode,
1518 IXGBE_EEPROM_OPCODE_BITS);
/* Byte address on the wire = word offset * 2. */
1519 ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
1520 hw->eeprom.address_bits);
1522 /* Read the data. */
1523 word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
/* Byte-swap from wire order to host word order. */
1524 data[i] = (word_in >> 8) | (word_in << 8);
1527 /* End this read operation */
1528 ixgbe_release_eeprom(hw);
1535 * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
1536 * @hw: pointer to hardware structure
1537 * @offset: offset within the EEPROM to be read
1538 * @data: read 16 bit value from EEPROM
1540 * Reads 16 bit value from EEPROM through bit-bang method
/* Single-word convenience wrapper over the bit-bang buffer read. */
1542 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
1547 DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
1549 hw->eeprom.ops.init_params(hw);
/* Reject reads past the end of the EEPROM. */
1551 if (offset >= hw->eeprom.word_size) {
1552 status = IXGBE_ERR_EEPROM;
1556 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1563 * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
1564 * @hw: pointer to hardware structure
1565 * @offset: offset of word in the EEPROM to read
1566 * @words: number of word(s)
1567 * @data: 16 bit word(s) from the EEPROM
1569 * Reads a 16 bit word(s) from the EEPROM using the EERD register.
1571 s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1572 u16 words, u16 *data)
1575 s32 status = IXGBE_SUCCESS;
1578 DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
1580 hw->eeprom.ops.init_params(hw);
/* NOTE(review): condition guarding this is not visible in this
 * extract; presumably words == 0 — confirm in full source. */
1583 status = IXGBE_ERR_INVALID_ARGUMENT;
1584 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1588 if (offset >= hw->eeprom.word_size) {
1589 status = IXGBE_ERR_EEPROM;
1590 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
/* One EERD transaction per word: program address + START bit,
 * poll for DONE, then pick the data out of the high half. */
1594 for (i = 0; i < words; i++) {
1595 eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1596 IXGBE_EEPROM_RW_REG_START;
1598 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
1599 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
1601 if (status == IXGBE_SUCCESS) {
1602 data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
1603 IXGBE_EEPROM_RW_REG_DATA);
1605 DEBUGOUT("Eeprom read timed out\n");
1614 * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
1615 * @hw: pointer to hardware structure
1616 * @offset: offset within the EEPROM to be used as a scratch pad
1618 * Discover EEPROM page size by writing marching data at given offset.
1619 * This function is called only when we are writing a new large buffer
1620 * at given offset so the data would be overwritten anyway.
1622 STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
1625 u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
1626 s32 status = IXGBE_SUCCESS;
1629 DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
1631 for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
/* Temporarily claim the max page size so the burst write below is
 * issued as one page-sized burst, then reset for re-detection. */
1634 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
1635 status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
1636 IXGBE_EEPROM_PAGE_SIZE_MAX, data);
1637 hw->eeprom.word_page_size = 0;
1638 if (status != IXGBE_SUCCESS)
1641 status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
1642 if (status != IXGBE_SUCCESS)
1646 * When writing in burst more than the actual page size
1647 * EEPROM address wraps around current page.
/* data[0] holds the marching value that wrapped onto word 0, so the
 * real page size is MAX minus that value. */
1649 hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
1651 DEBUGOUT1("Detected EEPROM page size = %d words.",
1652 hw->eeprom.word_page_size);
1658 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
1659 * @hw: pointer to hardware structure
1660 * @offset: offset of word in the EEPROM to read
1661 * @data: word read from the EEPROM
1663 * Reads a 16 bit word from the EEPROM using the EERD register.
/* Single-word wrapper over the EERD buffer read. */
1665 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
1667 return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
1671 * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
1672 * @hw: pointer to hardware structure
1673 * @offset: offset of word in the EEPROM to write
1674 * @words: number of word(s)
1675 * @data: word(s) write to the EEPROM
1677 * Write a 16 bit word(s) to the EEPROM using the EEWR register.
1679 s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
1680 u16 words, u16 *data)
1683 s32 status = IXGBE_SUCCESS;
/* NOTE(review): DEBUGFUNC tag says "ixgbe_write_eewr_generic" but this
 * is the buffer variant — trace output is misleading. */
1686 DEBUGFUNC("ixgbe_write_eewr_generic");
1688 hw->eeprom.ops.init_params(hw);
/* NOTE(review): condition guarding this is not visible in this
 * extract; presumably words == 0 — confirm in full source. */
1691 status = IXGBE_ERR_INVALID_ARGUMENT;
1692 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words");
1696 if (offset >= hw->eeprom.word_size) {
1697 status = IXGBE_ERR_EEPROM;
1698 ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset");
/* Per word: build EEWR value (address | data | START). */
1702 for (i = 0; i < words; i++) {
1703 eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
1704 (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
1705 IXGBE_EEPROM_RW_REG_START;
/* Wait for any in-flight EEWR operation before issuing ours... */
1707 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1708 if (status != IXGBE_SUCCESS) {
1709 DEBUGOUT("Eeprom write EEWR timed out\n");
1713 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
/* ...then wait for this write to complete. */
1715 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
1716 if (status != IXGBE_SUCCESS) {
1717 DEBUGOUT("Eeprom write EEWR timed out\n");
1727 * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
1728 * @hw: pointer to hardware structure
1729 * @offset: offset of word in the EEPROM to write
1730 * @data: word write to the EEPROM
1732 * Write a 16 bit word to the EEPROM using the EEWR register.
/* Single-word wrapper over the EEWR buffer write. */
1734 s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
1736 return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
1740 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
1741 * @hw: pointer to hardware structure
1742 * @ee_reg: EEPROM flag for polling
1744 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
1745 * read or write is done respectively.
1747 s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
1751 s32 status = IXGBE_ERR_EEPROM;
1753 DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
/* Poll the selected register (EERD for reads, EEWR for writes) until
 * hardware sets the DONE bit or the attempt budget is exhausted. */
1755 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
1756 if (ee_reg == IXGBE_NVM_POLL_READ)
1757 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
1759 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
1761 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
1762 status = IXGBE_SUCCESS;
/* Loop ran to completion without DONE => timeout. */
1768 if (i == IXGBE_EERD_EEWR_ATTEMPTS)
1769 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1770 "EEPROM read/write done polling timed out");
1776 * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
1777 * @hw: pointer to hardware structure
1779 * Prepares EEPROM for access using bit-bang method. This function should
1780 * be called before issuing a command to the EEPROM.
1782 STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
1784 s32 status = IXGBE_SUCCESS;
1788 DEBUGFUNC("ixgbe_acquire_eeprom");
/* First take the SW/FW sync for the EEPROM, then request the EEC
 * hardware grant before touching the SPI pins. */
1790 if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
1792 status = IXGBE_ERR_SWFW_SYNC;
1794 if (status == IXGBE_SUCCESS) {
1795 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1797 /* Request EEPROM Access */
1798 eec |= IXGBE_EEC_REQ;
1799 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
/* Spin until hardware raises the grant bit or attempts run out. */
1801 for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
1802 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1803 if (eec & IXGBE_EEC_GNT)
1808 /* Release if grant not acquired */
1809 if (!(eec & IXGBE_EEC_GNT)) {
1810 eec &= ~IXGBE_EEC_REQ;
1811 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1812 DEBUGOUT("Could not acquire EEPROM grant\n");
/* Back out the SW/FW sync taken above on failure. */
1814 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
1815 status = IXGBE_ERR_EEPROM;
1818 /* Setup EEPROM for Read/Write */
1819 if (status == IXGBE_SUCCESS) {
1820 /* Clear CS and SK */
1821 eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
1822 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1823 IXGBE_WRITE_FLUSH(hw);
1831 * ixgbe_get_eeprom_semaphore - Get hardware semaphore
1832 * @hw: pointer to hardware structure
1834 * Sets the hardware semaphores so EEPROM access can occur for bit-bang method
1836 STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
1838 s32 status = IXGBE_ERR_EEPROM;
1843 DEBUGFUNC("ixgbe_get_eeprom_semaphore");
1846 /* Get SMBI software semaphore between device drivers first */
1847 for (i = 0; i < timeout; i++) {
1849 * If the SMBI bit is 0 when we read it, then the bit will be
1850 * set and we have the semaphore
1852 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1853 if (!(swsm & IXGBE_SWSM_SMBI)) {
1854 status = IXGBE_SUCCESS;
1861 DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
1864 * this release is particularly important because our attempts
1865 * above to get the semaphore may have succeeded, and if there
1866 * was a timeout, we should unconditionally clear the semaphore
1867 * bits to free the driver to make progress
1869 ixgbe_release_eeprom_semaphore(hw);
/* One last attempt after the forced release above. */
1874 * If the SMBI bit is 0 when we read it, then the bit will be
1875 * set and we have the semaphore
1877 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1878 if (!(swsm & IXGBE_SWSM_SMBI))
1879 status = IXGBE_SUCCESS;
1882 /* Now get the semaphore between SW/FW through the SWESMBI bit */
1883 if (status == IXGBE_SUCCESS) {
1884 for (i = 0; i < timeout; i++) {
1885 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1887 /* Set the SW EEPROM semaphore bit to request access */
1888 swsm |= IXGBE_SWSM_SWESMBI;
1889 IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm);
1892 * If we set the bit successfully then we got the
/* Read back: SWESMBI sticks only if firmware granted it. */
1895 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw));
1896 if (swsm & IXGBE_SWSM_SWESMBI)
1903 * Release semaphores and return error if SW EEPROM semaphore
1904 * was not granted because we don't have access to the EEPROM
1907 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1908 "SWESMBI Software EEPROM semaphore not granted.\n");
1909 ixgbe_release_eeprom_semaphore(hw);
1910 status = IXGBE_ERR_EEPROM;
1913 ERROR_REPORT1(IXGBE_ERROR_POLLING,
1914 "Software semaphore SMBI between device drivers "
1922 * ixgbe_release_eeprom_semaphore - Release hardware semaphore
1923 * @hw: pointer to hardware structure
1925 * This function clears hardware semaphore bits.
1927 STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
1931 DEBUGFUNC("ixgbe_release_eeprom_semaphore");
/* NOTE(review): reads plain IXGBE_SWSM while the acquire path uses
 * IXGBE_SWSM_BY_MAC(hw) — confirm this asymmetry is intentional. */
1933 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
1935 /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
1936 swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
1937 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
/* Flush so the release is visible to firmware/other drivers at once. */
1938 IXGBE_WRITE_FLUSH(hw);
1942 * ixgbe_ready_eeprom - Polls for EEPROM ready
1943 * @hw: pointer to hardware structure
1945 STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
1947 s32 status = IXGBE_SUCCESS;
1951 DEBUGFUNC("ixgbe_ready_eeprom");
1954 * Read "Status Register" repeatedly until the LSB is cleared. The
1955 * EEPROM will signal that the command has been completed by clearing
1956 * bit 0 of the internal status register. If it's not cleared within
1957 * 5 milliseconds, then error out.
/* Each iteration issues the SPI RDSR opcode and reads back one status
 * byte; loop steps by 5 to match the delay granularity. */
1959 for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
1960 ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
1961 IXGBE_EEPROM_OPCODE_BITS);
1962 spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
1963 if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
/* Toggle CS between status polls to terminate each RDSR command. */
1967 ixgbe_standby_eeprom(hw);
1971 * On some parts, SPI write time could vary from 0-20mSec on 3.3V
1972 * devices (and only 0-5mSec on 5V devices)
1974 if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
1975 DEBUGOUT("SPI EEPROM Status error\n");
1976 status = IXGBE_ERR_EEPROM;
1983 * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
1984 * @hw: pointer to hardware structure
1986 STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
1990 DEBUGFUNC("ixgbe_standby_eeprom");
1992 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
1994 /* Toggle CS to flush commands */
/* Raise CS (deselect)... */
1995 eec |= IXGBE_EEC_CS;
1996 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
1997 IXGBE_WRITE_FLUSH(hw);
/* ...then lower CS again so the device is reselected for the next
 * command; each write is flushed to guarantee ordering on the wire. */
1999 eec &= ~IXGBE_EEC_CS;
2000 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2001 IXGBE_WRITE_FLUSH(hw);
2006 * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
2007 * @hw: pointer to hardware structure
2008 * @data: data to send to the EEPROM
2009 * @count: number of bits to shift out
2011 STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
2018 DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
2020 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2023 * Mask is used to shift "count" bits of "data" out to the EEPROM
2024 * one bit at a time. Determine the starting bit based on count
/* MSB first: start the mask at bit (count - 1). */
2026 mask = 0x01 << (count - 1);
2028 for (i = 0; i < count; i++) {
2030 * A "1" is shifted out to the EEPROM by setting bit "DI" to a
2031 * "1", and then raising and then lowering the clock (the SK
2032 * bit controls the clock input to the EEPROM). A "0" is
2033 * shifted out to the EEPROM by setting "DI" to "0" and then
2034 * raising and then lowering the clock.
2037 eec |= IXGBE_EEC_DI;
2039 eec &= ~IXGBE_EEC_DI;
2041 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2042 IXGBE_WRITE_FLUSH(hw);
/* Clock the bit out: SK high then low latches DI into the device. */
2046 ixgbe_raise_eeprom_clk(hw, &eec);
2047 ixgbe_lower_eeprom_clk(hw, &eec);
2050 * Shift mask to signify next bit of data to shift in to the
2056 /* We leave the "DI" bit set to "0" when we leave this routine. */
2057 eec &= ~IXGBE_EEC_DI;
2058 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2059 IXGBE_WRITE_FLUSH(hw);
2063 * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
2064 * @hw: pointer to hardware structure
2065 * @count: number of bits to shift
2067 STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
2073 DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
2076 * In order to read a register from the EEPROM, we need to shift
2077 * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
2078 * the clock input to the EEPROM (setting the SK bit), and then reading
2079 * the value of the "DO" bit. During this "shifting in" process the
2080 * "DI" bit should always be clear.
2082 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
2084 eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
/* MSB first: clock SK high, sample DO, clock SK low, repeat. */
2086 for (i = 0; i < count; i++) {
2088 ixgbe_raise_eeprom_clk(hw, &eec);
2090 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
/* Keep DI low throughout the read, per the comment above. */
2092 eec &= ~(IXGBE_EEC_DI);
2093 if (eec & IXGBE_EEC_DO)
2096 ixgbe_lower_eeprom_clk(hw, &eec);
2103 * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
2104 * @hw: pointer to hardware structure
2105 * @eec: EEC register's current value
2107 STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2109 DEBUGFUNC("ixgbe_raise_eeprom_clk");
2112 * Raise the clock input to the EEPROM
2113 * (setting the SK bit), then delay
/* Caller's cached EEC value is updated in place so subsequent bit
 * operations see the current pin state. */
2115 *eec = *eec | IXGBE_EEC_SK;
2116 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2117 IXGBE_WRITE_FLUSH(hw);
2122 * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
2123 * @hw: pointer to hardware structure
2124 * @eec: EEC's current value
2126 STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
2128 DEBUGFUNC("ixgbe_lower_eeprom_clk");
2131 * Lower the clock input to the EEPROM (clearing the SK bit), then
/* Caller's cached EEC value is updated in place, mirroring
 * ixgbe_raise_eeprom_clk(). */
2134 *eec = *eec & ~IXGBE_EEC_SK;
2135 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec);
2136 IXGBE_WRITE_FLUSH(hw);
2141 * ixgbe_release_eeprom - Release EEPROM, release semaphores
2142 * @hw: pointer to hardware structure
2144 STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw)
2148 DEBUGFUNC("ixgbe_release_eeprom");
2150 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw));
/* Return the SPI bus pins to their idle state first. */
2152 eec |= IXGBE_EEC_CS; /* Pull CS high */
2153 eec &= ~IXGBE_EEC_SK; /* Lower SCK */
2155 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
2156 IXGBE_WRITE_FLUSH(hw);
2160 /* Stop requesting EEPROM access */
2161 eec &= ~IXGBE_EEC_REQ;
2162 IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec);
/* Drop the SW/FW sync taken in ixgbe_acquire_eeprom(). */
2164 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
2166 /* Delay before attempt to obtain semaphore again to allow FW access */
2167 msec_delay(hw->eeprom.semaphore_delay);
2171 * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
2172 * @hw: pointer to hardware structure
2174 * Returns a negative error code on error, or the 16-bit checksum
2176 s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
2185 DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
2187 /* Include 0x0-0x3F in the checksum */
2188 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
2189 if (hw->eeprom.ops.read(hw, i, &word)) {
2190 DEBUGOUT("EEPROM read failed\n");
2191 return IXGBE_ERR_EEPROM;
2196 /* Include all data from pointers except for the fw pointer */
2197 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
2198 if (hw->eeprom.ops.read(hw, i, &pointer)) {
2199 DEBUGOUT("EEPROM read failed\n");
2200 return IXGBE_ERR_EEPROM;
2203 /* If the pointer seems invalid */
/* 0 and 0xFFFF are the unprogrammed/erased sentinel values. */
2204 if (pointer == 0xFFFF || pointer == 0)
/* First word of each pointed-to region is its length in words. */
2207 if (hw->eeprom.ops.read(hw, pointer, &length)) {
2208 DEBUGOUT("EEPROM read failed\n");
2209 return IXGBE_ERR_EEPROM;
2212 if (length == 0xFFFF || length == 0)
2215 for (j = pointer + 1; j <= pointer + length; j++) {
2216 if (hw->eeprom.ops.read(hw, j, &word)) {
2217 DEBUGOUT("EEPROM read failed\n");
2218 return IXGBE_ERR_EEPROM;
/* The stored checksum is the value that makes the whole sum equal
 * IXGBE_EEPROM_SUM. */
2224 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
2226 return (s32)checksum;
2230 * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
2231 * @hw: pointer to hardware structure
2232 * @checksum_val: calculated checksum
2234 * Performs checksum calculation and validates the EEPROM checksum. If the
2235 * caller does not need checksum_val, the value can be NULL.
2237 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
2242 u16 read_checksum = 0;
2244 DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
2246 /* Read the first word from the EEPROM. If this times out or fails, do
2247 * not continue or we could be in for a very long wait while every
2250 status = hw->eeprom.ops.read(hw, 0, &checksum);
2252 DEBUGOUT("EEPROM read failed\n");
/* calc_checksum returns the 16-bit checksum on success or a negative
 * error code; the low 16 bits carry the value. */
2256 status = hw->eeprom.ops.calc_checksum(hw);
2260 checksum = (u16)(status & 0xffff);
2262 status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
2264 DEBUGOUT("EEPROM read failed\n");
2268 /* Verify read checksum from EEPROM is the same as
2269 * calculated checksum
2271 if (read_checksum != checksum)
2272 status = IXGBE_ERR_EEPROM_CHECKSUM;
2274 /* If the user cares, return the calculated checksum */
2276 *checksum_val = checksum;
2282 * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
2283 * @hw: pointer to hardware structure
2285 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
2290 DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
2292 /* Read the first word from the EEPROM. If this times out or fails, do
2293 * not continue or we could be in for a very long wait while every
2296 status = hw->eeprom.ops.read(hw, 0, &checksum);
2298 DEBUGOUT("EEPROM read failed\n");
/* Recompute the checksum and persist it at the checksum word. */
2302 status = hw->eeprom.ops.calc_checksum(hw);
2306 checksum = (u16)(status & 0xffff);
2308 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum);
2314 * ixgbe_validate_mac_addr - Validate MAC address
2315 * @mac_addr: pointer to MAC address.
2317 * Tests a MAC address to ensure it is a valid Individual Address.
2319 s32 ixgbe_validate_mac_addr(u8 *mac_addr)
2321 s32 status = IXGBE_SUCCESS;
2323 DEBUGFUNC("ixgbe_validate_mac_addr");
2325 /* Make sure it is not a multicast address */
2326 if (IXGBE_IS_MULTICAST(mac_addr)) {
2327 status = IXGBE_ERR_INVALID_MAC_ADDR;
2328 /* Not a broadcast address */
2329 } else if (IXGBE_IS_BROADCAST(mac_addr)) {
2330 status = IXGBE_ERR_INVALID_MAC_ADDR;
2331 /* Reject the zero address */
2332 } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
2333 mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
2334 status = IXGBE_ERR_INVALID_MAC_ADDR;
2340 * ixgbe_set_rar_generic - Set Rx address register
2341 * @hw: pointer to hardware structure
2342 * @index: Receive address register to write
2343 * @addr: Address to put into receive address register
2344 * @vmdq: VMDq "set" or "pool" index
2345 * @enable_addr: set flag that address is active
2347 * Puts an ethernet address into a receive address register.
2349 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
2352 u32 rar_low, rar_high;
2353 u32 rar_entries = hw->mac.num_rar_entries;
2355 DEBUGFUNC("ixgbe_set_rar_generic");
2357 /* Make sure we are using a valid rar index range */
2358 if (index >= rar_entries) {
2359 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2360 "RAR index %d is out of range.\n", index);
2361 return IXGBE_ERR_INVALID_ARGUMENT;
2364 /* setup VMDq pool selection before this RAR gets enabled */
2365 hw->mac.ops.set_vmdq(hw, index, vmdq);
2368 * HW expects these in little endian so we reverse the byte
2369 * order from network order (big endian) to little endian
/* RAL holds the first four address bytes, RAH the last two. */
2371 rar_low = ((u32)addr[0] |
2372 ((u32)addr[1] << 8) |
2373 ((u32)addr[2] << 16) |
2374 ((u32)addr[3] << 24));
2376 * Some parts put the VMDq setting in the extra RAH bits,
2377 * so save everything except the lower 16 bits that hold part
2378 * of the address and the address valid bit.
2380 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2381 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
2382 rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
/* AV (address valid) activates the filter entry. */
2384 if (enable_addr != 0)
2385 rar_high |= IXGBE_RAH_AV;
2387 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
2388 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2390 return IXGBE_SUCCESS;
2394 * ixgbe_clear_rar_generic - Remove Rx address register
2395 * @hw: pointer to hardware structure
2396 * @index: Receive address register to write
2398 * Clears an ethernet address from a receive address register.
2400 s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
2403 u32 rar_entries = hw->mac.num_rar_entries;
2405 DEBUGFUNC("ixgbe_clear_rar_generic");
2407 /* Make sure we are using a valid rar index range */
2408 if (index >= rar_entries) {
2409 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
2410 "RAR index %d is out of range.\n", index);
2411 return IXGBE_ERR_INVALID_ARGUMENT;
2415 * Some parts put the VMDq setting in the extra RAH bits,
2416 * so save everything except the lower 16 bits that hold part
2417 * of the address and the address valid bit.
2419 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
2420 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
/* Zero the address bytes and drop the valid bit, keeping the
 * preserved upper RAH bits intact. */
2422 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
2423 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
2425 /* clear VMDq pool/queue selection for this RAR */
2426 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
2428 return IXGBE_SUCCESS;
2432 * ixgbe_init_rx_addrs_generic - Initializes receive address filters.
2433 * @hw: pointer to hardware structure
2435 * Places the MAC address in receive address register 0 and clears the rest
2436 * of the receive address registers. Clears the multicast table. Assumes
2437 * the receiver is in reset when the routine is called.
2439 s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
2442 u32 rar_entries = hw->mac.num_rar_entries;
2444 DEBUGFUNC("ixgbe_init_rx_addrs_generic");
2447 * If the current mac address is valid, assume it is a software override
2448 * to the permanent address.
2449 * Otherwise, use the permanent address from the eeprom.
2451 if (ixgbe_validate_mac_addr(hw->mac.addr) ==
2452 IXGBE_ERR_INVALID_MAC_ADDR) {
2453 /* Get the MAC address from the RAR0 for later reference */
2454 hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2456 DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
2457 hw->mac.addr[0], hw->mac.addr[1],
2459 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2460 hw->mac.addr[4], hw->mac.addr[5]);
2462 /* Setup the receive address. */
2463 DEBUGOUT("Overriding MAC Address in RAR[0]\n");
2464 DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
2465 hw->mac.addr[0], hw->mac.addr[1],
2467 DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
2468 hw->mac.addr[4], hw->mac.addr[5]);
/* Program the override address into RAR[0] and mark it valid. */
2470 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2473 /* clear VMDq pool/queue selection for RAR 0 */
2474 hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
2476 hw->addr_ctrl.overflow_promisc = 0;
/* Only RAR[0] is in use after init. */
2478 hw->addr_ctrl.rar_used_count = 1;
2480 /* Zero out the other receive addresses. */
2481 DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
2482 for (i = 1; i < rar_entries; i++) {
2483 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
2484 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
2488 hw->addr_ctrl.mta_in_use = 0;
2489 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2491 DEBUGOUT(" Clearing MTA\n");
2492 for (i = 0; i < hw->mac.mcft_size; i++)
2493 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
2495 ixgbe_init_uta_tables(hw);
2497 return IXGBE_SUCCESS;
2501 * ixgbe_add_uc_addr - Adds a secondary unicast address.
2502 * @hw: pointer to hardware structure
2503 * @addr: new address
2504 * @vmdq: VMDq "set" or "pool" index
2506 * Adds it to unused receive address register or goes into promiscuous mode.
2508 void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
2510 u32 rar_entries = hw->mac.num_rar_entries;
2513 DEBUGFUNC("ixgbe_add_uc_addr");
2515 DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
2516 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
2519 * Place this address in the RAR if there is room,
2520 * else put the controller into promiscuous mode
2522 if (hw->addr_ctrl.rar_used_count < rar_entries) {
2523 rar = hw->addr_ctrl.rar_used_count;
2524 hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
2525 DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
2526 hw->addr_ctrl.rar_used_count++;
2528 hw->addr_ctrl.overflow_promisc++;
2531 DEBUGOUT("ixgbe_add_uc_addr Complete\n");
2535 * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
2536 * @hw: pointer to hardware structure
2537 * @addr_list: the list of new addresses
2538 * @addr_count: number of addresses
2539 * @next: iterator function to walk the address list
2541 * The given list replaces any existing list. Clears the secondary addrs from
2542 * receive address registers. Uses unused receive address registers for the
2543 * first secondary addresses, and falls back to promiscuous mode as needed.
2545 * Drivers using secondary unicast addresses must set user_set_promisc when
2546 * manually putting the device into promiscuous mode.
2548 s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
2549 u32 addr_count, ixgbe_mc_addr_itr next)
2553 u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
2558 DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
2561 * Clear accounting of old secondary address list,
2562 * don't count RAR[0]
2564 uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
2565 hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
2566 hw->addr_ctrl.overflow_promisc = 0;
2568 /* Zero out the other receive addresses */
2569 DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
2570 for (i = 0; i < uc_addr_in_use; i++) {
2571 IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
2572 IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
2575 /* Add the new addresses */
2576 for (i = 0; i < addr_count; i++) {
2577 DEBUGOUT(" Adding the secondary addresses:\n");
2578 addr = next(hw, &addr_list, &vmdq);
2579 ixgbe_add_uc_addr(hw, addr, vmdq);
2582 if (hw->addr_ctrl.overflow_promisc) {
2583 /* enable promisc if not already in overflow or set by user */
2584 if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2585 DEBUGOUT(" Entering address overflow promisc mode\n");
2586 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2587 fctrl |= IXGBE_FCTRL_UPE;
2588 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2591 /* only disable if set by overflow, not by user */
2592 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
2593 DEBUGOUT(" Leaving address overflow promisc mode\n");
2594 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2595 fctrl &= ~IXGBE_FCTRL_UPE;
2596 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2600 DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
2601 return IXGBE_SUCCESS;
/**
 * ixgbe_mta_vector - Determines bit-vector in multicast table to set
 * @hw: pointer to hardware structure
 * @mc_addr: the multicast address
 *
 * Extracts the 12 bits, from a multicast address, to determine which
 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
 * incoming rx multicast addresses, to determine the bit-vector to check in
 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
 * by the MO field of the MCSTCTRL. The MO field is set during initialization
 * to mc_filter_type.
 **/
STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
{
	u32 vector = 0;

	DEBUGFUNC("ixgbe_mta_vector");

	/* mc_filter_type mirrors the MO field written to MCSTCTRL */
	switch (hw->mac.mc_filter_type) {
	case 0:	  /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1:	  /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2:	  /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3:	  /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default:  /* Invalid mc_filter_type */
		DEBUGOUT("MC filter type param set incorrectly\n");
		ASSERT(0);
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}
2647 * ixgbe_set_mta - Set bit-vector in multicast table
2648 * @hw: pointer to hardware structure
2649 * @mc_addr: Multicast address
2651 * Sets the bit-vector in the multicast table.
2653 void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
2659 DEBUGFUNC("ixgbe_set_mta");
2661 hw->addr_ctrl.mta_in_use++;
2663 vector = ixgbe_mta_vector(hw, mc_addr);
2664 DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
2667 * The MTA is a register array of 128 32-bit registers. It is treated
2668 * like an array of 4096 bits. We want to set bit
2669 * BitArray[vector_value]. So we figure out what register the bit is
2670 * in, read it, OR in the new bit, then write back the new value. The
2671 * register is determined by the upper 7 bits of the vector value and
2672 * the bit within that register are determined by the lower 5 bits of
2675 vector_reg = (vector >> 5) & 0x7F;
2676 vector_bit = vector & 0x1F;
2677 hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
2681 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
2682 * @hw: pointer to hardware structure
2683 * @mc_addr_list: the list of new multicast addresses
2684 * @mc_addr_count: number of addresses
2685 * @next: iterator function to walk the multicast address list
2686 * @clear: flag, when set clears the table beforehand
2688 * When the clear flag is set, the given list replaces any existing list.
2689 * Hashes the given addresses into the multicast table.
2691 s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
2692 u32 mc_addr_count, ixgbe_mc_addr_itr next,
2698 DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
2701 * Set the new number of MC addresses that we are being requested to
2704 hw->addr_ctrl.num_mc_addrs = mc_addr_count;
2705 hw->addr_ctrl.mta_in_use = 0;
2707 /* Clear mta_shadow */
2709 DEBUGOUT(" Clearing MTA\n");
2710 memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
2713 /* Update mta_shadow */
2714 for (i = 0; i < mc_addr_count; i++) {
2715 DEBUGOUT(" Adding the multicast addresses:\n");
2716 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
2720 for (i = 0; i < hw->mac.mcft_size; i++)
2721 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
2722 hw->mac.mta_shadow[i]);
2724 if (hw->addr_ctrl.mta_in_use > 0)
2725 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
2726 IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
2728 DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
2729 return IXGBE_SUCCESS;
2733 * ixgbe_enable_mc_generic - Enable multicast address in RAR
2734 * @hw: pointer to hardware structure
2736 * Enables multicast address in RAR and the use of the multicast hash table.
2738 s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
2740 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2742 DEBUGFUNC("ixgbe_enable_mc_generic");
2744 if (a->mta_in_use > 0)
2745 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
2746 hw->mac.mc_filter_type);
2748 return IXGBE_SUCCESS;
2752 * ixgbe_disable_mc_generic - Disable multicast address in RAR
2753 * @hw: pointer to hardware structure
2755 * Disables multicast address in RAR and the use of the multicast hash table.
2757 s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
2759 struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
2761 DEBUGFUNC("ixgbe_disable_mc_generic");
2763 if (a->mta_in_use > 0)
2764 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
2766 return IXGBE_SUCCESS;
/**
 * ixgbe_fc_enable_generic - Enable flow control
 * @hw: pointer to hardware structure
 *
 * Enable flow control according to the current settings.
 *
 * Returns IXGBE_ERR_INVALID_LINK_SETTINGS for a zero pause time or an
 * inconsistent water-mark configuration, IXGBE_ERR_CONFIG for an invalid
 * fc.current_mode, IXGBE_SUCCESS otherwise.
 **/
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
{
	s32 ret_val = IXGBE_SUCCESS;
	u32 mflcn_reg, fccfg_reg;
	u32 reg;
	u32 fcrtl, fcrth;
	int i;

	DEBUGFUNC("ixgbe_fc_enable_generic");

	/* Validate the water mark configuration */
	if (!hw->fc.pause_time) {
		ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
		goto out;
	}

	/* Low water mark of zero causes XOFF floods */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			if (!hw->fc.low_water[i] ||
			    hw->fc.low_water[i] >= hw->fc.high_water[i]) {
				DEBUGOUT("Invalid water mark configuration\n");
				ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
				goto out;
			}
		}
	}

	/* Negotiate the fc mode to use */
	hw->mac.ops.fc_autoneg(hw);

	/* Disable any previous flow control settings */
	mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
	mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

	fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
	fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);

	/*
	 * The possible values of fc.current_mode are:
	 * 0: Flow control is completely disabled
	 * 1: Rx flow control is enabled (we can receive pause frames,
	 *    but not send pause frames).
	 * 2: Tx flow control is enabled (we can send pause frames but
	 *    we do not support receiving pause frames).
	 * 3: Both Rx and Tx flow control (symmetric) are enabled.
	 * other: Invalid.
	 */
	switch (hw->fc.current_mode) {
	case ixgbe_fc_none:
		/*
		 * Flow control is disabled by software override or autoneg.
		 * The code below will actually disable it in the HW.
		 */
		break;
	case ixgbe_fc_rx_pause:
		/*
		 * Rx Flow control is enabled and Tx Flow control is
		 * disabled by software override. Since there really
		 * isn't a way to advertise that we are capable of RX
		 * Pause ONLY, we will advertise that we support both
		 * symmetric and asymmetric Rx PAUSE. Later, we will
		 * disable the adapter's ability to send PAUSE frames.
		 */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		break;
	case ixgbe_fc_tx_pause:
		/*
		 * Tx Flow control is enabled, and Rx Flow control is
		 * disabled by software override.
		 */
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	case ixgbe_fc_full:
		/* Flow control (both Rx and Tx) is enabled by SW override. */
		mflcn_reg |= IXGBE_MFLCN_RFCE;
		fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
		break;
	default:
		ERROR_REPORT1(IXGBE_ERROR_ARGUMENT,
			      "Flow control param set incorrectly\n");
		ret_val = IXGBE_ERR_CONFIG;
		goto out;
	}

	/* Set 802.3x based flow control settings. */
	mflcn_reg |= IXGBE_MFLCN_DPF;
	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
		if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
		    hw->fc.high_water[i]) {
			/* Water marks are stored in KB units; <<10 -> bytes */
			fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
			fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
		} else {
			IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
			/*
			 * In order to prevent Tx hangs when the internal Tx
			 * switch is enabled we must set the high water mark
			 * to the Rx packet buffer size - 24KB. This allows
			 * the Tx switch to function even under heavy Rx
			 * workloads.
			 */
			fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
		}

		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
	}

	/* Configure pause time (2 TCs per register) */
	reg = hw->fc.pause_time * 0x00010001;
	for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
		IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

	/* Configure flow control refresh threshold value */
	IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

out:
	return ret_val;
}
2903 * ixgbe_negotiate_fc - Negotiate flow control
2904 * @hw: pointer to hardware structure
2905 * @adv_reg: flow control advertised settings
2906 * @lp_reg: link partner's flow control settings
2907 * @adv_sym: symmetric pause bit in advertisement
2908 * @adv_asm: asymmetric pause bit in advertisement
2909 * @lp_sym: symmetric pause bit in link partner advertisement
2910 * @lp_asm: asymmetric pause bit in link partner advertisement
2912 * Find the intersection between advertised settings and link partner's
2913 * advertised settings
2915 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2916 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2918 if ((!(adv_reg)) || (!(lp_reg))) {
2919 ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED,
2920 "Local or link partner's advertised flow control "
2921 "settings are NULL. Local: %x, link partner: %x\n",
2923 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2926 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2928 * Now we need to check if the user selected Rx ONLY
2929 * of pause frames. In this case, we had to advertise
2930 * FULL flow control because we could not advertise RX
2931 * ONLY. Hence, we must now check to see if we need to
2932 * turn OFF the TRANSMISSION of PAUSE frames.
2934 if (hw->fc.requested_mode == ixgbe_fc_full) {
2935 hw->fc.current_mode = ixgbe_fc_full;
2936 DEBUGOUT("Flow Control = FULL.\n");
2938 hw->fc.current_mode = ixgbe_fc_rx_pause;
2939 DEBUGOUT("Flow Control=RX PAUSE frames only\n");
2941 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2942 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2943 hw->fc.current_mode = ixgbe_fc_tx_pause;
2944 DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
2945 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2946 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2947 hw->fc.current_mode = ixgbe_fc_rx_pause;
2948 DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
2950 hw->fc.current_mode = ixgbe_fc_none;
2951 DEBUGOUT("Flow Control = NONE.\n");
2953 return IXGBE_SUCCESS;
2957 * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
2958 * @hw: pointer to hardware structure
2960 * Enable flow control according on 1 gig fiber.
2962 STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2964 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2965 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2968 * On multispeed fiber at 1g, bail out if
2969 * - link is up but AN did not complete, or if
2970 * - link is up and AN completed but timed out
2973 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2974 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2975 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
2976 DEBUGOUT("Auto-Negotiation did not complete or timed out\n");
2980 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2981 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
2983 ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg,
2984 pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
2985 IXGBE_PCS1GANA_ASM_PAUSE,
2986 IXGBE_PCS1GANA_SYM_PAUSE,
2987 IXGBE_PCS1GANA_ASM_PAUSE);
2994 * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
2995 * @hw: pointer to hardware structure
2997 * Enable flow control according to IEEE clause 37.
2999 STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
3001 u32 links2, anlp1_reg, autoc_reg, links;
3002 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3005 * On backplane, bail out if
3006 * - backplane autoneg was not completed, or if
3007 * - we are 82599 and link partner is not AN enabled
3009 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
3010 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
3011 DEBUGOUT("Auto-Negotiation did not complete\n");
3015 if (hw->mac.type == ixgbe_mac_82599EB) {
3016 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
3017 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
3018 DEBUGOUT("Link partner is not AN enabled\n");
3023 * Read the 10g AN autoc and LP ability registers and resolve
3024 * local flow control settings accordingly
3026 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3027 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
3029 ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
3030 anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
3031 IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
3038 * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
3039 * @hw: pointer to hardware structure
3041 * Enable flow control according to IEEE clause 37.
3043 STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
3045 u16 technology_ability_reg = 0;
3046 u16 lp_technology_ability_reg = 0;
3048 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
3049 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3050 &technology_ability_reg);
3051 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
3052 IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
3053 &lp_technology_ability_reg);
3055 return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
3056 (u32)lp_technology_ability_reg,
3057 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
3058 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
3062 * ixgbe_fc_autoneg - Configure flow control
3063 * @hw: pointer to hardware structure
3065 * Compares our advertised flow control capabilities to those advertised by
3066 * our link partner, and determines the proper flow control mode to use.
3068 void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
3070 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
3071 ixgbe_link_speed speed;
3074 DEBUGFUNC("ixgbe_fc_autoneg");
3077 * AN should have completed when the cable was plugged in.
3078 * Look for reasons to bail out. Bail out if:
3079 * - FC autoneg is disabled, or if
3082 if (hw->fc.disable_fc_autoneg) {
3083 ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
3084 "Flow control autoneg is disabled");
3088 hw->mac.ops.check_link(hw, &speed, &link_up, false);
3090 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down");
3094 switch (hw->phy.media_type) {
3095 /* Autoneg flow control on fiber adapters */
3096 case ixgbe_media_type_fiber_qsfp:
3097 case ixgbe_media_type_fiber:
3098 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
3099 ret_val = ixgbe_fc_autoneg_fiber(hw);
3102 /* Autoneg flow control on backplane adapters */
3103 case ixgbe_media_type_backplane:
3104 ret_val = ixgbe_fc_autoneg_backplane(hw);
3107 /* Autoneg flow control on copper adapters */
3108 case ixgbe_media_type_copper:
3109 if (ixgbe_device_supports_autoneg_fc(hw))
3110 ret_val = ixgbe_fc_autoneg_copper(hw);
3118 if (ret_val == IXGBE_SUCCESS) {
3119 hw->fc.fc_was_autonegged = true;
3121 hw->fc.fc_was_autonegged = false;
3122 hw->fc.current_mode = hw->fc.requested_mode;
3127 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
3128 * @hw: pointer to hardware structure
3130 * System-wide timeout range is encoded in PCIe Device Control2 register.
3132 * Add 10% to specified maximum and return the number of times to poll for
3133 * completion timeout, in units of 100 microsec. Never return less than
3134 * 800 = 80 millisec.
3136 STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
3141 devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
3142 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
3145 case IXGBE_PCIDEVCTRL2_65_130ms:
3146 pollcnt = 1300; /* 130 millisec */
3148 case IXGBE_PCIDEVCTRL2_260_520ms:
3149 pollcnt = 5200; /* 520 millisec */
3151 case IXGBE_PCIDEVCTRL2_1_2s:
3152 pollcnt = 20000; /* 2 sec */
3154 case IXGBE_PCIDEVCTRL2_4_8s:
3155 pollcnt = 80000; /* 8 sec */
3157 case IXGBE_PCIDEVCTRL2_17_34s:
3158 pollcnt = 34000; /* 34 sec */
3160 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
3161 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
3162 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
3163 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
3165 pollcnt = 800; /* 80 millisec minimum */
3169 /* add 10% to spec maximum */
3170 return (pollcnt * 11) / 10;
/**
 * ixgbe_disable_pcie_master - Disable PCI-express master access
 * @hw: pointer to hardware structure
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
 * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
 * is returned signifying master requests disabled.
 **/
s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	u32 i, poll;
	u16 value;

	DEBUGFUNC("ixgbe_disable_pcie_master");

	/* Always set this bit to ensure any future transactions are blocked */
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);

	/* Exit if master requests are blocked */
	if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
	    IXGBE_REMOVED(hw->hw_addr))
		goto out;

	/* Poll for master request bit to clear */
	for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
		usec_delay(100);
		if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
			goto out;
	}

	/*
	 * Two consecutive resets are required via CTRL.RST per datasheet
	 * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
	 * of this need. The first reset prevents new master requests from
	 * being issued by our device. We then must wait 1usec or more for any
	 * remaining completions from the PCIe bus to trickle in, and then reset
	 * again to clear out any effects they may have had on our device.
	 */
	DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
	hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;

	/* X550 and newer do not need the PCIe-transaction poll below */
	if (hw->mac.type >= ixgbe_mac_X550)
		goto out;

	/*
	 * Before proceeding, make sure that the PCIe block does not have
	 * transactions pending.
	 */
	poll = ixgbe_pcie_timeout_poll(hw);
	for (i = 0; i < poll; i++) {
		usec_delay(100);
		value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
		if (IXGBE_REMOVED(hw->hw_addr))
			goto out;
		if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			goto out;
	}

	ERROR_REPORT1(IXGBE_ERROR_POLLING,
		      "PCIe transaction pending bit also did not clear.\n");
	status = IXGBE_ERR_MASTER_REQUESTS_PENDING;

out:
	return status;
}
/**
 * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
 * @hw: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SWFW semaphore through the GSSR register for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 *
 * Returns IXGBE_SUCCESS when the requested GSSR bits were taken,
 * IXGBE_ERR_SWFW_SYNC when the EEPROM semaphore or the resource could not
 * be acquired within the timeout.
 **/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask)
{
	u32 gssr = 0;
	u32 swmask = mask;
	u32 fwmask = mask << 5;	/* FW's bits for a resource are SW's << 5 */
	u32 timeout = 200;
	u32 i;

	DEBUGFUNC("ixgbe_acquire_swfw_sync");

	for (i = 0; i < timeout; i++) {
		/*
		 * SW NVM semaphore bit is used for access to all
		 * SW_FW_SYNC bits (not just NVM)
		 */
		if (ixgbe_get_eeprom_semaphore(hw))
			return IXGBE_ERR_SWFW_SYNC;

		gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
		if (!(gssr & (fwmask | swmask))) {
			/* Resource is free: claim it and release the gate */
			gssr |= swmask;
			IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
			ixgbe_release_eeprom_semaphore(hw);
			return IXGBE_SUCCESS;
		} else {
			/* Resource is currently in use by FW or SW */
			ixgbe_release_eeprom_semaphore(hw);
			msec_delay(5);
		}
	}

	/* If time expired clear the bits holding the lock and retry */
	if (gssr & (fwmask | swmask))
		ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));

	msec_delay(5);
	return IXGBE_ERR_SWFW_SYNC;
}
3289 * ixgbe_release_swfw_sync - Release SWFW semaphore
3290 * @hw: pointer to hardware structure
3291 * @mask: Mask to specify which semaphore to release
3293 * Releases the SWFW semaphore through the GSSR register for the specified
3294 * function (CSR, PHY0, PHY1, EEPROM, Flash)
3296 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask)
3301 DEBUGFUNC("ixgbe_release_swfw_sync");
3303 ixgbe_get_eeprom_semaphore(hw);
3305 gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
3307 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
3309 ixgbe_release_eeprom_semaphore(hw);
3313 * ixgbe_disable_sec_rx_path_generic - Stops the receive data path
3314 * @hw: pointer to hardware structure
3316 * Stops the receive data path and waits for the HW to internally empty
3317 * the Rx security block
3319 s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
3321 #define IXGBE_MAX_SECRX_POLL 4000
3326 DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
3329 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3330 secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
3331 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3332 for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
3333 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
3334 if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
3337 /* Use interrupt-safe sleep just in case */
3341 /* For informational purposes only */
3342 if (i >= IXGBE_MAX_SECRX_POLL)
3343 DEBUGOUT("Rx unit being enabled before security "
3344 "path fully disabled. Continuing with init.\n");
3346 return IXGBE_SUCCESS;
3350 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
3351 * @hw: pointer to hardware structure
3352 * @locked: bool to indicate whether the SW/FW lock was taken
3353 * @reg_val: Value we read from AUTOC
3355 * The default case requires no protection so just to the register read.
3357 s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
3360 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
3361 return IXGBE_SUCCESS;
3365 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
3366 * @hw: pointer to hardware structure
3367 * @reg_val: value to write to AUTOC
3368 * @locked: bool to indicate whether the SW/FW lock was already taken by
3371 * The default case requires no protection so just to the register write.
3373 s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
3375 UNREFERENCED_1PARAMETER(locked);
3377 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
3378 return IXGBE_SUCCESS;
3382 * ixgbe_enable_sec_rx_path_generic - Enables the receive data path
3383 * @hw: pointer to hardware structure
3385 * Enables the receive data path.
3387 s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw)
3391 DEBUGFUNC("ixgbe_enable_sec_rx_path_generic");
3393 secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
3394 secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
3395 IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
3396 IXGBE_WRITE_FLUSH(hw);
3398 return IXGBE_SUCCESS;
3402 * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
3403 * @hw: pointer to hardware structure
3404 * @regval: register value to write to RXCTRL
3406 * Enables the Rx DMA unit
3408 s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
3410 DEBUGFUNC("ixgbe_enable_rx_dma_generic");
3412 if (regval & IXGBE_RXCTRL_RXEN)
3413 ixgbe_enable_rx(hw);
3415 ixgbe_disable_rx(hw);
3417 return IXGBE_SUCCESS;
/**
 * ixgbe_blink_led_start_generic - Blink LED based on index.
 * @hw: pointer to hardware structure
 * @index: led number to blink (0-3)
 *
 * Returns IXGBE_ERR_PARAM for an out-of-range index, otherwise the result
 * of the protected AUTOC access (IXGBE_SUCCESS on success).
 **/
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
	ixgbe_link_speed speed = 0;
	bool link_up = 0;
	u32 autoc_reg = 0;
	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	s32 ret_val = IXGBE_SUCCESS;
	bool locked = false;

	DEBUGFUNC("ixgbe_blink_led_start_generic");

	/* Only four LED outputs exist */
	if (index > 3)
		return IXGBE_ERR_PARAM;

	/*
	 * Link must be up to auto-blink the LEDs;
	 * Force it if link is down.
	 */
	hw->mac.ops.check_link(hw, &speed, &link_up, false);

	if (!link_up) {
		/* Read-modify-write AUTOC through the MAC's protected ops */
		ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		/* Force link up and restart autoneg so the LED can blink */
		autoc_reg |= IXGBE_AUTOC_AN_RESTART;
		autoc_reg |= IXGBE_AUTOC_FLU;

		ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
		if (ret_val != IXGBE_SUCCESS)
			goto out;

		IXGBE_WRITE_FLUSH(hw);
		msec_delay(10);
	}

	/* Select blink mode for the requested LED */
	led_reg &= ~IXGBE_LED_MODE_MASK(index);
	led_reg |= IXGBE_LED_BLINK(index);
	IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
	IXGBE_WRITE_FLUSH(hw);

out:
	return ret_val;
}
3471 * ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
3472 * @hw: pointer to hardware structure
3473 * @index: led number to stop blinking
3475 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
3478 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
3479 s32 ret_val = IXGBE_SUCCESS;
3480 bool locked = false;
3482 DEBUGFUNC("ixgbe_blink_led_stop_generic");
3485 return IXGBE_ERR_PARAM;
3488 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
3489 if (ret_val != IXGBE_SUCCESS)
3492 autoc_reg &= ~IXGBE_AUTOC_FLU;
3493 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
3495 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
3496 if (ret_val != IXGBE_SUCCESS)
3499 led_reg &= ~IXGBE_LED_MODE_MASK(index);
3500 led_reg &= ~IXGBE_LED_BLINK(index);
3501 led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
3502 IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
3503 IXGBE_WRITE_FLUSH(hw);
3510 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
3511 * @hw: pointer to hardware structure
3512 * @san_mac_offset: SAN MAC address offset
3514 * This function will read the EEPROM location for the SAN MAC address
3515 * pointer, and returns the value at that location. This is used in both
3516 * get and set mac_addr routines.
3518 STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
3519 u16 *san_mac_offset)
3523 DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
3526 * First read the EEPROM pointer to see if the MAC addresses are
3529 ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
3532 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3533 "eeprom at offset %d failed",
3534 IXGBE_SAN_MAC_ADDR_PTR);
3541 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
3542 * @hw: pointer to hardware structure
3543 * @san_mac_addr: SAN MAC address
3545 * Reads the SAN MAC address from the EEPROM, if it's available. This is
3546 * per-port, so set_lan_id() must be called before reading the addresses.
3547 * set_lan_id() is called by identify_sfp(), but this cannot be relied
3548 * upon for non-SFP connections, so we must call it here.
3550 s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3552 u16 san_mac_data, san_mac_offset;
3556 DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
3559 * First read the EEPROM pointer to see if the MAC addresses are
3560 * available. If they're not, no point in calling set_lan_id() here.
3562 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3563 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3564 goto san_mac_addr_out;
3566 /* make sure we know which port we need to program */
3567 hw->mac.ops.set_lan_id(hw);
3568 /* apply the port offset to the address offset */
3569 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3570 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3571 for (i = 0; i < 3; i++) {
3572 ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
3575 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
3576 "eeprom read at offset %d failed",
3578 goto san_mac_addr_out;
3580 san_mac_addr[i * 2] = (u8)(san_mac_data);
3581 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
3584 return IXGBE_SUCCESS;
3588 * No addresses available in this EEPROM. It's not an
3589 * error though, so just wipe the local address and return.
3591 for (i = 0; i < 6; i++)
3592 san_mac_addr[i] = 0xFF;
3593 return IXGBE_SUCCESS;
3597 * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
3598 * @hw: pointer to hardware structure
3599 * @san_mac_addr: SAN MAC address
3601 * Write a SAN MAC address to the EEPROM.
3603 s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
3606 u16 san_mac_data, san_mac_offset;
3609 DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
3611 /* Look for SAN mac address pointer. If not defined, return */
3612 ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
3613 if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
3614 return IXGBE_ERR_NO_SAN_ADDR_PTR;
3616 /* Make sure we know which port we need to write */
3617 hw->mac.ops.set_lan_id(hw);
3618 /* Apply the port offset to the address offset */
3619 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
3620 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
3622 for (i = 0; i < 3; i++) {
3623 san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
3624 san_mac_data |= (u16)(san_mac_addr[i * 2]);
3625 hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
3629 return IXGBE_SUCCESS;
3633 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
3634 * @hw: pointer to hardware structure
3636 * Read PCIe configuration space, and get the MSI-X vector count from
3637 * the capabilities table.
3639 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
3645 switch (hw->mac.type) {
3646 case ixgbe_mac_82598EB:
3647 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
3648 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
3650 case ixgbe_mac_82599EB:
3651 case ixgbe_mac_X540:
3652 case ixgbe_mac_X550:
3653 case ixgbe_mac_X550EM_x:
3654 case ixgbe_mac_X550EM_a:
3655 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
3656 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
3662 DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
3663 msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset);
3664 if (IXGBE_REMOVED(hw->hw_addr))
3666 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
3668 /* MSI-X count is zero-based in HW */
3671 if (msix_count > max_msix_count)
3672 msix_count = max_msix_count;
3678 * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
3679 * @hw: pointer to hardware structure
3680 * @addr: Address to put into receive address register
3681 * @vmdq: VMDq pool to assign
3683 * Puts an ethernet address into a receive address register, or
3684 * finds the rar that it is already in; adds to the pool list
3686 s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
3688 static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
3689 u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
3691 u32 rar_low, rar_high;
3692 u32 addr_low, addr_high;
3694 DEBUGFUNC("ixgbe_insert_mac_addr_generic");
3696 /* swap bytes for HW little endian */
3697 addr_low = addr[0] | (addr[1] << 8)
3700 addr_high = addr[4] | (addr[5] << 8);
3703 * Either find the mac_id in rar or find the first empty space.
3704 * rar_highwater points to just after the highest currently used
3705 * rar in order to shorten the search. It grows when we add a new
3708 for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
3709 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
3711 if (((IXGBE_RAH_AV & rar_high) == 0)
3712 && first_empty_rar == NO_EMPTY_RAR_FOUND) {
3713 first_empty_rar = rar;
3714 } else if ((rar_high & 0xFFFF) == addr_high) {
3715 rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
3716 if (rar_low == addr_low)
3717 break; /* found it already in the rars */
3721 if (rar < hw->mac.rar_highwater) {
3722 /* already there so just add to the pool bits */
3723 ixgbe_set_vmdq(hw, rar, vmdq);
3724 } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
3725 /* stick it into first empty RAR slot we found */
3726 rar = first_empty_rar;
3727 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3728 } else if (rar == hw->mac.rar_highwater) {
3729 /* add it to the top of the list and inc the highwater mark */
3730 ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
3731 hw->mac.rar_highwater++;
3732 } else if (rar >= hw->mac.num_rar_entries) {
3733 return IXGBE_ERR_INVALID_MAC_ADDR;
3737 * If we found rar[0], make sure the default pool bit (we use pool 0)
3738 * remains cleared to be sure default pool packets will get delivered
3741 ixgbe_clear_vmdq(hw, rar, 0);
3747 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
3748 * @hw: pointer to hardware struct
3749 * @rar: receive address register index to disassociate
3750 * @vmdq: VMDq pool index to remove from the rar
3752 s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3754 u32 mpsar_lo, mpsar_hi;
3755 u32 rar_entries = hw->mac.num_rar_entries;
3757 DEBUGFUNC("ixgbe_clear_vmdq_generic");
3759 /* Make sure we are using a valid rar index range */
3760 if (rar >= rar_entries) {
3761 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3762 "RAR index %d is out of range.\n", rar);
3763 return IXGBE_ERR_INVALID_ARGUMENT;
3766 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3767 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3769 if (IXGBE_REMOVED(hw->hw_addr))
3772 if (!mpsar_lo && !mpsar_hi)
3775 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
3777 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3778 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3781 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3782 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3784 } else if (vmdq < 32) {
3785 mpsar_lo &= ~(1 << vmdq);
3786 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
3788 mpsar_hi &= ~(1 << (vmdq - 32));
3789 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
3792 /* was that the last pool using this rar? */
3793 if (mpsar_lo == 0 && mpsar_hi == 0 &&
3794 rar != 0 && rar != hw->mac.san_mac_rar_index)
3795 hw->mac.ops.clear_rar(hw, rar);
3797 return IXGBE_SUCCESS;
3801 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
3802 * @hw: pointer to hardware struct
3803 * @rar: receive address register index to associate with a VMDq index
3804 * @vmdq: VMDq pool index
3806 s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
3809 u32 rar_entries = hw->mac.num_rar_entries;
3811 DEBUGFUNC("ixgbe_set_vmdq_generic");
3813 /* Make sure we are using a valid rar index range */
3814 if (rar >= rar_entries) {
3815 ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
3816 "RAR index %d is out of range.\n", rar);
3817 return IXGBE_ERR_INVALID_ARGUMENT;
3821 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
3823 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
3825 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
3826 mpsar |= 1 << (vmdq - 32);
3827 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
3829 return IXGBE_SUCCESS;
3833 * This function should only be involved in the IOV mode.
3834 * In IOV mode, Default pool is next pool after the number of
3835 * VFs advertized and not 0.
3836 * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
3838 * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
3839 * @hw: pointer to hardware struct
3840 * @vmdq: VMDq pool index
3842 s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
3844 u32 rar = hw->mac.san_mac_rar_index;
3846 DEBUGFUNC("ixgbe_set_vmdq_san_mac");
3849 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
3850 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
3852 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
3853 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
3856 return IXGBE_SUCCESS;
3860 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
3861 * @hw: pointer to hardware structure
3863 s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
3867 DEBUGFUNC("ixgbe_init_uta_tables_generic");
3868 DEBUGOUT(" Clearing UTA\n");
3870 for (i = 0; i < 128; i++)
3871 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
3873 return IXGBE_SUCCESS;
3877 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
3878 * @hw: pointer to hardware structure
3879 * @vlan: VLAN id to write to VLAN filter
3880 * @vlvf_bypass: true to find vlanid only, false returns first empty slot if
3884 * return the VLVF index where this VLAN id should be placed
3887 s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass)
3889 s32 regindex, first_empty_slot;
3892 /* short cut the special case */
3896 /* if vlvf_bypass is set we don't want to use an empty slot, we
3897 * will simply bypass the VLVF if there are no entries present in the
3898 * VLVF that contain our VLAN
3900 first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0;
3902 /* add VLAN enable bit for comparison */
3903 vlan |= IXGBE_VLVF_VIEN;
3905 /* Search for the vlan id in the VLVF entries. Save off the first empty
3906 * slot found along the way.
3908 * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1
3910 for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) {
3911 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
3914 if (!first_empty_slot && !bits)
3915 first_empty_slot = regindex;
3918 /* If we are here then we didn't find the VLAN. Return first empty
3919 * slot we found during our search, else error.
3921 if (!first_empty_slot)
3922 ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n");
3924 return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE;
3928 * ixgbe_set_vfta_generic - Set VLAN filter table
3929 * @hw: pointer to hardware structure
3930 * @vlan: VLAN id to write to VLAN filter
3931 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
3932 * @vlan_on: boolean flag to turn on/off VLAN
3933 * @vlvf_bypass: boolean flag indicating updating default pool is okay
3935 * Turn on/off specified VLAN in the VLAN filter table.
3937 s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
3938 bool vlan_on, bool vlvf_bypass)
3940 u32 regidx, vfta_delta, vfta;
3943 DEBUGFUNC("ixgbe_set_vfta_generic");
3945 if (vlan > 4095 || vind > 63)
3946 return IXGBE_ERR_PARAM;
3949 * this is a 2 part operation - first the VFTA, then the
3950 * VLVF and VLVFB if VT Mode is set
3951 * We don't write the VFTA until we know the VLVF part succeeded.
3955 * The VFTA is a bitstring made up of 128 32-bit registers
3956 * that enable the particular VLAN id, much like the MTA:
3957 * bits[11-5]: which register
3958 * bits[4-0]: which bit in the register
3961 vfta_delta = 1 << (vlan % 32);
3962 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
3965 * vfta_delta represents the difference between the current value
3966 * of vfta and the value we want in the register. Since the diff
3967 * is an XOR mask we can just update the vfta using an XOR
3969 vfta_delta &= vlan_on ? ~vfta : vfta;
3973 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF
3975 ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta,
3977 if (ret_val != IXGBE_SUCCESS) {
3984 /* Update VFTA now that we are ready for traffic */
3986 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta);
3988 return IXGBE_SUCCESS;
3992 * ixgbe_set_vlvf_generic - Set VLAN Pool Filter
3993 * @hw: pointer to hardware structure
3994 * @vlan: VLAN id to write to VLAN filter
3995 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
3996 * @vlan_on: boolean flag to turn on/off VLAN in VLVF
3997 * @vfta_delta: pointer to the difference between the current value of VFTA
3998 * and the desired value
3999 * @vfta: the desired value of the VFTA
4000 * @vlvf_bypass: boolean flag indicating updating default pool is okay
4002 * Turn on/off specified bit in VLVF table.
4004 s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
4005 bool vlan_on, u32 *vfta_delta, u32 vfta,
4011 DEBUGFUNC("ixgbe_set_vlvf_generic");
4013 if (vlan > 4095 || vind > 63)
4014 return IXGBE_ERR_PARAM;
4016 /* If VT Mode is set
4018 * make sure the vlan is in VLVF
4019 * set the vind bit in the matching VLVFB
4021 * clear the pool bit and possibly the vind
4023 if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE))
4024 return IXGBE_SUCCESS;
4026 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass);
4030 bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
4032 /* set the pool bit */
4033 bits |= 1 << (vind % 32);
4037 /* clear the pool bit */
4038 bits ^= 1 << (vind % 32);
4041 !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
4042 /* Clear VFTA first, then disable VLVF. Otherwise
4043 * we run the risk of stray packets leaking into
4044 * the PF via the default pool
4047 IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta);
4049 /* disable VLVF and clear remaining bit from pool */
4050 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
4051 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0);
4053 return IXGBE_SUCCESS;
4056 /* If there are still bits set in the VLVFB registers
4057 * for the VLAN ID indicated we need to see if the
4058 * caller is requesting that we clear the VFTA entry bit.
4059 * If the caller has requested that we clear the VFTA
4060 * entry bit but there are still pools/VFs using this VLAN
4061 * ID entry then ignore the request. We're not worried
4062 * about the case where we're turning the VFTA VLAN ID
4063 * entry bit on, only when requested to turn it off as
4064 * there may be multiple pools and/or VFs using the
4065 * VLAN ID entry. In that case we cannot clear the
4066 * VFTA bit until all pools/VFs using that VLAN ID have also
4067 * been cleared. This will be indicated by "bits" being
4073 /* record pool change and enable VLAN ID if not already enabled */
4074 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits);
4075 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan);
4077 return IXGBE_SUCCESS;
4081 * ixgbe_clear_vfta_generic - Clear VLAN filter table
4082 * @hw: pointer to hardware structure
4084 * Clears the VLAN filer table, and the VMDq index associated with the filter
4086 s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
4090 DEBUGFUNC("ixgbe_clear_vfta_generic");
4092 for (offset = 0; offset < hw->mac.vft_size; offset++)
4093 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
4095 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
4096 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
4097 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
4098 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0);
4101 return IXGBE_SUCCESS;
4105 * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
4106 * @hw: pointer to hardware structure
4108 * Contains the logic to identify if we need to verify link for the
4111 static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
4114 /* Does FW say we need the fix */
4115 if (!hw->need_crosstalk_fix)
4118 /* Only consider SFP+ PHYs i.e. media type fiber */
4119 switch (hw->mac.ops.get_media_type(hw)) {
4120 case ixgbe_media_type_fiber:
4121 case ixgbe_media_type_fiber_qsfp:
4131 * ixgbe_check_mac_link_generic - Determine link and speed status
4132 * @hw: pointer to hardware structure
4133 * @speed: pointer to link speed
4134 * @link_up: true when link is up
4135 * @link_up_wait_to_complete: bool used to wait for link up or not
4137 * Reads the links register to determine if link is up and the current speed
4139 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4140 bool *link_up, bool link_up_wait_to_complete)
4142 u32 links_reg, links_orig;
4145 DEBUGFUNC("ixgbe_check_mac_link_generic");
4147 /* If Crosstalk fix enabled do the sanity check of making sure
4148 * the SFP+ cage is full.
4150 if (ixgbe_need_crosstalk_fix(hw)) {
4153 switch (hw->mac.type) {
4154 case ixgbe_mac_82599EB:
4155 sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4158 case ixgbe_mac_X550EM_x:
4159 case ixgbe_mac_X550EM_a:
4160 sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
4164 /* sanity check - No SFP+ devices here */
4165 sfp_cage_full = false;
4169 if (!sfp_cage_full) {
4171 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4172 return IXGBE_SUCCESS;
4176 /* clear the old state */
4177 links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
4179 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4181 if (links_orig != links_reg) {
4182 DEBUGOUT2("LINKS changed from %08X to %08X\n",
4183 links_orig, links_reg);
4186 if (link_up_wait_to_complete) {
4187 for (i = 0; i < hw->mac.max_link_up_time; i++) {
4188 if (links_reg & IXGBE_LINKS_UP) {
4195 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
4198 if (links_reg & IXGBE_LINKS_UP)
4204 switch (links_reg & IXGBE_LINKS_SPEED_82599) {
4205 case IXGBE_LINKS_SPEED_10G_82599:
4206 *speed = IXGBE_LINK_SPEED_10GB_FULL;
4207 if (hw->mac.type >= ixgbe_mac_X550) {
4208 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4209 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
4212 case IXGBE_LINKS_SPEED_1G_82599:
4213 *speed = IXGBE_LINK_SPEED_1GB_FULL;
4215 case IXGBE_LINKS_SPEED_100_82599:
4216 *speed = IXGBE_LINK_SPEED_100_FULL;
4217 if (hw->mac.type == ixgbe_mac_X550) {
4218 if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
4219 *speed = IXGBE_LINK_SPEED_5GB_FULL;
4222 case IXGBE_LINKS_SPEED_10_X550EM_A:
4223 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4224 #ifdef PREBOOT_SUPPORT
4225 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
4226 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L ||
4227 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
4228 hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
4229 *speed = IXGBE_LINK_SPEED_10_FULL;
4231 if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
4232 hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)
4233 *speed = IXGBE_LINK_SPEED_10_FULL;
4234 #endif /* PREBOOT_SUPPORT */
4237 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4240 return IXGBE_SUCCESS;
4244 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
4246 * @hw: pointer to hardware structure
4247 * @wwnn_prefix: the alternative WWNN prefix
4248 * @wwpn_prefix: the alternative WWPN prefix
4250 * This function will read the EEPROM from the alternative SAN MAC address
4251 * block to check the support for the alternative WWNN/WWPN prefix support.
4253 s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
4257 u16 alt_san_mac_blk_offset;
4259 DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
4261 /* clear output first */
4262 *wwnn_prefix = 0xFFFF;
4263 *wwpn_prefix = 0xFFFF;
4265 /* check if alternative SAN MAC is supported */
4266 offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
4267 if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
4268 goto wwn_prefix_err;
4270 if ((alt_san_mac_blk_offset == 0) ||
4271 (alt_san_mac_blk_offset == 0xFFFF))
4272 goto wwn_prefix_out;
4274 /* check capability in alternative san mac address block */
4275 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
4276 if (hw->eeprom.ops.read(hw, offset, &caps))
4277 goto wwn_prefix_err;
4278 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
4279 goto wwn_prefix_out;
4281 /* get the corresponding prefix for WWNN/WWPN */
4282 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
4283 if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) {
4284 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4285 "eeprom read at offset %d failed", offset);
4288 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
4289 if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
4290 goto wwn_prefix_err;
4293 return IXGBE_SUCCESS;
4296 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4297 "eeprom read at offset %d failed", offset);
4298 return IXGBE_SUCCESS;
4302 * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
4303 * @hw: pointer to hardware structure
4304 * @bs: the fcoe boot status
4306 * This function will read the FCOE boot status from the iSCSI FCOE block
4308 s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
4310 u16 offset, caps, flags;
4313 DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
4315 /* clear output first */
4316 *bs = ixgbe_fcoe_bootstatus_unavailable;
4318 /* check if FCOE IBA block is present */
4319 offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
4320 status = hw->eeprom.ops.read(hw, offset, &caps);
4321 if (status != IXGBE_SUCCESS)
4324 if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
4327 /* check if iSCSI FCOE block is populated */
4328 status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
4329 if (status != IXGBE_SUCCESS)
4332 if ((offset == 0) || (offset == 0xFFFF))
4335 /* read fcoe flags in iSCSI FCOE block */
4336 offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
4337 status = hw->eeprom.ops.read(hw, offset, &flags);
4338 if (status != IXGBE_SUCCESS)
4341 if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
4342 *bs = ixgbe_fcoe_bootstatus_enabled;
4344 *bs = ixgbe_fcoe_bootstatus_disabled;
4351 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
4352 * @hw: pointer to hardware structure
4353 * @enable: enable or disable switch for MAC anti-spoofing
4354 * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
4357 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4359 int vf_target_reg = vf >> 3;
4360 int vf_target_shift = vf % 8;
4363 if (hw->mac.type == ixgbe_mac_82598EB)
4366 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4368 pfvfspoof |= (1 << vf_target_shift);
4370 pfvfspoof &= ~(1 << vf_target_shift);
4371 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4375 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
4376 * @hw: pointer to hardware structure
4377 * @enable: enable or disable switch for VLAN anti-spoofing
4378 * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
4381 void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
4383 int vf_target_reg = vf >> 3;
4384 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
4387 if (hw->mac.type == ixgbe_mac_82598EB)
4390 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
4392 pfvfspoof |= (1 << vf_target_shift);
4394 pfvfspoof &= ~(1 << vf_target_shift);
4395 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
4399 * ixgbe_get_device_caps_generic - Get additional device capabilities
4400 * @hw: pointer to hardware structure
4401 * @device_caps: the EEPROM word with the extra device capabilities
4403 * This function will read the EEPROM location for the device capabilities,
4404 * and return the word through device_caps.
4406 s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
4408 DEBUGFUNC("ixgbe_get_device_caps_generic");
4410 hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
4412 return IXGBE_SUCCESS;
4416 * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
4417 * @hw: pointer to hardware structure
4420 void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
4425 DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
4427 /* Enable relaxed ordering */
4428 for (i = 0; i < hw->mac.max_tx_queues; i++) {
4429 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
4430 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
4431 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
4434 for (i = 0; i < hw->mac.max_rx_queues; i++) {
4435 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
4436 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
4437 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
4438 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
4444 * ixgbe_calculate_checksum - Calculate checksum for buffer
4445 * @buffer: pointer to EEPROM
4446 * @length: size of EEPROM to calculate a checksum for
4447 * Calculates the checksum for some buffer on a specified length. The
4448 * checksum calculated is returned.
4450 u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
4455 DEBUGFUNC("ixgbe_calculate_checksum");
4460 for (i = 0; i < length; i++)
4463 return (u8) (0 - sum);
4467 * ixgbe_hic_unlocked - Issue command to manageability block unlocked
4468 * @hw: pointer to the HW structure
4469 * @buffer: command to write and where the return status will be placed
4470 * @length: length of buffer, must be multiple of 4 bytes
4471 * @timeout: time in ms to wait for command completion
4473 * Communicates with the manageability block. On success return IXGBE_SUCCESS
4474 * else returns semaphore error when encountering an error acquiring
4475 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4477 * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held
4480 s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length,
4486 DEBUGFUNC("ixgbe_hic_unlocked");
4488 if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4489 DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4490 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4493 /* Set bit 9 of FWSTS clearing FW reset indication */
4494 fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
4495 IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI);
4497 /* Check that the host interface is enabled. */
4498 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4499 if (!(hicr & IXGBE_HICR_EN)) {
4500 DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
4501 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4504 /* Calculate length in DWORDs. We must be DWORD aligned */
4505 if (length % sizeof(u32)) {
4506 DEBUGOUT("Buffer length failure, not aligned to dword");
4507 return IXGBE_ERR_INVALID_ARGUMENT;
4510 dword_len = length >> 2;
4512 /* The device driver writes the relevant command block
4513 * into the ram area.
4515 for (i = 0; i < dword_len; i++)
4516 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4517 i, IXGBE_CPU_TO_LE32(buffer[i]));
4519 /* Setting this bit tells the ARC that a new command is pending. */
4520 IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
4522 for (i = 0; i < timeout; i++) {
4523 hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
4524 if (!(hicr & IXGBE_HICR_C))
4529 /* For each command except "Apply Update" perform
4530 * status checks in the HICR registry.
4532 if ((buffer[0] & IXGBE_HOST_INTERFACE_MASK_CMD) ==
4533 IXGBE_HOST_INTERFACE_APPLY_UPDATE_CMD)
4534 return IXGBE_SUCCESS;
4536 /* Check command completion */
4537 if ((timeout && i == timeout) ||
4538 !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
4539 ERROR_REPORT1(IXGBE_ERROR_CAUTION,
4540 "Command has failed with no status valid.\n");
4541 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4544 return IXGBE_SUCCESS;
4548 * ixgbe_host_interface_command - Issue command to manageability block
4549 * @hw: pointer to the HW structure
4550 * @buffer: contains the command to write and where the return status will
4552 * @length: length of buffer, must be multiple of 4 bytes
4553 * @timeout: time in ms to wait for command completion
4554 * @return_data: read and return data from the buffer (true) or not (false)
4555 * Needed because FW structures are big endian and decoding of
4556 * these fields can be 8 bit or 16 bit based on command. Decoding
4557 * is not easily understood without making a table of commands.
4558 * So we will leave this up to the caller to read back the data
4561 * Communicates with the manageability block. On success return IXGBE_SUCCESS
4562 * else returns semaphore error when encountering an error acquiring
4563 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4565 s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
4566 u32 length, u32 timeout, bool return_data)
4568 u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
4569 struct ixgbe_hic_hdr *resp = (struct ixgbe_hic_hdr *)buffer;
4575 DEBUGFUNC("ixgbe_host_interface_command");
4577 if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
4578 DEBUGOUT1("Buffer length failure buffersize=%d.\n", length);
4579 return IXGBE_ERR_HOST_INTERFACE_COMMAND;
4582 /* Take management host interface semaphore */
4583 status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4587 status = ixgbe_hic_unlocked(hw, buffer, length, timeout);
4594 /* Calculate length in DWORDs */
4595 dword_len = hdr_size >> 2;
4597 /* first pull in the header so we know the buffer length */
4598 for (bi = 0; bi < dword_len; bi++) {
4599 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4600 IXGBE_LE32_TO_CPUS((uintptr_t)&buffer[bi]);
4604 * If there is any thing in data position pull it in
4605 * Read Flash command requires reading buffer length from
4606 * two byes instead of one byte
4608 if (resp->cmd == IXGBE_HOST_INTERFACE_FLASH_READ_CMD ||
4609 resp->cmd == IXGBE_HOST_INTERFACE_SHADOW_RAM_READ_CMD) {
4610 for (; bi < dword_len + 2; bi++) {
4611 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG,
4613 IXGBE_LE32_TO_CPUS(&buffer[bi]);
4615 buf_len = (((u16)(resp->cmd_or_resp.ret_status) << 3)
4616 & 0xF00) | resp->buf_len;
4617 hdr_size += (2 << 2);
4619 buf_len = resp->buf_len;
4624 if (length < buf_len + hdr_size) {
4625 DEBUGOUT("Buffer not large enough for reply message.\n");
4626 status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4630 /* Calculate length in DWORDs, add 3 for odd lengths */
4631 dword_len = (buf_len + 3) >> 2;
4633 /* Pull in the rest of the buffer (bi is where we left off) */
4634 for (; bi <= dword_len; bi++) {
4635 buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
4636 IXGBE_LE32_TO_CPUS((uintptr_t)&buffer[bi]);
4640 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
4646 * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
4647 * @hw: pointer to the HW structure
4648 * @maj: driver version major number
4649 * @min: driver version minor number
4650 * @build: driver version build number
4651 * @sub: driver version sub build number
4653 * @driver_ver: unused
4655 * Sends driver version number to firmware through the manageability
4656 * block. On success return IXGBE_SUCCESS
4657 * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
4658 * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
4660 s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
4661 u8 build, u8 sub, u16 len,
4662 const char *driver_ver)
4664 struct ixgbe_hic_drv_info fw_cmd;
4666 s32 ret_val = IXGBE_SUCCESS;
4668 DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
4669 UNREFERENCED_2PARAMETER(len, driver_ver);
4671 fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
4672 fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
4673 fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
4674 fw_cmd.port_num = (u8)hw->bus.func;
4675 fw_cmd.ver_maj = maj;
4676 fw_cmd.ver_min = min;
4677 fw_cmd.ver_build = build;
4678 fw_cmd.ver_sub = sub;
4679 fw_cmd.hdr.checksum = 0;
4682 fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
4683 (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
4685 for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
4686 ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
4688 IXGBE_HI_COMMAND_TIMEOUT,
4690 if (ret_val != IXGBE_SUCCESS)
4693 if (fw_cmd.hdr.cmd_or_resp.ret_status ==
4694 FW_CEM_RESP_STATUS_SUCCESS)
4695 ret_val = IXGBE_SUCCESS;
4697 ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
4706 * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
4707 * @hw: pointer to hardware structure
4708 * @num_pb: number of packet buffers to allocate
4709 * @headroom: reserve n KB of headroom
4710 * @strategy: packet buffer allocation strategy
4712 void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
4715 u32 pbsize = hw->mac.rx_pb_size;
4717 u32 rxpktsize, txpktsize, txpbthresh;
4719 /* Reserve headroom */
4725 /* Divide remaining packet buffer space amongst the number of packet
4726 * buffers requested using supplied strategy.
4729 case PBA_STRATEGY_WEIGHTED:
4730 /* ixgbe_dcb_pba_80_48 strategy weight first half of packet
4731 * buffer with 5/8 of the packet buffer space.
4733 rxpktsize = (pbsize * 5) / (num_pb * 4);
4734 pbsize -= rxpktsize * (num_pb / 2);
4735 rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
4736 for (; i < (num_pb / 2); i++)
4737 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4738 /* fall through - configure remaining packet buffers */
4739 case PBA_STRATEGY_EQUAL:
4740 rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
4741 for (; i < num_pb; i++)
4742 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
4748 /* Only support an equally distributed Tx packet buffer strategy. */
4749 txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
4750 txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
4751 for (i = 0; i < num_pb; i++) {
4752 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
4753 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
4756 /* Clear unused TCs, if any, to zero buffer size*/
4757 for (; i < IXGBE_MAX_PB; i++) {
4758 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
4759 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
4760 IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
4765 * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
4766 * @hw: pointer to the hardware structure
4768 * The 82599 and x540 MACs can experience issues if TX work is still pending
4769 * when a reset occurs. This function prevents this by flushing the PCIe
4770 * buffers on the system.
4772 void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
4774 u32 gcr_ext, hlreg0, i, poll;
4778 * If double reset is not requested then all transactions should
4779 * already be clear and as such there is no work to do
4781 if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
4785 * Set loopback enable to prevent any transmits from being sent
4786 * should the link come up. This assumes that the RXCTRL.RXEN bit
4787 * has already been cleared.
4789 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
4790 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
4792 /* Wait for a last completion before clearing buffers */
4793 IXGBE_WRITE_FLUSH(hw);
4797 * Before proceeding, make sure that the PCIe block does not have
4798 * transactions pending.
4800 poll = ixgbe_pcie_timeout_poll(hw);
4801 for (i = 0; i < poll; i++) {
4803 value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS);
4804 if (IXGBE_REMOVED(hw->hw_addr))
4806 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
4811 /* initiate cleaning flow for buffers in the PCIe transaction layer */
4812 gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
4813 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
4814 gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
4816 /* Flush all writes and allow 20usec for all transactions to clear */
4817 IXGBE_WRITE_FLUSH(hw);
4820 /* restore previous register values */
4821 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
4822 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
4825 STATIC const u8 ixgbe_emc_temp_data[4] = {
4826 IXGBE_EMC_INTERNAL_DATA,
4827 IXGBE_EMC_DIODE1_DATA,
4828 IXGBE_EMC_DIODE2_DATA,
4829 IXGBE_EMC_DIODE3_DATA
4831 STATIC const u8 ixgbe_emc_therm_limit[4] = {
4832 IXGBE_EMC_INTERNAL_THERM_LIMIT,
4833 IXGBE_EMC_DIODE1_THERM_LIMIT,
4834 IXGBE_EMC_DIODE2_THERM_LIMIT,
4835 IXGBE_EMC_DIODE3_THERM_LIMIT
4839 * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
4840 * @hw: pointer to hardware structure
4842 * Returns the thermal sensor data structure
4844 s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
4846 s32 status = IXGBE_SUCCESS;
4854 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
4856 DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic");
4858 /* Only support thermal sensors attached to 82599 physical port 0 */
4859 if ((hw->mac.type != ixgbe_mac_82599EB) ||
4860 (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
4861 status = IXGBE_NOT_IMPLEMENTED;
4865 status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset);
4869 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) {
4870 status = IXGBE_NOT_IMPLEMENTED;
4874 status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg);
4878 if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
4879 != IXGBE_ETS_TYPE_EMC) {
4880 status = IXGBE_NOT_IMPLEMENTED;
4884 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
4885 if (num_sensors > IXGBE_MAX_SENSORS)
4886 num_sensors = IXGBE_MAX_SENSORS;
4888 for (i = 0; i < num_sensors; i++) {
4889 status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
4894 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
4895 IXGBE_ETS_DATA_INDEX_SHIFT);
4896 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
4897 IXGBE_ETS_DATA_LOC_SHIFT);
4899 if (sensor_location != 0) {
4900 status = hw->phy.ops.read_i2c_byte(hw,
4901 ixgbe_emc_temp_data[sensor_index],
4902 IXGBE_I2C_THERMAL_SENSOR_ADDR,
4903 &data->sensor[i].temp);
4913 * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
4914 * @hw: pointer to hardware structure
4916 * Inits the thermal sensor thresholds according to the NVM map
4917 * and save off the threshold and location values into mac.thermal_sensor_data
4919 s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
4921 s32 status = IXGBE_SUCCESS;
4926 u8 low_thresh_delta;
4932 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
4934 DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic");
4936 memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
4938 /* Only support thermal sensors attached to 82599 physical port 0 */
4939 if ((hw->mac.type != ixgbe_mac_82599EB) ||
4940 (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
4941 return IXGBE_NOT_IMPLEMENTED;
4943 offset = IXGBE_ETS_CFG;
4944 if (hw->eeprom.ops.read(hw, offset, &ets_offset))
4946 if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
4947 return IXGBE_NOT_IMPLEMENTED;
4949 offset = ets_offset;
4950 if (hw->eeprom.ops.read(hw, offset, &ets_cfg))
4952 if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT)
4953 != IXGBE_ETS_TYPE_EMC)
4954 return IXGBE_NOT_IMPLEMENTED;
4956 low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
4957 IXGBE_ETS_LTHRES_DELTA_SHIFT);
4958 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
4960 for (i = 0; i < num_sensors; i++) {
4961 offset = ets_offset + 1 + i;
4962 if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) {
4963 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4964 "eeprom read at offset %d failed",
4968 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
4969 IXGBE_ETS_DATA_INDEX_SHIFT);
4970 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
4971 IXGBE_ETS_DATA_LOC_SHIFT);
4972 therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
4974 hw->phy.ops.write_i2c_byte(hw,
4975 ixgbe_emc_therm_limit[sensor_index],
4976 IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
4978 if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) {
4979 data->sensor[i].location = sensor_location;
4980 data->sensor[i].caution_thresh = therm_limit;
4981 data->sensor[i].max_op_thresh = therm_limit -
4988 ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE,
4989 "eeprom read at offset %d failed", offset);
4990 return IXGBE_NOT_IMPLEMENTED;
4994 * ixgbe_get_orom_version - Return option ROM from EEPROM
4996 * @hw: pointer to hardware structure
4997 * @nvm_ver: pointer to output structure
4999 * if valid option ROM version, nvm_ver->or_valid set to true
5000 * else nvm_ver->or_valid is false.
5002 void ixgbe_get_orom_version(struct ixgbe_hw *hw,
5003 struct ixgbe_nvm_version *nvm_ver)
5005 u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl;
5007 nvm_ver->or_valid = false;
5008 /* Option Rom may or may not be present. Start with pointer */
5009 hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset);
5011 /* make sure offset is valid */
5012 if ((offset == 0x0) || (offset == NVM_INVALID_PTR))
5015 hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh);
5016 hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl);
5018 /* option rom exists and is valid */
5019 if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 ||
5020 eeprom_cfg_blkl == NVM_VER_INVALID ||
5021 eeprom_cfg_blkh == NVM_VER_INVALID)
5024 nvm_ver->or_valid = true;
5025 nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT;
5026 nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) |
5027 (eeprom_cfg_blkh >> NVM_OROM_SHIFT);
5028 nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK;
5032 * ixgbe_get_oem_prod_version - Return OEM Product version
5034 * @hw: pointer to hardware structure
5035 * @nvm_ver: pointer to output structure
5037 * if valid OEM product version, nvm_ver->oem_valid set to true
5038 * else nvm_ver->oem_valid is false.
5040 void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
5041 struct ixgbe_nvm_version *nvm_ver)
5043 u16 rel_num, prod_ver, mod_len, cap, offset;
5045 nvm_ver->oem_valid = false;
5046 hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset);
5048 /* Return is offset to OEM Product Version block is invalid */
5049 if (offset == 0x0 || offset == NVM_INVALID_PTR)
5052 /* Read product version block */
5053 hw->eeprom.ops.read(hw, offset, &mod_len);
5054 hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap);
5056 /* Return if OEM product version block is invalid */
5057 if (mod_len != NVM_OEM_PROD_VER_MOD_LEN ||
5058 (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0)
5061 hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver);
5062 hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num);
5064 /* Return if version is invalid */
5065 if ((rel_num | prod_ver) == 0x0 ||
5066 rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID)
5069 nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT;
5070 nvm_ver->oem_minor = prod_ver & NVM_VER_MASK;
5071 nvm_ver->oem_release = rel_num;
5072 nvm_ver->oem_valid = true;
5076 * ixgbe_get_etk_id - Return Etrack ID from EEPROM
5078 * @hw: pointer to hardware structure
5079 * @nvm_ver: pointer to output structure
5081 * word read errors will return 0xFFFF
5083 void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver)
5085 u16 etk_id_l, etk_id_h;
5087 if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l))
5088 etk_id_l = NVM_VER_INVALID;
5089 if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h))
5090 etk_id_h = NVM_VER_INVALID;
5092 /* The word order for the version format is determined by high order
5095 if ((etk_id_h & NVM_ETK_VALID) == 0) {
5096 nvm_ver->etk_id = etk_id_h;
5097 nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT);
5099 nvm_ver->etk_id = etk_id_l;
5100 nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT);
5106 * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
5107 * @hw: pointer to hardware structure
5108 * @map: pointer to u8 arr for returning map
5110 * Read the rtrup2tc HW register and resolve its content into map
5112 void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map)
5116 reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
5117 for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
5118 map[i] = IXGBE_RTRUP2TC_UP_MASK &
5119 (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
5123 void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
5128 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5129 if (rxctrl & IXGBE_RXCTRL_RXEN) {
5130 if (hw->mac.type != ixgbe_mac_82598EB) {
5131 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5132 if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
5133 pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
5134 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5135 hw->mac.set_lben = true;
5137 hw->mac.set_lben = false;
5140 rxctrl &= ~IXGBE_RXCTRL_RXEN;
5141 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
5145 void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
5150 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
5151 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
5153 if (hw->mac.type != ixgbe_mac_82598EB) {
5154 if (hw->mac.set_lben) {
5155 pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
5156 pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
5157 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
5158 hw->mac.set_lben = false;
5164 * ixgbe_mng_present - returns true when management capability is present
5165 * @hw: pointer to hardware structure
5167 bool ixgbe_mng_present(struct ixgbe_hw *hw)
5171 if (hw->mac.type < ixgbe_mac_82599EB)
5174 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5176 return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
5180 * ixgbe_mng_enabled - Is the manageability engine enabled?
5181 * @hw: pointer to hardware structure
5183 * Returns true if the manageability engine is enabled.
5185 bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
5187 u32 fwsm, manc, factps;
5189 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw));
5190 if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
5193 manc = IXGBE_READ_REG(hw, IXGBE_MANC);
5194 if (!(manc & IXGBE_MANC_RCV_TCO_EN))
5197 if (hw->mac.type <= ixgbe_mac_X540) {
5198 factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw));
5199 if (factps & IXGBE_FACTPS_MNGCG)
5207 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
5208 * @hw: pointer to hardware structure
5209 * @speed: new link speed
5210 * @autoneg_wait_to_complete: true when waiting for completion is needed
5212 * Set the link speed in the MAC and/or PHY register and restarts link.
5214 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
5215 ixgbe_link_speed speed,
5216 bool autoneg_wait_to_complete)
5218 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
5219 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
5220 s32 status = IXGBE_SUCCESS;
5223 bool autoneg, link_up = false;
5225 DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
5227 /* Mask off requested but non-supported speeds */
5228 status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
5229 if (status != IXGBE_SUCCESS)
5232 speed &= link_speed;
5234 /* Try each speed one by one, highest priority first. We do this in
5235 * software because 10Gb fiber doesn't support speed autonegotiation.
5237 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
5239 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
5241 /* Set the module link speed */
5242 switch (hw->phy.media_type) {
5243 case ixgbe_media_type_fiber:
5244 ixgbe_set_rate_select_speed(hw,
5245 IXGBE_LINK_SPEED_10GB_FULL);
5247 case ixgbe_media_type_fiber_qsfp:
5248 /* QSFP module automatically detects MAC link speed */
5251 DEBUGOUT("Unexpected media type.\n");
5255 /* Allow module to change analog characteristics (1G->10G) */
5258 status = ixgbe_setup_mac_link(hw,
5259 IXGBE_LINK_SPEED_10GB_FULL,
5260 autoneg_wait_to_complete);
5261 if (status != IXGBE_SUCCESS)
5264 /* Flap the Tx laser if it has not already been done */
5265 ixgbe_flap_tx_laser(hw);
5267 /* Wait for the controller to acquire link. Per IEEE 802.3ap,
5268 * Section 73.10.2, we may have to wait up to 1000ms if KR is
5269 * attempted. 82599 uses the same timing for 10g SFI.
5271 for (i = 0; i < 10; i++) {
5272 /* Wait for the link partner to also set speed */
5275 /* If we have link, just jump out */
5276 status = ixgbe_check_link(hw, &link_speed,
5278 if (status != IXGBE_SUCCESS)
5286 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
5288 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
5289 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
5291 /* Set the module link speed */
5292 switch (hw->phy.media_type) {
5293 case ixgbe_media_type_fiber:
5294 ixgbe_set_rate_select_speed(hw,
5295 IXGBE_LINK_SPEED_1GB_FULL);
5297 case ixgbe_media_type_fiber_qsfp:
5298 /* QSFP module automatically detects link speed */
5301 DEBUGOUT("Unexpected media type.\n");
5305 /* Allow module to change analog characteristics (10G->1G) */
5308 status = ixgbe_setup_mac_link(hw,
5309 IXGBE_LINK_SPEED_1GB_FULL,
5310 autoneg_wait_to_complete);
5311 if (status != IXGBE_SUCCESS)
5314 /* Flap the Tx laser if it has not already been done */
5315 ixgbe_flap_tx_laser(hw);
5317 /* Wait for the link partner to also set speed */
5320 /* If we have link, just jump out */
5321 status = ixgbe_check_link(hw, &link_speed, &link_up, false);
5322 if (status != IXGBE_SUCCESS)
5329 /* We didn't get link. Configure back to the highest speed we tried,
5330 * (if there was more than one). We call ourselves back with just the
5331 * single highest speed that the user requested.
5334 status = ixgbe_setup_mac_link_multispeed_fiber(hw,
5336 autoneg_wait_to_complete);
5339 /* Set autoneg_advertised value based on input link speed */
5340 hw->phy.autoneg_advertised = 0;
5342 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
5343 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
5345 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
5346 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
5352 * ixgbe_set_soft_rate_select_speed - Set module link speed
5353 * @hw: pointer to hardware structure
5354 * @speed: link speed to set
5356 * Set module link speed via the soft rate select.
5358 void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw,
5359 ixgbe_link_speed speed)
5365 case IXGBE_LINK_SPEED_10GB_FULL:
5366 /* one bit mask same as setting on */
5367 rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
5369 case IXGBE_LINK_SPEED_1GB_FULL:
5370 rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
5373 DEBUGOUT("Invalid fixed module speed\n");
5378 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5379 IXGBE_I2C_EEPROM_DEV_ADDR2,
5382 DEBUGOUT("Failed to read Rx Rate Select RS0\n");
5386 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5388 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
5389 IXGBE_I2C_EEPROM_DEV_ADDR2,
5392 DEBUGOUT("Failed to write Rx Rate Select RS0\n");
5397 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5398 IXGBE_I2C_EEPROM_DEV_ADDR2,
5401 DEBUGOUT("Failed to read Rx Rate Select RS1\n");
5405 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
5407 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
5408 IXGBE_I2C_EEPROM_DEV_ADDR2,
5411 DEBUGOUT("Failed to write Rx Rate Select RS1\n");