1 // SPDX-License-Identifier: GPL-2.0
2 /*******************************************************************************
4 Intel 10 Gigabit PCI Express Linux driver
5 Copyright(c) 1999 - 2012 Intel Corporation.
8 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
9 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
11 *******************************************************************************/
13 #include "ixgbe_type.h"
14 #include "ixgbe_82598.h"
15 #include "ixgbe_api.h"
16 #include "ixgbe_common.h"
17 #include "ixgbe_phy.h"
/*
 * Forward declarations for the 82598-specific static helpers defined
 * later in this file.
 * NOTE(review): this excerpt is truncated (embedded original line numbers
 * show gaps), so some prototype lines are missing from view.
 */
19 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
20 ixgbe_link_speed *speed,
22 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
23 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
24 bool autoneg_wait_to_complete);
25 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
26 ixgbe_link_speed *speed, bool *link_up,
27 bool link_up_wait_to_complete);
28 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
29 ixgbe_link_speed speed,
31 bool autoneg_wait_to_complete);
32 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
33 ixgbe_link_speed speed,
35 bool autoneg_wait_to_complete);
36 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
37 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
38 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
39 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
40 u32 headroom, int strategy);
/* NOTE(review): truncated excerpt — the early-return path for a non-zero
 * timeout and some braces are not visible; code kept byte-identical. */
43 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
44 * @hw: pointer to the HW structure
46 * The defaults for 82598 should be in the range of 50us to 50ms,
47 * however the hardware default for these parts is 500us to 1ms which is less
48 * than the 10ms recommended by the pci-e spec. To address this we need to
49 * increase the value to either 10ms to 250ms for capability version 1 config,
50 * or 16ms to 55ms for version 2.
52 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
54 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
57 /* only take action if timeout value is defaulted to 0 */
58 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
62 * if capabilities version is type 1 we can write the
63 * timeout of 10ms to 250ms through the GCR register
65 if (!(gcr & IXGBE_GCR_CAP_VER2)) {
66 gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
71 * for version 2 capabilities we need to write the config space
72 * directly in order to set the completion timeout value for
75 pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
76 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
77 IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
79 /* disable completion timeout resend */
80 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
81 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
85 * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
86 * @hw: pointer to hardware structure
88 * Initialize the function pointers and assign the MAC type for 82598.
89 * Does not touch the hardware.
91 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
93 struct ixgbe_mac_info *mac = &hw->mac;
94 struct ixgbe_phy_info *phy = &hw->phy;
97 ret_val = ixgbe_init_phy_ops_generic(hw);
98 ret_val = ixgbe_init_ops_generic(hw);
/* NOTE(review): ret_val from the PHY-ops init above is overwritten by the
 * generic-ops init; only the second result appears to survive here. */
101 phy->ops.init = &ixgbe_init_phy_ops_82598;
/* Override generic MAC function pointers with 82598-specific ones */
104 mac->ops.start_hw = &ixgbe_start_hw_82598;
105 mac->ops.reset_hw = &ixgbe_reset_hw_82598;
106 mac->ops.get_media_type = &ixgbe_get_media_type_82598;
107 mac->ops.get_supported_physical_layer =
108 &ixgbe_get_supported_physical_layer_82598;
109 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
110 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
111 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
113 /* RAR, Multicast, VLAN */
114 mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
115 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
116 mac->ops.set_vfta = &ixgbe_set_vfta_82598;
117 mac->ops.set_vlvf = NULL;
118 mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
121 mac->ops.fc_enable = &ixgbe_fc_enable_82598;
/* 82598 hardware limits: table sizes, queue and vector counts */
123 mac->mcft_size = 128;
125 mac->num_rar_entries = 16;
126 mac->rx_pb_size = 512;
127 mac->max_tx_queues = 32;
128 mac->max_rx_queues = 64;
129 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
132 phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
135 mac->ops.check_link = &ixgbe_check_mac_link_82598;
136 mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
137 mac->ops.flap_tx_laser = NULL;
138 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
139 mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
141 /* Manageability interface */
142 mac->ops.set_fw_drv_ver = NULL;
/* NOTE(review): truncated excerpt — case labels, breaks and the error-exit
 * paths are not all visible; code kept byte-identical. */
148 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
149 * @hw: pointer to hardware structure
151 * Initialize any function pointers that were not able to be
152 * set during init_shared_code because the PHY/SFP type was
153 * not known. Perform the SFP init if necessary.
156 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
158 struct ixgbe_mac_info *mac = &hw->mac;
159 struct ixgbe_phy_info *phy = &hw->phy;
161 u16 list_offset, data_offset;
163 /* Identify the PHY */
164 phy->ops.identify(hw);
166 /* Overwrite the link function pointers if copper PHY */
167 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
168 mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
169 mac->ops.get_link_capabilities =
170 &ixgbe_get_copper_link_capabilities_generic;
173 switch (hw->phy.type) {
175 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
176 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
177 phy->ops.get_firmware_version =
178 &ixgbe_get_phy_firmware_version_tnx;
181 phy->ops.reset = &ixgbe_reset_phy_nl;
183 /* Call SFP+ identify routine to get the SFP+ module type */
184 ret_val = phy->ops.identify_sfp(hw);
187 else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
188 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
192 /* Check to see if SFP+ module is supported */
193 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
197 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
210 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
211 * @hw: pointer to hardware structure
213 * Starts the hardware using the generic start_hw function.
214 * Disables relaxed ordering, then sets the PCIe completion timeout.
217 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
223 ret_val = ixgbe_start_hw_generic(hw);
225 /* Disable relaxed ordering */
/* Loops are bounded by both the MAC queue count and the 82598 DCA
 * register count, whichever is smaller. */
226 for (i = 0; ((i < hw->mac.max_tx_queues) &&
227 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
228 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
229 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
230 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
233 for (i = 0; ((i < hw->mac.max_rx_queues) &&
234 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
235 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
236 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
237 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
238 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
241 /* set the completion timeout for interface */
243 ixgbe_set_pcie_completion_timeout(hw);
/* NOTE(review): truncated excerpt — breaks and the *autoneg assignments
 * for each case are not all visible; code kept byte-identical. */
249 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
250 * @hw: pointer to hardware structure
251 * @speed: pointer to link speed
252 * @autoneg: boolean auto-negotiation value
254 * Determines the link capabilities by reading the AUTOC register.
256 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
257 ixgbe_link_speed *speed,
264 * Determine link capabilities based on the stored value of AUTOC,
265 * which represents EEPROM defaults. If AUTOC value has not been
266 * stored, use the current register value.
268 if (hw->mac.orig_link_settings_stored)
269 autoc = hw->mac.orig_autoc;
271 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
273 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
274 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
275 *speed = IXGBE_LINK_SPEED_1GB_FULL;
279 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
280 *speed = IXGBE_LINK_SPEED_10GB_FULL;
284 case IXGBE_AUTOC_LMS_1G_AN:
285 *speed = IXGBE_LINK_SPEED_1GB_FULL;
289 case IXGBE_AUTOC_LMS_KX4_AN:
290 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
/* In KX4/KX autoneg modes, accumulate every speed the AUTOC
 * support bits advertise. */
291 *speed = IXGBE_LINK_SPEED_UNKNOWN;
292 if (autoc & IXGBE_AUTOC_KX4_SUPP)
293 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
294 if (autoc & IXGBE_AUTOC_KX_SUPP)
295 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
300 status = IXGBE_ERR_LINK_SETUP;
/* NOTE(review): truncated excerpt — break statements between the visible
 * case groups are not shown; code kept byte-identical. */
308 * ixgbe_get_media_type_82598 - Determines media type
309 * @hw: pointer to hardware structure
311 * Returns the media type (fiber, copper, backplane)
313 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
315 enum ixgbe_media_type media_type;
317 /* Detect if there is a copper PHY attached. */
318 switch (hw->phy.type) {
319 case ixgbe_phy_cu_unknown:
321 media_type = ixgbe_media_type_copper;
327 /* Media type for I82598 is based on device ID */
328 switch (hw->device_id) {
329 case IXGBE_DEV_ID_82598:
330 case IXGBE_DEV_ID_82598_BX:
331 /* Default device ID is mezzanine card KX/KX4 */
332 media_type = ixgbe_media_type_backplane;
334 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
335 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
336 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
337 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
338 case IXGBE_DEV_ID_82598EB_XF_LR:
339 case IXGBE_DEV_ID_82598EB_SFP_LOM:
340 media_type = ixgbe_media_type_fiber;
342 case IXGBE_DEV_ID_82598EB_CX4:
343 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
344 media_type = ixgbe_media_type_cx4;
346 case IXGBE_DEV_ID_82598AT:
347 case IXGBE_DEV_ID_82598AT2:
348 media_type = ixgbe_media_type_copper;
351 media_type = ixgbe_media_type_unknown;
/* NOTE(review): truncated excerpt — local declarations, goto labels and
 * several break/else lines are not visible; code kept byte-identical. */
359 * ixgbe_fc_enable_82598 - Enable flow control
360 * @hw: pointer to hardware structure
362 * Enable flow control according to the current settings.
364 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
375 /* Validate the water mark configuration */
376 if (!hw->fc.pause_time) {
377 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
381 /* Low water mark of zero causes XOFF floods */
382 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
383 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
384 hw->fc.high_water[i]) {
385 if (!hw->fc.low_water[i] ||
386 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
387 hw_dbg(hw, "Invalid water mark configuration\n");
388 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
395 * On 82598 having Rx FC on causes resets while doing 1G
396 * so if it's on turn it off once we know link_speed. For
397 * more details see 82598 Specification update.
399 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
400 if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
401 switch (hw->fc.requested_mode) {
403 hw->fc.requested_mode = ixgbe_fc_tx_pause;
405 case ixgbe_fc_rx_pause:
406 hw->fc.requested_mode = ixgbe_fc_none;
414 /* Negotiate the fc mode to use */
415 ixgbe_fc_autoneg(hw);
417 /* Disable any previous flow control settings */
418 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
419 fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
421 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
422 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
425 * The possible values of fc.current_mode are:
426 * 0: Flow control is completely disabled
427 * 1: Rx flow control is enabled (we can receive pause frames,
428 * but not send pause frames).
429 * 2: Tx flow control is enabled (we can send pause frames but
430 * we do not support receiving pause frames).
431 * 3: Both Rx and Tx flow control (symmetric) are enabled.
434 switch (hw->fc.current_mode) {
437 * Flow control is disabled by software override or autoneg.
438 * The code below will actually disable it in the HW.
441 case ixgbe_fc_rx_pause:
443 * Rx Flow control is enabled and Tx Flow control is
444 * disabled by software override. Since there really
445 * isn't a way to advertise that we are capable of RX
446 * Pause ONLY, we will advertise that we support both
447 * symmetric and asymmetric Rx PAUSE. Later, we will
448 * disable the adapter's ability to send PAUSE frames.
450 fctrl_reg |= IXGBE_FCTRL_RFCE;
452 case ixgbe_fc_tx_pause:
454 * Tx Flow control is enabled, and Rx Flow control is
455 * disabled by software override.
457 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
460 /* Flow control (both Rx and Tx) is enabled by SW override. */
461 fctrl_reg |= IXGBE_FCTRL_RFCE;
462 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
465 hw_dbg(hw, "Flow control param set incorrectly\n");
466 ret_val = IXGBE_ERR_CONFIG;
471 /* Set 802.3x based flow control settings. */
472 fctrl_reg |= IXGBE_FCTRL_DPF;
473 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
474 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
476 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
477 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
478 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
479 hw->fc.high_water[i]) {
/* << 10 presumably scales the water marks from KB to bytes for the
 * FCRTL/FCRTH registers -- TODO confirm against the 82598 datasheet. */
480 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
481 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
482 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
483 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
485 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
486 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
491 /* Configure pause time (2 TCs per register) */
492 reg = hw->fc.pause_time * 0x00010001;
493 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
494 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
496 /* Configure flow control refresh threshold value */
497 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
504 * ixgbe_start_mac_link_82598 - Configures MAC link settings
505 * @hw: pointer to hardware structure
507 * Configures link settings based on values in the ixgbe_hw struct.
508 * Restarts the link. Performs autonegotiation if needed.
510 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
511 bool autoneg_wait_to_complete)
/* Restart autoneg by setting AN_RESTART in AUTOC */
519 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
520 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
521 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
523 /* Only poll for autoneg to complete if specified to do so */
524 if (autoneg_wait_to_complete) {
525 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
526 IXGBE_AUTOC_LMS_KX4_AN ||
527 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
528 IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
529 links_reg = 0; /* Just in case Autoneg time = 0 */
530 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
531 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
532 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
536 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
537 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
538 hw_dbg(hw, "Autonegotiation did not complete.\n");
543 /* Add delay to filter out noises during initial link setup */
550 * ixgbe_validate_link_ready - Function looks for phy link
551 * @hw: pointer to hardware structure
553 * Function indicates success when phy link is available. If phy is not ready
554 * within 5 seconds of MAC indicating link, the function returns error.
556 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
/* Only the 82598AT2 needs this workaround; all other devices return
 * immediately (early-exit body not fully visible in this excerpt). */
561 if (hw->device_id != IXGBE_DEV_ID_82598AT2)
565 timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
566 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
567 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
/* Done once the PHY reports both autoneg complete and link up */
569 if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
570 (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
576 if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
577 hw_dbg(hw, "Link was indicated but link is down\n");
578 return IXGBE_ERR_LINK_SETUP;
/* NOTE(review): truncated excerpt — the *link_up assignments, delays and
 * some braces are not visible; code kept byte-identical. */
585 * ixgbe_check_mac_link_82598 - Get link/speed status
586 * @hw: pointer to hardware structure
587 * @speed: pointer to link speed
588 * @link_up: true is link is up, false otherwise
589 * @link_up_wait_to_complete: bool used to wait for link up or not
591 * Reads the links register to determine if link is up and the current speed
593 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
594 ixgbe_link_speed *speed, bool *link_up,
595 bool link_up_wait_to_complete)
599 u16 link_reg, adapt_comp_reg;
602 * SERDES PHY requires us to read link status from undocumented
603 * register 0xC79F. Bit 0 set indicates link is up/ready; clear
604 * indicates link down. 0xC00C is read to check that the XAUI lanes
605 * are active. Bit 0 clear indicates active; set indicates inactive.
607 if (hw->phy.type == ixgbe_phy_nl) {
/* Register is read twice; presumably the first read clears a latched
 * status -- TODO confirm against the PHY documentation. */
608 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
609 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
610 hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
612 if (link_up_wait_to_complete) {
613 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
614 if ((link_reg & 1) &&
615 ((adapt_comp_reg & 1) == 0)) {
622 hw->phy.ops.read_reg(hw, 0xC79F,
625 hw->phy.ops.read_reg(hw, 0xC00C,
630 if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
636 if (*link_up == false)
640 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
641 if (link_up_wait_to_complete) {
642 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
643 if (links_reg & IXGBE_LINKS_UP) {
650 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
653 if (links_reg & IXGBE_LINKS_UP)
659 if (links_reg & IXGBE_LINKS_SPEED)
660 *speed = IXGBE_LINK_SPEED_10GB_FULL;
662 *speed = IXGBE_LINK_SPEED_1GB_FULL;
/* 82598AT2 requires the PHY to confirm link before reporting link up */
664 if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) &&
665 (ixgbe_validate_link_ready(hw) != 0))
673 * ixgbe_setup_mac_link_82598 - Set MAC link speed
674 * @hw: pointer to hardware structure
675 * @speed: new link speed
676 * @autoneg: true if autonegotiation enabled
677 * @autoneg_wait_to_complete: true when waiting for completion is needed
679 * Set the link speed in the AUTOC register and restarts link.
681 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
682 ixgbe_link_speed speed, bool autoneg,
683 bool autoneg_wait_to_complete)
686 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
687 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
688 u32 autoc = curr_autoc;
689 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
691 /* Check to see if speed passed in is supported. */
692 ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
693 speed &= link_capabilities;
695 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
696 status = IXGBE_ERR_LINK_SETUP;
698 /* Set KX4/KX support according to speed requested */
699 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
700 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
701 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
702 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
703 autoc |= IXGBE_AUTOC_KX4_SUPP;
704 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
705 autoc |= IXGBE_AUTOC_KX_SUPP;
/* Only touch the register when the value actually changed */
706 if (autoc != curr_autoc)
707 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
712 * Setup and restart the link based on the new values in
713 * ixgbe_hw This will write the AUTOC register based on the new
716 status = ixgbe_start_mac_link_82598(hw,
717 autoneg_wait_to_complete);
725 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
726 * @hw: pointer to hardware structure
727 * @speed: new link speed
728 * @autoneg: true if autonegotiation enabled
729 * @autoneg_wait_to_complete: true if waiting is needed to complete
731 * Sets the link speed in the AUTOC register in the MAC and restarts link.
733 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
734 ixgbe_link_speed speed,
736 bool autoneg_wait_to_complete)
740 /* Setup the PHY according to input speed */
741 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
742 autoneg_wait_to_complete);
/* Set up MAC side of the link; its return value is not captured here */
744 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
/* NOTE(review): truncated excerpt — local declarations, goto labels,
 * msleep calls and several braces are not visible; code kept
 * byte-identical. */
750 * ixgbe_reset_hw_82598 - Performs hardware reset
751 * @hw: pointer to hardware structure
753 * Resets the hardware by resetting the transmit and receive units, masks and
754 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
757 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
767 /* Call adapter stop to disable tx/rx and clear interrupts */
768 status = hw->mac.ops.stop_adapter(hw);
773 * Power up the Atlas Tx lanes if they are currently powered down.
774 * Atlas Tx lanes are powered down for MAC loopback tests, but
775 * they are not automatically restored on reset.
777 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
778 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
779 /* Enable Tx Atlas so packets can be transmitted again */
780 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
782 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
783 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
786 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
788 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
789 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
792 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
794 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
795 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
798 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
800 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
801 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
806 if (hw->phy.reset_disable == false) {
807 /* PHY ops must be identified and initialized prior to reset */
809 /* Init PHY and function pointers, perform SFP setup */
810 phy_status = hw->phy.ops.init(hw);
811 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
813 if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
816 hw->phy.ops.reset(hw);
821 * Issue global reset to the MAC. This needs to be a SW reset.
822 * If link reset is used, it might reset the MAC when mng is using it
824 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
825 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
826 IXGBE_WRITE_FLUSH(hw);
828 /* Poll for reset bit to self-clear indicating reset is complete */
829 for (i = 0; i < 10; i++) {
831 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
832 if (!(ctrl & IXGBE_CTRL_RST))
835 if (ctrl & IXGBE_CTRL_RST) {
836 status = IXGBE_ERR_RESET_FAILED;
837 hw_dbg(hw, "Reset polling failed to complete.\n");
843 * Double resets are required for recovery from certain error
844 * conditions. Between resets, it is necessary to stall to allow time
845 * for any pending HW events to complete.
847 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
848 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
852 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
853 gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
854 IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
857 * Store the original AUTOC value if it has not been
858 * stored off yet. Otherwise restore the stored original
859 * AUTOC value since the reset operation sets back to defaults.
861 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
862 if (hw->mac.orig_link_settings_stored == false) {
863 hw->mac.orig_autoc = autoc;
864 hw->mac.orig_link_settings_stored = true;
865 } else if (autoc != hw->mac.orig_autoc) {
866 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
869 /* Store the permanent mac address */
870 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
873 * Store MAC address from RAR0, clear receive address registers, and
874 * clear the multicast table
876 hw->mac.ops.init_rx_addrs(hw);
886 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
887 * @hw: pointer to hardware struct
888 * @rar: receive address register index to associate with a VMDq index
889 * @vmdq: VMDq set index
891 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
894 u32 rar_entries = hw->mac.num_rar_entries;
896 /* Make sure we are using a valid rar index range */
897 if (rar >= rar_entries) {
898 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
899 return IXGBE_ERR_INVALID_ARGUMENT;
/* Replace the VIND field of RAH with the requested VMDq pool index */
902 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
903 rar_high &= ~IXGBE_RAH_VIND_MASK;
904 rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
905 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
910 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
911 * @hw: pointer to hardware struct
912 * @rar: receive address register index to associate with a VMDq index
913 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
915 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
918 u32 rar_entries = hw->mac.num_rar_entries;
921 /* Make sure we are using a valid rar index range */
922 if (rar >= rar_entries) {
923 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
924 return IXGBE_ERR_INVALID_ARGUMENT;
/* Only write the register back when a VIND field is actually set */
927 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
928 if (rar_high & IXGBE_RAH_VIND_MASK) {
929 rar_high &= ~IXGBE_RAH_VIND_MASK;
930 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
/* NOTE(review): truncated excerpt — the vlan range check condition and the
 * if/else around the bit set/clear are not fully visible; code kept
 * byte-identical. */
937 * ixgbe_set_vfta_82598 - Set VLAN filter table
938 * @hw: pointer to hardware structure
939 * @vlan: VLAN id to write to VLAN filter
940 * @vind: VMDq output index that maps queue to VLAN id in VFTA
941 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
943 * Turn on/off specified VLAN in the VLAN filter table.
945 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
954 return IXGBE_ERR_PARAM;
956 /* Determine 32-bit word position in array */
957 regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
959 /* Determine the location of the (VMD) queue index */
960 vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
961 bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
963 /* Set the nibble for VMD queue index */
964 bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
965 bits &= (~(0x0F << bitindex));
966 bits |= (vind << bitindex);
967 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
969 /* Determine the location of the bit for this VLAN id */
970 bitindex = vlan & 0x1F; /* lower five bits */
972 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
974 /* Turn on this VLAN id */
975 bits |= (1 << bitindex);
977 /* Turn off this VLAN id */
978 bits &= ~(1 << bitindex);
979 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
985 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
986 * @hw: pointer to hardware structure
988 * Clears the VLAN filter table, and the VMDq index associated with the filter
990 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
/* Zero every VFTA word, then zero all four VMDq-index byte arrays */
995 for (offset = 0; offset < hw->mac.vft_size; offset++)
996 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
998 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
999 for (offset = 0; offset < hw->mac.vft_size; offset++)
1000 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1007 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1008 * @hw: pointer to hardware structure
1009 * @reg: analog register to read
1012 * Performs read operation to Atlas analog register specified.
1014 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
/* NOTE(review): a read routine issuing IXGBE_ATLASCTL_WRITE_CMD looks
 * suspicious, but this matches the visible code -- confirm the command
 * encoding against the 82598 datasheet before changing. */
1018 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1019 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1020 IXGBE_WRITE_FLUSH(hw);
/* Low byte of ATLASCTL holds the register value after the access */
1022 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1023 *val = (u8)atlas_ctl;
1029 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1030 * @hw: pointer to hardware structure
1031 * @reg: atlas register to write
1032 * @val: value to write
1034 * Performs write operation to Atlas analog register specified.
1036 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
/* Register address goes in bits 15:8, data byte in bits 7:0 */
1040 atlas_ctl = (reg << 8) | val;
1041 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1042 IXGBE_WRITE_FLUSH(hw);
/* NOTE(review): truncated excerpt — the sfp_addr write call's final
 * argument and the else branch braces are not visible; code kept
 * byte-identical. */
1049 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1050 * @hw: pointer to hardware structure
1051 * @byte_offset: EEPROM byte offset to read
1052 * @eeprom_data: value read
1054 * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
1056 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1065 if (hw->phy.type == ixgbe_phy_nl) {
1067 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1068 * 0xC30D. These registers are used to talk to the SFP+
1069 * module's EEPROM through the SDA/SCL (I2C) interface.
1071 sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
1072 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1073 hw->phy.ops.write_reg(hw,
1074 IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1075 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
/* Poll until the PHY reports the I2C transaction finished */
1079 for (i = 0; i < 100; i++) {
1080 hw->phy.ops.read_reg(hw,
1081 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1082 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1084 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1085 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1090 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1091 hw_dbg(hw, "EEPROM read did not pass.\n");
1092 status = IXGBE_ERR_SFP_NOT_PRESENT;
1097 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1098 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
/* EEPROM byte is returned in the high byte of the data register */
1100 *eeprom_data = (u8)(sfp_data >> 8);
1102 status = IXGBE_ERR_PHY;
/* NOTE(review): truncated excerpt — several case labels, breaks and goto
 * targets are not visible; code kept byte-identical. */
1111 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1112 * @hw: pointer to hardware structure
1114 * Determines physical layer capabilities of the current configuration.
1116 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1118 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1119 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1120 u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1121 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1122 u16 ext_ability = 0;
1124 hw->phy.ops.identify(hw);
1126 /* Copper PHY must be checked before AUTOC LMS to determine correct
1127 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1128 switch (hw->phy.type) {
1130 case ixgbe_phy_cu_unknown:
1131 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1132 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1133 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1134 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1135 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1136 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1137 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1138 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
/* Non-copper: derive physical layer from the AUTOC link mode */
1144 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1145 case IXGBE_AUTOC_LMS_1G_AN:
1146 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1147 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1148 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1150 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1152 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1153 if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1154 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1155 else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1156 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1158 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1160 case IXGBE_AUTOC_LMS_KX4_AN:
1161 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1162 if (autoc & IXGBE_AUTOC_KX_SUPP)
1163 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1164 if (autoc & IXGBE_AUTOC_KX4_SUPP)
1165 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
/* NetLogic PHY: refine the answer from the identified SFP+ module type */
1171 if (hw->phy.type == ixgbe_phy_nl) {
1172 hw->phy.ops.identify_sfp(hw);
1174 switch (hw->phy.sfp_type) {
1175 case ixgbe_sfp_type_da_cu:
1176 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1178 case ixgbe_sfp_type_sr:
1179 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1181 case ixgbe_sfp_type_lr:
1182 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1185 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
/* Fall back to device-ID-based classification */
1190 switch (hw->device_id) {
1191 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1192 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1194 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1195 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1196 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1197 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1199 case IXGBE_DEV_ID_82598EB_XF_LR:
1200 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1207 return physical_layer;
1211 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1213 * @hw: pointer to the HW structure
1215 * Calls common function and corrects issue with some single port devices
1216 * that enable LAN1 but not LAN0.
1218 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1220 struct ixgbe_bus_info *bus = &hw->bus;
1224 ixgbe_set_lan_id_multi_port_pcie(hw);
1226 /* check if LAN0 is disabled */
/* 0 / 0xFFFF mean the EEPROM PCIe-general pointer is absent/blank */
1227 hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1228 if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1230 hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1232 /* if LAN0 is completely disabled force function to 0 */
1233 if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1234 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1235 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1243 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1244 * @hw: pointer to hardware structure
1245 * @num_pb: number of packet buffers to allocate
1246 * @headroom: reserve n KB of headroom
1247 * @strategy: packet buffer allocation strategy
1249 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1250 u32 headroom, int strategy)
1252 u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1258 /* Setup Rx packet buffer sizes */
1260 case PBA_STRATEGY_WEIGHTED:
1261 /* Setup the first four at 80KB */
1262 rxpktsize = IXGBE_RXPBSIZE_80KB;
1264 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1265 /* Setup the last four at 48KB...don't re-init i */
1266 rxpktsize = IXGBE_RXPBSIZE_48KB;
/* NOTE(review): the weighted case appears to fall through to the equal
 * split below for the remaining buffers -- TODO confirm; the break lines
 * are not visible in this excerpt. */
1268 case PBA_STRATEGY_EQUAL:
1270 /* Divide the remaining Rx packet buffer evenly among the TCs */
1271 for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1272 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1276 /* Setup Tx packet buffer sizes */
1277 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1278 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);