1 /******************************************************************************
3 Copyright (c) 2001-2010, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
35 #include "ixgbe_type.h"
36 #include "ixgbe_api.h"
37 #include "ixgbe_common.h"
38 #include "ixgbe_phy.h"
/*
 * Forward declarations for the 82598-specific MAC/PHY operations in this
 * file.  The static functions are reachable only through the function-
 * pointer tables filled in by ixgbe_init_ops_82598() and
 * ixgbe_init_phy_ops_82598().
 *
 * NOTE(review): several multi-line prototypes in this listing appear to be
 * missing their final parameter line (e.g. a `bool *autoneg` / `bool autoneg`
 * argument) — verify against the original file before building.
 */
40 u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
41 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
42 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
43 ixgbe_link_speed *speed,
45 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
46 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
47 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
48 bool autoneg_wait_to_complete);
49 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
50 ixgbe_link_speed *speed, bool *link_up,
51 bool link_up_wait_to_complete);
52 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
53 ixgbe_link_speed speed,
55 bool autoneg_wait_to_complete);
56 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
57 ixgbe_link_speed speed,
59 bool autoneg_wait_to_complete);
60 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
61 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
62 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
63 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
64 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
65 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
66 u32 vind, bool vlan_on);
67 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
68 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
69 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
70 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
72 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
73 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
74 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
75 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
76 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
77 u32 headroom, int strategy);
80 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
81 * @hw: pointer to the HW structure
83 * The defaults for 82598 should be in the range of 50us to 50ms,
84 * however the hardware default for these parts is 500us to 1ms which is less
85 * than the 10ms recommended by the pci-e spec. To address this we need to
86 * increase the value to either 10ms to 250ms for capability version 1 config,
87 * or 16ms to 55ms for version 2.
89 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
91 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
94 /* only take action if timeout value is defaulted to 0 */
95 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
99 * if capabilities version is type 1 we can write the
100 * timeout of 10ms to 250ms through the GCR register
102 if (!(gcr & IXGBE_GCR_CAP_VER2)) {
103 gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
108 * for version 2 capabilities we need to write the config space
109 * directly in order to set the completion timeout value for
/* Version-2 parts take the timeout via PCI config space (Device Control 2). */
112 pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
113 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
114 IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
/* All paths fall through here: resend is always disabled and GCR written back. */
116 /* disable completion timeout resend */
117 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
118 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
122 * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
123 * @hw: pointer to hardware structure
125 * Read PCIe configuration space, and get the MSI-X vector count from
126 * the capabilities table.
128 u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
132 DEBUGFUNC("ixgbe_get_pcie_msix_count_82598");
/* Only query config space when the MAC reports its vectors via PCIe caps. */
134 if (hw->mac.msix_vectors_from_pcie) {
135 msix_count = IXGBE_READ_PCIE_WORD(hw,
136 IXGBE_PCIE_MSIX_82598_CAPS);
/* Table-size field only; other bits of the caps word are masked off. */
137 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
139 /* MSI-X count is zero-based in HW, so increment to give
147 * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
148 * @hw: pointer to hardware structure
150 * Initialize the function pointers and assign the MAC type for 82598.
151 * Does not touch the hardware.
153 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
155 struct ixgbe_mac_info *mac = &hw->mac;
156 struct ixgbe_phy_info *phy = &hw->phy;
159 DEBUGFUNC("ixgbe_init_ops_82598");
/*
 * NOTE(review): ret_val from the PHY-ops init is immediately overwritten
 * by the generic-ops init below — confirm this is intentional upstream.
 */
161 ret_val = ixgbe_init_phy_ops_generic(hw);
162 ret_val = ixgbe_init_ops_generic(hw);
/* Override generic PHY init with the 82598-specific routine. */
165 phy->ops.init = &ixgbe_init_phy_ops_82598;
/* MAC-level overrides: 82598-specific start/reset/media/analog access. */
168 mac->ops.start_hw = &ixgbe_start_hw_82598;
169 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
170 mac->ops.reset_hw = &ixgbe_reset_hw_82598;
171 mac->ops.get_media_type = &ixgbe_get_media_type_82598;
172 mac->ops.get_supported_physical_layer =
173 &ixgbe_get_supported_physical_layer_82598;
174 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
175 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
176 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
178 /* RAR, Multicast, VLAN */
179 mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
180 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
181 mac->ops.set_vfta = &ixgbe_set_vfta_82598;
182 mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
/* Flow control */
185 mac->ops.fc_enable = &ixgbe_fc_enable_82598;
/* 82598 hardware capacities (queues, RAR entries, packet buffer size). */
187 mac->mcft_size = 128;
189 mac->num_rar_entries = 16;
190 mac->rx_pb_size = 512;
191 mac->max_tx_queues = 32;
192 mac->max_rx_queues = 64;
193 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
/* SFP+ module EEPROM access */
196 phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
/* Link ops; 82598 has no Tx laser to flap, hence the NULL. */
199 mac->ops.check_link = &ixgbe_check_mac_link_82598;
200 mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
201 mac->ops.flap_tx_laser = NULL;
202 mac->ops.get_link_capabilities =
203 &ixgbe_get_link_capabilities_82598;
204 mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
206 /* Manageability interface */
207 mac->ops.set_fw_drv_ver = NULL;
213 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
214 * @hw: pointer to hardware structure
216 * Initialize any function pointers that were not able to be
217 * set during init_shared_code because the PHY/SFP type was
218 * not known. Perform the SFP init if necessary.
220 * Returns IXGBE_SUCCESS, or IXGBE_ERR_SFP_NOT_SUPPORTED when the
220 * attached SFP+ module type cannot be identified/supported.
221 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
223 struct ixgbe_mac_info *mac = &hw->mac;
224 struct ixgbe_phy_info *phy = &hw->phy;
225 s32 ret_val = IXGBE_SUCCESS;
226 u16 list_offset, data_offset;
228 DEBUGFUNC("ixgbe_init_phy_ops_82598");
230 /* Identify the PHY */
231 phy->ops.identify(hw);
233 /* Overwrite the link function pointers if copper PHY */
234 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
235 mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
236 mac->ops.get_link_capabilities =
237 &ixgbe_get_copper_link_capabilities_generic;
/* PHY-type specific hooks (TNX firmware version, NetLogic reset, ...). */
240 switch (hw->phy.type) {
242 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
243 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
244 phy->ops.get_firmware_version =
245 &ixgbe_get_phy_firmware_version_tnx;
248 phy->ops.reset = &ixgbe_reset_phy_nl;
250 /* Call SFP+ identify routine to get the SFP+ module type */
251 ret_val = phy->ops.identify_sfp(hw);
252 if (ret_val != IXGBE_SUCCESS)
254 else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
255 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
259 /* Check to see if SFP+ module is supported */
260 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
/* A missing init-sequence entry in the EEPROM means the module is unsupported. */
263 if (ret_val != IXGBE_SUCCESS) {
264 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
277 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
278 * @hw: pointer to hardware structure
280 * Starts the hardware using the generic start_hw function.
281 * Disables relaxed ordering, then sets the PCIe completion timeout.
284 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
288 s32 ret_val = IXGBE_SUCCESS;
290 DEBUGFUNC("ixgbe_start_hw_82598");
292 ret_val = ixgbe_start_hw_generic(hw);
294 /* Disable relaxed ordering */
/* Tx side: clear write-back relaxed-ordering on every DCA Tx control reg. */
295 for (i = 0; ((i < hw->mac.max_tx_queues) &&
296 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
297 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
298 regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
299 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
/* Rx side: clear descriptor write/header relaxed-ordering bits. */
302 for (i = 0; ((i < hw->mac.max_rx_queues) &&
303 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
304 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
305 regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
306 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
307 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
310 /* set the completion timeout for interface */
311 if (ret_val == IXGBE_SUCCESS)
312 ixgbe_set_pcie_completion_timeout(hw);
318 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
319 * @hw: pointer to hardware structure
320 * @speed: pointer to link speed
321 * @autoneg: boolean auto-negotiation value
323 * Determines the link capabilities by reading the AUTOC register.
325 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
326 ixgbe_link_speed *speed,
329 s32 status = IXGBE_SUCCESS;
332 DEBUGFUNC("ixgbe_get_link_capabilities_82598");
335 * Determine link capabilities based on the stored value of AUTOC,
336 * which represents EEPROM defaults. If AUTOC value has not been
337 * stored, use the current register value.
339 if (hw->mac.orig_link_settings_stored)
340 autoc = hw->mac.orig_autoc;
342 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
/* Link Mode Select field decides which speeds can be advertised. */
344 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
345 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
346 *speed = IXGBE_LINK_SPEED_1GB_FULL;
350 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
351 *speed = IXGBE_LINK_SPEED_10GB_FULL;
355 case IXGBE_AUTOC_LMS_1G_AN:
356 *speed = IXGBE_LINK_SPEED_1GB_FULL;
/* KX4/KX AN modes: build the speed mask from the KX4/KX support bits. */
360 case IXGBE_AUTOC_LMS_KX4_AN:
361 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
362 *speed = IXGBE_LINK_SPEED_UNKNOWN;
363 if (autoc & IXGBE_AUTOC_KX4_SUPP)
364 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
365 if (autoc & IXGBE_AUTOC_KX_SUPP)
366 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
/* Unrecognized LMS value: report a link setup error. */
371 status = IXGBE_ERR_LINK_SETUP;
379 * ixgbe_get_media_type_82598 - Determines media type
380 * @hw: pointer to hardware structure
382 * Returns the media type (fiber, copper, backplane)
384 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
386 enum ixgbe_media_type media_type;
388 DEBUGFUNC("ixgbe_get_media_type_82598");
390 /* Detect if there is a copper PHY attached. */
391 switch (hw->phy.type) {
392 case ixgbe_phy_cu_unknown:
394 media_type = ixgbe_media_type_copper;
400 /* Media type for I82598 is based on device ID */
401 switch (hw->device_id) {
402 case IXGBE_DEV_ID_82598:
403 case IXGBE_DEV_ID_82598_BX:
404 /* Default device ID is mezzanine card KX/KX4 */
405 media_type = ixgbe_media_type_backplane;
407 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
408 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
409 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
410 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
411 case IXGBE_DEV_ID_82598EB_XF_LR:
412 case IXGBE_DEV_ID_82598EB_SFP_LOM:
413 media_type = ixgbe_media_type_fiber;
415 case IXGBE_DEV_ID_82598EB_CX4:
416 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
417 media_type = ixgbe_media_type_cx4;
419 case IXGBE_DEV_ID_82598AT:
420 case IXGBE_DEV_ID_82598AT2:
421 media_type = ixgbe_media_type_copper;
/* Unknown device ID: caller must treat media as unknown. */
424 media_type = ixgbe_media_type_unknown;
432 * ixgbe_fc_enable_82598 - Enable flow control
433 * @hw: pointer to hardware structure
434 * @packetbuf_num: packet buffer number (0-7)
436 * Enable flow control according to the current settings.
438 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
440 s32 ret_val = IXGBE_SUCCESS;
447 DEBUGFUNC("ixgbe_fc_enable_82598");
450 * On 82598 having Rx FC on causes resets while doing 1G
451 * so if it's on turn it off once we know link_speed. For
452 * more details see 82598 Specification update.
454 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
455 if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
/* Downgrade the requested mode so Rx pause is never active at 1G. */
456 switch (hw->fc.requested_mode) {
458 hw->fc.requested_mode = ixgbe_fc_tx_pause;
460 case ixgbe_fc_rx_pause:
461 hw->fc.requested_mode = ixgbe_fc_none;
469 /* Negotiate the fc mode to use */
470 ret_val = ixgbe_fc_autoneg(hw);
471 if (ret_val == IXGBE_ERR_FLOW_CONTROL)
474 /* Disable any previous flow control settings */
475 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
476 fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
478 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
479 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
482 * The possible values of fc.current_mode are:
483 * 0: Flow control is completely disabled
484 * 1: Rx flow control is enabled (we can receive pause frames,
485 * but not send pause frames).
486 * 2: Tx flow control is enabled (we can send pause frames but
487 * we do not support receiving pause frames).
488 * 3: Both Rx and Tx flow control (symmetric) are enabled.
491 switch (hw->fc.current_mode) {
494 * Flow control is disabled by software override or autoneg.
495 * The code below will actually disable it in the HW.
498 case ixgbe_fc_rx_pause:
500 * Rx Flow control is enabled and Tx Flow control is
501 * disabled by software override. Since there really
502 * isn't a way to advertise that we are capable of RX
503 * Pause ONLY, we will advertise that we support both
504 * symmetric and asymmetric Rx PAUSE. Later, we will
505 * disable the adapter's ability to send PAUSE frames.
507 fctrl_reg |= IXGBE_FCTRL_RFCE;
509 case ixgbe_fc_tx_pause:
511 * Tx Flow control is enabled, and Rx Flow control is
512 * disabled by software override.
514 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
517 /* Flow control (both Rx and Tx) is enabled by SW override. */
518 fctrl_reg |= IXGBE_FCTRL_RFCE;
519 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
522 DEBUGOUT("Flow control param set incorrectly\n");
523 ret_val = IXGBE_ERR_CONFIG;
528 /* Set 802.3x based flow control settings. */
529 fctrl_reg |= IXGBE_FCTRL_DPF;
530 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
531 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
533 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
534 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
/*
 * NOTE(review): low_water is used unindexed while high_water is indexed
 * by packetbuf_num — confirm struct ixgbe_fc_info declares low_water as a
 * scalar in this codebase.
 */
535 reg = hw->fc.low_water << 6;
537 reg |= IXGBE_FCRTL_XONE;
539 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
541 reg = hw->fc.high_water[packetbuf_num] << 6;
542 reg |= IXGBE_FCRTH_FCEN;
544 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
547 /* Configure pause time (2 TCs per register) */
/* Even buffers occupy the low 16 bits of FCTTV, odd buffers the high 16. */
548 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
549 if ((packetbuf_num & 1) == 0)
550 reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
552 reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
553 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
/* Refresh threshold: retransmit XOFF at half the pause time. */
555 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
562 * ixgbe_start_mac_link_82598 - Configures MAC link settings
563 * @hw: pointer to hardware structure
564 * @autoneg_wait_to_complete: TRUE to poll until autonegotiation finishes
565 * Configures link settings based on values in the ixgbe_hw struct.
566 * Restarts the link. Performs autonegotiation if needed.
568 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
569 bool autoneg_wait_to_complete)
574 s32 status = IXGBE_SUCCESS;
576 DEBUGFUNC("ixgbe_start_mac_link_82598");
/* Kick the link by setting AN_RESTART in AUTOC. */
579 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
580 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
581 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
583 /* Only poll for autoneg to complete if specified to do so */
584 if (autoneg_wait_to_complete) {
/* Polling is only meaningful in the KX4 autonegotiation link modes. */
585 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
586 IXGBE_AUTOC_LMS_KX4_AN ||
587 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
588 IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
589 links_reg = 0; /* Just in case Autoneg time = 0 */
590 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
591 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
592 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
596 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
597 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
598 DEBUGOUT("Autonegotiation did not complete.\n");
603 /* Add delay to filter out noises during initial link setup */
610 * ixgbe_validate_link_ready - Function looks for phy link
611 * @hw: pointer to hardware structure
613 * Function indicates success when phy link is available. If phy is not ready
614 * within 5 seconds of MAC indicating link, the function returns error.
616 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
/* Workaround applies only to the 82598AT2 copper part. */
621 if (hw->device_id != IXGBE_DEV_ID_82598AT2)
622 return IXGBE_SUCCESS;
/* Poll the PHY AN status register until AN complete + link up, or timeout. */
625 timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
626 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
627 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
629 if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
630 (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
636 if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
637 DEBUGOUT("Link was indicated but link is down\n");
638 return IXGBE_ERR_LINK_SETUP;
641 return IXGBE_SUCCESS;
645 * ixgbe_check_mac_link_82598 - Get link/speed status
646 * @hw: pointer to hardware structure
647 * @speed: pointer to link speed
648 * @link_up: TRUE if link is up, FALSE otherwise
649 * @link_up_wait_to_complete: bool used to wait for link up or not
651 * Reads the links register to determine if link is up and the current speed
653 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
654 ixgbe_link_speed *speed, bool *link_up,
655 bool link_up_wait_to_complete)
659 u16 link_reg, adapt_comp_reg;
661 DEBUGFUNC("ixgbe_check_mac_link_82598");
664 * SERDES PHY requires us to read link status from undocumented
665 * register 0xC79F. Bit 0 set indicates link is up/ready; clear
666 * indicates link down. 0xC00C is read to check that the XAUI lanes
667 * are active. Bit 0 clear indicates active; set indicates inactive.
669 if (hw->phy.type == ixgbe_phy_nl) {
/* First read primes the latched status; second read is the live value. */
670 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
671 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
672 hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
674 if (link_up_wait_to_complete) {
675 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
676 if ((link_reg & 1) &&
677 ((adapt_comp_reg & 1) == 0)) {
/* Not up yet: re-sample both PHY registers and keep polling. */
684 hw->phy.ops.read_reg(hw, 0xC79F,
687 hw->phy.ops.read_reg(hw, 0xC00C,
/* No wait requested: single-shot evaluation of the two PHY registers. */
692 if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
698 if (*link_up == FALSE)
/* MAC-level link status from the LINKS register. */
702 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
703 if (link_up_wait_to_complete) {
704 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
705 if (links_reg & IXGBE_LINKS_UP) {
712 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
715 if (links_reg & IXGBE_LINKS_UP)
/* 82598 reports only two speeds: 10G when LINKS_SPEED set, else 1G. */
721 if (links_reg & IXGBE_LINKS_SPEED)
722 *speed = IXGBE_LINK_SPEED_10GB_FULL;
724 *speed = IXGBE_LINK_SPEED_1GB_FULL;
/* 82598AT2 workaround: MAC may report link before the PHY is ready. */
726 if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
727 (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
730 /* if link is down, zero out the current_mode */
731 if (*link_up == FALSE) {
732 hw->fc.current_mode = ixgbe_fc_none;
733 hw->fc.fc_was_autonegged = FALSE;
736 return IXGBE_SUCCESS;
740 * ixgbe_setup_mac_link_82598 - Set MAC link speed
741 * @hw: pointer to hardware structure
742 * @speed: new link speed
743 * @autoneg: TRUE if autonegotiation enabled
744 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
746 * Set the link speed in the AUTOC register and restarts link.
748 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
749 ixgbe_link_speed speed, bool autoneg,
750 bool autoneg_wait_to_complete)
752 s32 status = IXGBE_SUCCESS;
753 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
754 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
755 u32 autoc = curr_autoc;
756 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
758 DEBUGFUNC("ixgbe_setup_mac_link_82598");
760 /* Check to see if speed passed in is supported. */
761 ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
/* Intersect the request with what the hardware can actually do. */
762 speed &= link_capabilities;
764 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
765 status = IXGBE_ERR_LINK_SETUP;
767 /* Set KX4/KX support according to speed requested */
768 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
769 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
770 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
771 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
772 autoc |= IXGBE_AUTOC_KX4_SUPP;
773 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
774 autoc |= IXGBE_AUTOC_KX_SUPP;
/* Avoid a redundant register write when nothing changed. */
775 if (autoc != curr_autoc)
776 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
779 if (status == IXGBE_SUCCESS) {
781 * Setup and restart the link based on the new values in
782 * ixgbe_hw This will write the AUTOC register based on the new
785 status = ixgbe_start_mac_link_82598(hw,
786 autoneg_wait_to_complete);
794 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
795 * @hw: pointer to hardware structure
796 * @speed: new link speed
797 * @autoneg: TRUE if autonegotiation enabled
798 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
800 * Sets the link speed in the AUTOC register in the MAC and restarts link.
802 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
803 ixgbe_link_speed speed,
805 bool autoneg_wait_to_complete)
809 DEBUGFUNC("ixgbe_setup_copper_link_82598");
811 /* Setup the PHY according to input speed */
812 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
813 autoneg_wait_to_complete);
/* Then restart the MAC side of the link regardless of PHY setup result. */
815 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
821 * ixgbe_reset_hw_82598 - Performs hardware reset
822 * @hw: pointer to hardware structure
824 * Resets the hardware by resetting the transmit and receive units, masks and
825 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
828 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
830 s32 status = IXGBE_SUCCESS;
831 s32 phy_status = IXGBE_SUCCESS;
838 DEBUGFUNC("ixgbe_reset_hw_82598");
840 /* Call adapter stop to disable tx/rx and clear interrupts */
841 status = hw->mac.ops.stop_adapter(hw);
842 if (status != IXGBE_SUCCESS)
846 * Power up the Atlas Tx lanes if they are currently powered down.
847 * Atlas Tx lanes are powered down for MAC loopback tests, but
848 * they are not automatically restored on reset.
850 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
851 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
852 /* Enable Tx Atlas so packets can be transmitted again */
853 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
855 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
856 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
/* Clear the 10G, 1G and AN lane power-down bits in turn. */
859 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
861 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
862 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
865 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
867 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
868 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
871 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
873 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
874 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
/* Reset the PHY unless explicitly disabled by the caller. */
879 if (hw->phy.reset_disable == FALSE) {
880 /* PHY ops must be identified and initialized prior to reset */
882 /* Init PHY and function pointers, perform SFP setup */
883 phy_status = hw->phy.ops.init(hw);
884 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
886 if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
889 hw->phy.ops.reset(hw);
894 * Issue global reset to the MAC. This needs to be a SW reset.
895 * If link reset is used, it might reset the MAC when mng is using it
897 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
898 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
899 IXGBE_WRITE_FLUSH(hw);
901 /* Poll for reset bit to self-clear indicating reset is complete */
902 for (i = 0; i < 10; i++) {
904 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
905 if (!(ctrl & IXGBE_CTRL_RST))
908 if (ctrl & IXGBE_CTRL_RST) {
909 status = IXGBE_ERR_RESET_FAILED;
910 DEBUGOUT("Reset polling failed to complete.\n");
916 * Double resets are required for recovery from certain error
917 * conditions. Between resets, it is necessary to stall to allow time
918 * for any pending HW events to complete.
920 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
921 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
/* Mask off GHECCR bits per hardware errata before continuing. */
925 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
926 gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
927 IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
930 * Store the original AUTOC value if it has not been
931 * stored off yet. Otherwise restore the stored original
932 * AUTOC value since the reset operation sets back to defaults.
934 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
935 if (hw->mac.orig_link_settings_stored == FALSE) {
936 hw->mac.orig_autoc = autoc;
937 hw->mac.orig_link_settings_stored = TRUE;
938 } else if (autoc != hw->mac.orig_autoc) {
939 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
942 /* Store the permanent mac address */
943 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
946 * Store MAC address from RAR0, clear receive address registers, and
947 * clear the multicast table
949 hw->mac.ops.init_rx_addrs(hw);
/* A PHY-init failure takes precedence over the MAC reset status. */
952 if (phy_status != IXGBE_SUCCESS)
959 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
960 * @hw: pointer to hardware struct
961 * @rar: receive address register index to associate with a VMDq index
962 * @vmdq: VMDq set index
963 * Returns IXGBE_SUCCESS, or IXGBE_ERR_INVALID_ARGUMENT for a bad RAR index.
964 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
967 u32 rar_entries = hw->mac.num_rar_entries;
969 DEBUGFUNC("ixgbe_set_vmdq_82598");
971 /* Make sure we are using a valid rar index range */
972 if (rar >= rar_entries) {
973 DEBUGOUT1("RAR index %d is out of range.\n", rar);
974 return IXGBE_ERR_INVALID_ARGUMENT;
/* Replace the VIND field of RAH with the requested VMDq pool index. */
977 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
978 rar_high &= ~IXGBE_RAH_VIND_MASK;
979 rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
980 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
981 return IXGBE_SUCCESS;
985 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
986 * @hw: pointer to hardware struct
987 * @rar: receive address register index to associate with a VMDq index
988 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
990 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
993 u32 rar_entries = hw->mac.num_rar_entries;
995 UNREFERENCED_1PARAMETER(vmdq);
997 /* Make sure we are using a valid rar index range */
998 if (rar >= rar_entries) {
999 DEBUGOUT1("RAR index %d is out of range.\n", rar);
1000 return IXGBE_ERR_INVALID_ARGUMENT;
/* Only write RAH back when a VIND association actually exists. */
1003 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
1004 if (rar_high & IXGBE_RAH_VIND_MASK) {
1005 rar_high &= ~IXGBE_RAH_VIND_MASK;
1006 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
1009 return IXGBE_SUCCESS;
1013 * ixgbe_set_vfta_82598 - Set VLAN filter table
1014 * @hw: pointer to hardware structure
1015 * @vlan: VLAN id to write to VLAN filter
1016 * @vind: VMDq output index that maps queue to VLAN id in VFTA
1017 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
1019 * Turn on/off specified VLAN in the VLAN filter table.
1020 * Returns IXGBE_ERR_PARAM for an out-of-range VLAN id.
1021 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
1029 DEBUGFUNC("ixgbe_set_vfta_82598");
1032 return IXGBE_ERR_PARAM;
1034 /* Determine 32-bit word position in array */
1035 regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
1037 /* Determine the location of the (VMD) queue index */
1038 vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
1039 bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
1041 /* Set the nibble for VMD queue index */
/* Read-modify-write the 4-bit VIND nibble for this VLAN. */
1042 bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
1043 bits &= (~(0x0F << bitindex));
1044 bits |= (vind << bitindex);
1045 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
1047 /* Determine the location of the bit for this VLAN id */
1048 bitindex = vlan & 0x1F; /* lower five bits */
1050 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1052 /* Turn on this VLAN id */
1053 bits |= (1 << bitindex);
1055 /* Turn off this VLAN id */
1056 bits &= ~(1 << bitindex);
1057 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1059 return IXGBE_SUCCESS;
1063 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
1064 * @hw: pointer to hardware structure
1066 * Clears the VLAN filter table, and the VMDq index associated with the filter
1068 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1073 DEBUGFUNC("ixgbe_clear_vfta_82598");
/* Zero every VFTA word, then every VFTAVIND byte-array entry (4 arrays). */
1075 for (offset = 0; offset < hw->mac.vft_size; offset++)
1076 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1078 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1079 for (offset = 0; offset < hw->mac.vft_size; offset++)
1080 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1083 return IXGBE_SUCCESS;
1087 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1088 * @hw: pointer to hardware structure
1089 * @reg: analog register to read
1090 * @val: output: byte read from the register
1092 * Performs read operation to Atlas analog register specified.
1094 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1098 DEBUGFUNC("ixgbe_read_analog_reg8_82598");
/* Latch the target register address into ATLASCTL, then read it back. */
1100 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1101 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1102 IXGBE_WRITE_FLUSH(hw);
1104 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
/* Data byte occupies the low 8 bits of ATLASCTL. */
1105 *val = (u8)atlas_ctl;
1107 return IXGBE_SUCCESS;
1111 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1112 * @hw: pointer to hardware structure
1113 * @reg: atlas register to write
1114 * @val: value to write
1116 * Performs write operation to Atlas analog register specified.
1118 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1122 DEBUGFUNC("ixgbe_write_analog_reg8_82598");
/* Register address in bits 15:8, data byte in bits 7:0. */
1124 atlas_ctl = (reg << 8) | val;
1125 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1126 IXGBE_WRITE_FLUSH(hw);
1129 return IXGBE_SUCCESS;
1133 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1134 * @hw: pointer to hardware structure
1135 * @byte_offset: EEPROM byte offset to read
1136 * @eeprom_data: value read
1138 * Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
1139 * Only supported on NetLogic (ixgbe_phy_nl) PHYs; returns IXGBE_ERR_PHY
1139 * otherwise.
1140 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1143 s32 status = IXGBE_SUCCESS;
1149 DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");
1151 if (hw->phy.type == ixgbe_phy_nl) {
1153 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1154 * 0xC30D. These registers are used to talk to the SFP+
1155 * module's EEPROM through the SDA/SCL (I2C) interface.
/* Build the I2C address: device address in the high byte, offset low. */
1157 sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
1158 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1159 hw->phy.ops.write_reg(hw,
1160 IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1161 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
/* Poll until the EEPROM transaction leaves the in-progress state. */
1165 for (i = 0; i < 100; i++) {
1166 hw->phy.ops.read_reg(hw,
1167 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1168 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1170 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1171 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1176 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1177 DEBUGOUT("EEPROM read did not pass.\n");
1178 status = IXGBE_ERR_SFP_NOT_PRESENT;
/* Transaction passed: the data byte sits in the high byte of SDA_SCL_DATA. */
1183 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1184 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1186 *eeprom_data = (u8)(sfp_data >> 8);
1188 status = IXGBE_ERR_PHY;
1197 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1198 * @hw: pointer to hardware structure
1200 * Determines physical layer capabilities of the current configuration.
1202 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1204 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1205 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1206 u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1207 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1208 u16 ext_ability = 0;
1210 DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
1212 hw->phy.ops.identify(hw);
1214 /* Copper PHY must be checked before AUTOC LMS to determine correct
1215 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1216 switch (hw->phy.type) {
1218 case ixgbe_phy_cu_unknown:
1219 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1220 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1221 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1222 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1223 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1224 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1225 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1226 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1232 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1233 case IXGBE_AUTOC_LMS_1G_AN:
1234 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1235 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1236 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1238 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1240 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1241 if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1242 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1243 else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1244 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1246 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1248 case IXGBE_AUTOC_LMS_KX4_AN:
1249 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1250 if (autoc & IXGBE_AUTOC_KX_SUPP)
1251 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1252 if (autoc & IXGBE_AUTOC_KX4_SUPP)
1253 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1259 if (hw->phy.type == ixgbe_phy_nl) {
1260 hw->phy.ops.identify_sfp(hw);
1262 switch (hw->phy.sfp_type) {
1263 case ixgbe_sfp_type_da_cu:
1264 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1266 case ixgbe_sfp_type_sr:
1267 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1269 case ixgbe_sfp_type_lr:
1270 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1273 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1278 switch (hw->device_id) {
1279 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1280 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1282 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1283 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1284 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1285 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1287 case IXGBE_DEV_ID_82598EB_XF_LR:
1288 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1295 return physical_layer;
1299 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1301 * @hw: pointer to the HW structure
1303 * Calls common function and corrects issue with some single port devices
1304 * that enable LAN1 but not LAN0.
1306 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1308 struct ixgbe_bus_info *bus = &hw->bus;
1312 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
1314 ixgbe_set_lan_id_multi_port_pcie(hw);
1316 /* check if LAN0 is disabled */
1317 hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1318 if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1320 hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1322 /* if LAN0 is completely disabled force function to 0 */
1323 if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1324 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1325 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1333 * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
1334 * @hw: pointer to hardware structure
1337 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
1342 DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
1344 /* Enable relaxed ordering */
1345 for (i = 0; ((i < hw->mac.max_tx_queues) &&
1346 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1347 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1348 regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1349 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
1352 for (i = 0; ((i < hw->mac.max_rx_queues) &&
1353 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1354 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
1355 regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
1356 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
1357 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
1363 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1364 * @hw: pointer to hardware structure
1365 * @num_pb: number of packet buffers to allocate
1366 * @headroom: reserve n KB of headroom
1367 * @strategy: packet buffer allocation strategy
1369 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1370 u32 headroom, int strategy)
1372 u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1374 UNREFERENCED_1PARAMETER(headroom);
1379 /* Setup Rx packet buffer sizes */
1381 case PBA_STRATEGY_WEIGHTED:
1382 /* Setup the first four at 80KB */
1383 rxpktsize = IXGBE_RXPBSIZE_80KB;
1385 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1386 /* Setup the last four at 48KB...don't re-init i */
1387 rxpktsize = IXGBE_RXPBSIZE_48KB;
1389 case PBA_STRATEGY_EQUAL:
1391 /* Divide the remaining Rx packet buffer evenly among the TCs */
1392 for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1393 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1397 /* Setup Tx packet buffer sizes */
1398 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1399 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);