1 /*******************************************************************************
3 Copyright (c) 2001-2012, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include "ixgbe_type.h"
35 #include "ixgbe_api.h"
36 #include "ixgbe_common.h"
37 #include "ixgbe_phy.h"
39 u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
40 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
41 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
42 ixgbe_link_speed *speed,
44 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
45 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
46 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
47 bool autoneg_wait_to_complete);
48 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
49 ixgbe_link_speed *speed, bool *link_up,
50 bool link_up_wait_to_complete);
51 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
52 ixgbe_link_speed speed,
54 bool autoneg_wait_to_complete);
55 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
56 ixgbe_link_speed speed,
58 bool autoneg_wait_to_complete);
59 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
60 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
61 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
62 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
63 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
64 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
65 u32 vind, bool vlan_on);
66 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
67 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
68 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
69 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
71 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
72 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
73 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
74 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
75 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
76 u32 headroom, int strategy);
79 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
80 * @hw: pointer to the HW structure
82 * The defaults for 82598 should be in the range of 50us to 50ms,
83 * however the hardware default for these parts is 500us to 1ms which is less
84 * than the 10ms recommended by the pci-e spec. To address this we need to
85 * increase the value to either 10ms to 250ms for capability version 1 config,
86 * or 16ms to 55ms for version 2.
88 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
90 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
93 /* only take action if timeout value is defaulted to 0 */
94 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
98 * if capabilities version is type 1 we can write the
99 * timeout of 10ms to 250ms through the GCR register
101 if (!(gcr & IXGBE_GCR_CAP_VER2)) {
102 gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
107 * for version 2 capabilities we need to write the config space
108 * directly in order to set the completion timeout value for
/* NOTE(review): writes PCI config word Device Control 2 to request 16ms */
111 pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
112 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
113 IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
115 /* disable completion timeout resend */
116 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
117 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
121 * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
122 * @hw: pointer to hardware structure
124 * Read PCIe configuration space, and get the MSI-X vector count from
125 * the capabilities table.
127 u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
131 DEBUGFUNC("ixgbe_get_pcie_msix_count_82598");
/* Only read config space when the MAC reports MSI-X vectors live in PCIe
 * capability space; otherwise the default count (set elsewhere) is used. */
133 if (hw->mac.msix_vectors_from_pcie) {
134 msix_count = IXGBE_READ_PCIE_WORD(hw,
135 IXGBE_PCIE_MSIX_82598_CAPS);
136 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
138 /* MSI-X count is zero-based in HW, so increment to give the
 * one-based vector count */
146 * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
147 * @hw: pointer to hardware structure
149 * Initialize the function pointers and assign the MAC type for 82598.
150 * Does not touch the hardware.
152 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
154 struct ixgbe_mac_info *mac = &hw->mac;
155 struct ixgbe_phy_info *phy = &hw->phy;
158 DEBUGFUNC("ixgbe_init_ops_82598");
/* NOTE(review): the status of the PHY-ops init is immediately overwritten
 * by the generic-ops init below — confirm this discard is intentional. */
160 ret_val = ixgbe_init_phy_ops_generic(hw);
161 ret_val = ixgbe_init_ops_generic(hw);
/* PHY init is deferred until media/SFP type can be identified */
164 phy->ops.init = &ixgbe_init_phy_ops_82598;
/* MAC operations: override generic handlers with 82598-specific ones */
167 mac->ops.start_hw = &ixgbe_start_hw_82598;
168 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
169 mac->ops.reset_hw = &ixgbe_reset_hw_82598;
170 mac->ops.get_media_type = &ixgbe_get_media_type_82598;
171 mac->ops.get_supported_physical_layer =
172 &ixgbe_get_supported_physical_layer_82598;
173 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
174 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
175 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
177 /* RAR, Multicast, VLAN */
178 mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
179 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
180 mac->ops.set_vfta = &ixgbe_set_vfta_82598;
181 mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
/* Flow control */
184 mac->ops.fc_enable = &ixgbe_fc_enable_82598;
/* 82598 hardware limits: MTA size, RAR count, Rx packet buffer (KB),
 * and queue counts */
186 mac->mcft_size = 128;
188 mac->num_rar_entries = 16;
189 mac->rx_pb_size = 512;
190 mac->max_tx_queues = 32;
191 mac->max_rx_queues = 64;
192 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
/* SFP+ module EEPROM access over the NetLogic PHY's I2C interface */
195 phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
/* Link handling */
198 mac->ops.check_link = &ixgbe_check_mac_link_82598;
199 mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
200 mac->ops.flap_tx_laser = NULL;
201 mac->ops.get_link_capabilities =
202 &ixgbe_get_link_capabilities_82598;
203 mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
205 /* Manageability interface: not supported on 82598 */
206 mac->ops.set_fw_drv_ver = NULL;
212 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
213 * @hw: pointer to hardware structure
215 * Initialize any function pointers that were not able to be
216 * set during init_shared_code because the PHY/SFP type was
217 * not known. Perform the SFP init if necessary.
220 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
222 struct ixgbe_mac_info *mac = &hw->mac;
223 struct ixgbe_phy_info *phy = &hw->phy;
224 s32 ret_val = IXGBE_SUCCESS;
225 u16 list_offset, data_offset;
227 DEBUGFUNC("ixgbe_init_phy_ops_82598");
229 /* Identify the PHY */
230 phy->ops.identify(hw);
232 /* Overwrite the link function pointers if copper PHY */
233 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
234 mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
235 mac->ops.get_link_capabilities =
236 &ixgbe_get_copper_link_capabilities_generic;
/* PHY-type specific handlers (TNX copper vs. NetLogic SFP+) */
239 switch (hw->phy.type) {
241 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
242 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
243 phy->ops.get_firmware_version =
244 &ixgbe_get_phy_firmware_version_tnx;
247 phy->ops.reset = &ixgbe_reset_phy_nl;
249 /* Call SFP+ identify routine to get the SFP+ module type */
250 ret_val = phy->ops.identify_sfp(hw);
251 if (ret_val != IXGBE_SUCCESS)
253 else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
254 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
258 /* Check to see if SFP+ module is supported */
259 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
262 if (ret_val != IXGBE_SUCCESS) {
263 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
276 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
277 * @hw: pointer to hardware structure
279 * Starts the hardware using the generic start_hw function.
280 * Disables relaxed ordering. Then sets the pcie completion timeout.
283 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
287 s32 ret_val = IXGBE_SUCCESS;
289 DEBUGFUNC("ixgbe_start_hw_82598");
291 ret_val = ixgbe_start_hw_generic(hw);
293 /* Disable relaxed ordering on every Tx queue's DCA control register */
294 for (i = 0; ((i < hw->mac.max_tx_queues) &&
295 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
296 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
297 regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
298 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
/* Same for Rx: clear both descriptor and header-split relaxed ordering */
301 for (i = 0; ((i < hw->mac.max_rx_queues) &&
302 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
303 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
304 regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
305 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
306 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
309 /* set the completion timeout for interface */
310 if (ret_val == IXGBE_SUCCESS)
311 ixgbe_set_pcie_completion_timeout(hw);
317 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
318 * @hw: pointer to hardware structure
319 * @speed: pointer to link speed
320 * @autoneg: boolean auto-negotiation value
322 * Determines the link capabilities by reading the AUTOC register.
324 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
325 ixgbe_link_speed *speed,
328 s32 status = IXGBE_SUCCESS;
331 DEBUGFUNC("ixgbe_get_link_capabilities_82598");
334 * Determine link capabilities based on the stored value of AUTOC,
335 * which represents EEPROM defaults. If AUTOC value has not been
336 * stored, use the current register value.
338 if (hw->mac.orig_link_settings_stored)
339 autoc = hw->mac.orig_autoc;
341 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
/* Dispatch on the Link Mode Select field of AUTOC */
343 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
344 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
345 *speed = IXGBE_LINK_SPEED_1GB_FULL;
349 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
350 *speed = IXGBE_LINK_SPEED_10GB_FULL;
354 case IXGBE_AUTOC_LMS_1G_AN:
355 *speed = IXGBE_LINK_SPEED_1GB_FULL;
359 case IXGBE_AUTOC_LMS_KX4_AN:
360 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
/* KX4/KX autoneg: accumulate speeds from the advertised PMDs */
361 *speed = IXGBE_LINK_SPEED_UNKNOWN;
362 if (autoc & IXGBE_AUTOC_KX4_SUPP)
363 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
364 if (autoc & IXGBE_AUTOC_KX_SUPP)
365 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
/* Unrecognized link mode: report a link setup error */
370 status = IXGBE_ERR_LINK_SETUP;
378 * ixgbe_get_media_type_82598 - Determines media type
379 * @hw: pointer to hardware structure
381 * Returns the media type (fiber, copper, backplane)
383 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
385 enum ixgbe_media_type media_type;
387 DEBUGFUNC("ixgbe_get_media_type_82598");
389 /* Detect if there is a copper PHY attached. */
390 switch (hw->phy.type) {
391 case ixgbe_phy_cu_unknown:
393 media_type = ixgbe_media_type_copper;
399 /* Media type for I82598 is based on device ID */
400 switch (hw->device_id) {
401 case IXGBE_DEV_ID_82598:
402 case IXGBE_DEV_ID_82598_BX:
403 /* Default device ID is mezzanine card KX/KX4 */
404 media_type = ixgbe_media_type_backplane;
406 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
407 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
408 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
409 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
410 case IXGBE_DEV_ID_82598EB_XF_LR:
411 case IXGBE_DEV_ID_82598EB_SFP_LOM:
412 media_type = ixgbe_media_type_fiber;
414 case IXGBE_DEV_ID_82598EB_CX4:
415 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
416 media_type = ixgbe_media_type_cx4;
418 case IXGBE_DEV_ID_82598AT:
419 case IXGBE_DEV_ID_82598AT2:
420 media_type = ixgbe_media_type_copper;
/* Unknown device ID: report unknown media */
423 media_type = ixgbe_media_type_unknown;
431 * ixgbe_fc_enable_82598 - Enable flow control
432 * @hw: pointer to hardware structure
433 * @packetbuf_num: packet buffer number (0-7)
435 * Enable flow control according to the current settings.
437 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
439 s32 ret_val = IXGBE_SUCCESS;
446 DEBUGFUNC("ixgbe_fc_enable_82598");
449 * On 82598 having Rx FC on causes resets while doing 1G
450 * so if it's on turn it off once we know link_speed. For
451 * more details see 82598 Specification update.
453 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
454 if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
/* Demote any mode that includes Rx pause to its Rx-pause-free
 * equivalent (full -> tx_pause, rx_pause -> none) */
455 switch (hw->fc.requested_mode) {
457 hw->fc.requested_mode = ixgbe_fc_tx_pause;
459 case ixgbe_fc_rx_pause:
460 hw->fc.requested_mode = ixgbe_fc_none;
468 /* Negotiate the fc mode to use */
469 ret_val = ixgbe_fc_autoneg(hw);
470 if (ret_val == IXGBE_ERR_FLOW_CONTROL)
473 /* Disable any previous flow control settings */
474 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
475 fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
477 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
478 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
481 * The possible values of fc.current_mode are:
482 * 0: Flow control is completely disabled
483 * 1: Rx flow control is enabled (we can receive pause frames,
484 * but not send pause frames).
485 * 2: Tx flow control is enabled (we can send pause frames but
486 * we do not support receiving pause frames).
487 * 3: Both Rx and Tx flow control (symmetric) are enabled.
490 switch (hw->fc.current_mode) {
493 * Flow control is disabled by software override or autoneg.
494 * The code below will actually disable it in the HW.
497 case ixgbe_fc_rx_pause:
499 * Rx Flow control is enabled and Tx Flow control is
500 * disabled by software override. Since there really
501 * isn't a way to advertise that we are capable of RX
502 * Pause ONLY, we will advertise that we support both
503 * symmetric and asymmetric Rx PAUSE. Later, we will
504 * disable the adapter's ability to send PAUSE frames.
506 fctrl_reg |= IXGBE_FCTRL_RFCE;
508 case ixgbe_fc_tx_pause:
510 * Tx Flow control is enabled, and Rx Flow control is
511 * disabled by software override.
513 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
516 /* Flow control (both Rx and Tx) is enabled by SW override. */
517 fctrl_reg |= IXGBE_FCTRL_RFCE;
518 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
521 DEBUGOUT("Flow control param set incorrectly\n");
522 ret_val = IXGBE_ERR_CONFIG;
527 /* Set 802.3x based flow control settings. */
528 fctrl_reg |= IXGBE_FCTRL_DPF;
529 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
530 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
532 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
533 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
/* Water marks are programmed in units of 64 bytes (<< 6).
 * NOTE(review): low_water is unindexed here while high_water is
 * indexed by packetbuf_num — confirm against struct ixgbe_fc_info. */
534 reg = hw->fc.low_water << 6;
536 reg |= IXGBE_FCRTL_XONE;
538 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
540 reg = hw->fc.high_water[packetbuf_num] << 6;
541 reg |= IXGBE_FCRTH_FCEN;
543 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
546 /* Configure pause time (2 TCs per register) */
547 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
548 if ((packetbuf_num & 1) == 0)
549 reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
551 reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
552 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
/* Refresh timer is half the pause time */
554 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
561 * ixgbe_start_mac_link_82598 - Configures MAC link settings
562 * @hw: pointer to hardware structure
563 * @autoneg_wait_to_complete: TRUE to poll for autoneg completion
564 * Configures link settings based on values in the ixgbe_hw struct.
565 * Restarts the link. Performs autonegotiation if needed.
567 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
568 bool autoneg_wait_to_complete)
573 s32 status = IXGBE_SUCCESS;
575 DEBUGFUNC("ixgbe_start_mac_link_82598");
/* Restart autonegotiation / link setup */
578 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
579 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
580 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
582 /* Only poll for autoneg to complete if specified to do so */
583 if (autoneg_wait_to_complete) {
/* Polling only applies to the KX4/KX autoneg link modes */
584 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
585 IXGBE_AUTOC_LMS_KX4_AN ||
586 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
587 IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
588 links_reg = 0; /* Just in case Autoneg time = 0 */
589 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
590 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
591 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
595 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
596 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
597 DEBUGOUT("Autonegotiation did not complete.\n");
602 /* Add delay to filter out noises during initial link setup */
609 * ixgbe_validate_link_ready - Function looks for phy link
610 * @hw: pointer to hardware structure
612 * Function indicates success when phy link is available. If phy is not ready
613 * within 5 seconds of MAC indicating link, the function returns error.
615 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
/* Only the 82598AT2 (quad-port copper) needs this PHY-ready check */
620 if (hw->device_id != IXGBE_DEV_ID_82598AT2)
621 return IXGBE_SUCCESS;
/* Poll the PHY autoneg status register until both AN-complete and
 * link-up are reported, or the timeout expires */
624 timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
625 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
626 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
628 if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
629 (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
635 if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
636 DEBUGOUT("Link was indicated but link is down\n");
637 return IXGBE_ERR_LINK_SETUP;
640 return IXGBE_SUCCESS;
644 * ixgbe_check_mac_link_82598 - Get link/speed status
645 * @hw: pointer to hardware structure
646 * @speed: pointer to link speed
647 * @link_up: TRUE if link is up, FALSE otherwise
648 * @link_up_wait_to_complete: bool used to wait for link up or not
650 * Reads the links register to determine if link is up and the current speed
652 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
653 ixgbe_link_speed *speed, bool *link_up,
654 bool link_up_wait_to_complete)
658 u16 link_reg, adapt_comp_reg;
660 DEBUGFUNC("ixgbe_check_mac_link_82598");
663 * SERDES PHY requires us to read link status from undocumented
664 * register 0xC79F. Bit 0 set indicates link is up/ready; clear
665 * indicates link down. 0xC00C is read to check that the XAUI lanes
666 * are active. Bit 0 clear indicates active; set indicates inactive.
668 if (hw->phy.type == ixgbe_phy_nl) {
/* Read 0xC79F twice: the first read returns stale/latched status */
669 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
670 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
671 hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
673 if (link_up_wait_to_complete) {
674 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
675 if ((link_reg & 1) &&
676 ((adapt_comp_reg & 1) == 0)) {
/* Not up yet: re-read both PHY registers and retry */
683 hw->phy.ops.read_reg(hw, 0xC79F,
686 hw->phy.ops.read_reg(hw, 0xC00C,
691 if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
/* If the SERDES PHY reports link down, skip the MAC check */
697 if (*link_up == FALSE)
701 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
702 if (link_up_wait_to_complete) {
703 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
704 if (links_reg & IXGBE_LINKS_UP) {
711 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
714 if (links_reg & IXGBE_LINKS_UP)
/* LINKS speed bit set means 10G, clear means 1G */
720 if (links_reg & IXGBE_LINKS_SPEED)
721 *speed = IXGBE_LINK_SPEED_10GB_FULL;
723 *speed = IXGBE_LINK_SPEED_1GB_FULL;
/* 82598AT2: MAC may report link before the PHY is actually ready */
725 if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
726 (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
729 /* if link is down, zero out the current_mode */
730 if (*link_up == FALSE) {
731 hw->fc.current_mode = ixgbe_fc_none;
732 hw->fc.fc_was_autonegged = FALSE;
735 return IXGBE_SUCCESS;
739 * ixgbe_setup_mac_link_82598 - Set MAC link speed
740 * @hw: pointer to hardware structure
741 * @speed: new link speed
742 * @autoneg: TRUE if autonegotiation enabled
743 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
745 * Set the link speed in the AUTOC register and restarts link.
747 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
748 ixgbe_link_speed speed, bool autoneg,
749 bool autoneg_wait_to_complete)
751 s32 status = IXGBE_SUCCESS;
752 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
753 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
754 u32 autoc = curr_autoc;
755 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
757 DEBUGFUNC("ixgbe_setup_mac_link_82598");
759 /* Check to see if speed passed in is supported. */
760 ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
761 speed &= link_capabilities;
/* Nothing left after masking: requested speed is unsupported */
763 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
764 status = IXGBE_ERR_LINK_SETUP;
766 /* Set KX4/KX support according to speed requested */
767 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
768 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
769 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
770 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
771 autoc |= IXGBE_AUTOC_KX4_SUPP;
772 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
773 autoc |= IXGBE_AUTOC_KX_SUPP;
/* Only touch the register when the value actually changed */
774 if (autoc != curr_autoc)
775 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
778 if (status == IXGBE_SUCCESS) {
780 * Setup and restart the link based on the new values in
781 * ixgbe_hw This will write the AUTOC register based on the new
784 status = ixgbe_start_mac_link_82598(hw,
785 autoneg_wait_to_complete);
793 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
794 * @hw: pointer to hardware structure
795 * @speed: new link speed
796 * @autoneg: TRUE if autonegotiation enabled
797 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
799 * Sets the link speed in the AUTOC register in the MAC and restarts link.
801 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
802 ixgbe_link_speed speed,
804 bool autoneg_wait_to_complete)
808 DEBUGFUNC("ixgbe_setup_copper_link_82598");
810 /* Setup the PHY according to input speed */
811 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
812 autoneg_wait_to_complete);
/* Then restart the MAC side of the link; PHY status is returned */
814 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
820 * ixgbe_reset_hw_82598 - Performs hardware reset
821 * @hw: pointer to hardware structure
823 * Resets the hardware by resetting the transmit and receive units, masks and
824 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
827 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
829 s32 status = IXGBE_SUCCESS;
830 s32 phy_status = IXGBE_SUCCESS;
837 DEBUGFUNC("ixgbe_reset_hw_82598");
839 /* Call adapter stop to disable tx/rx and clear interrupts */
840 status = hw->mac.ops.stop_adapter(hw);
841 if (status != IXGBE_SUCCESS)
845 * Power up the Atlas Tx lanes if they are currently powered down.
846 * Atlas Tx lanes are powered down for MAC loopback tests, but
847 * they are not automatically restored on reset.
849 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
850 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
851 /* Enable Tx Atlas so packets can be transmitted again */
852 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
854 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
855 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
/* Clear the power-down bits for the 10G, 1G and AN lanes in turn */
858 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
860 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
861 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
864 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
866 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
867 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
870 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
872 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
873 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
/* Reset the PHY unless the caller disabled PHY reset */
878 if (hw->phy.reset_disable == FALSE) {
879 /* PHY ops must be identified and initialized prior to reset */
881 /* Init PHY and function pointers, perform SFP setup */
882 phy_status = hw->phy.ops.init(hw);
883 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
885 if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
888 hw->phy.ops.reset(hw);
893 * Issue global reset to the MAC. This needs to be a SW reset.
894 * If link reset is used, it might reset the MAC when mng is using it
896 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
897 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
898 IXGBE_WRITE_FLUSH(hw);
900 /* Poll for reset bit to self-clear indicating reset is complete */
901 for (i = 0; i < 10; i++) {
903 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
904 if (!(ctrl & IXGBE_CTRL_RST))
907 if (ctrl & IXGBE_CTRL_RST) {
908 status = IXGBE_ERR_RESET_FAILED;
909 DEBUGOUT("Reset polling failed to complete.\n");
915 * Double resets are required for recovery from certain error
916 * conditions. Between resets, it is necessary to stall to allow time
917 * for any pending HW events to complete.
919 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
920 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
/* Mask undocumented GHECCR error bits */
924 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
925 gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
926 IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
929 * Store the original AUTOC value if it has not been
930 * stored off yet. Otherwise restore the stored original
931 * AUTOC value since the reset operation sets back to defaults.
933 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
934 if (hw->mac.orig_link_settings_stored == FALSE) {
935 hw->mac.orig_autoc = autoc;
936 hw->mac.orig_link_settings_stored = TRUE;
937 } else if (autoc != hw->mac.orig_autoc) {
938 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
941 /* Store the permanent mac address */
942 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
945 * Store MAC address from RAR0, clear receive address registers, and
946 * clear the multicast table
948 hw->mac.ops.init_rx_addrs(hw);
/* A PHY error takes precedence over the MAC reset status */
951 if (phy_status != IXGBE_SUCCESS)
958 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
959 * @hw: pointer to hardware struct
960 * @rar: receive address register index to associate with a VMDq index
961 * @vmdq: VMDq set index
963 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
966 u32 rar_entries = hw->mac.num_rar_entries;
968 DEBUGFUNC("ixgbe_set_vmdq_82598");
970 /* Make sure we are using a valid rar index range */
971 if (rar >= rar_entries) {
972 DEBUGOUT1("RAR index %d is out of range.\n", rar);
973 return IXGBE_ERR_INVALID_ARGUMENT;
/* Program the VMDq pool index into the VIND field of RAH */
976 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
977 rar_high &= ~IXGBE_RAH_VIND_MASK;
978 rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
979 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
980 return IXGBE_SUCCESS;
984 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
985 * @hw: pointer to hardware struct
986 * @rar: receive address register index to associate with a VMDq index
987 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
989 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
992 u32 rar_entries = hw->mac.num_rar_entries;
994 UNREFERENCED_1PARAMETER(vmdq);
996 /* Make sure we are using a valid rar index range */
997 if (rar >= rar_entries) {
998 DEBUGOUT1("RAR index %d is out of range.\n", rar);
999 return IXGBE_ERR_INVALID_ARGUMENT;
/* Only write the register back if a VIND was actually set */
1002 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
1003 if (rar_high & IXGBE_RAH_VIND_MASK) {
1004 rar_high &= ~IXGBE_RAH_VIND_MASK;
1005 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
1008 return IXGBE_SUCCESS;
1012 * ixgbe_set_vfta_82598 - Set VLAN filter table
1013 * @hw: pointer to hardware structure
1014 * @vlan: VLAN id to write to VLAN filter
1015 * @vind: VMDq output index that maps queue to VLAN id in VFTA
1016 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
1018 * Turn on/off specified VLAN in the VLAN filter table.
1020 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
1028 DEBUGFUNC("ixgbe_set_vfta_82598");
/* Reject out-of-range VLAN ids */
1031 return IXGBE_ERR_PARAM;
1033 /* Determine 32-bit word position in array */
1034 regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
1036 /* Determine the location of the (VMD) queue index */
1037 vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
1038 bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
1040 /* Set the nibble for VMD queue index */
1041 bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
1042 bits &= (~(0x0F << bitindex));
1043 bits |= (vind << bitindex);
1044 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
1046 /* Determine the location of the bit for this VLAN id */
1047 bitindex = vlan & 0x1F; /* lower five bits */
1049 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1051 /* Turn on this VLAN id */
1052 bits |= (1 << bitindex);
1054 /* Turn off this VLAN id */
1055 bits &= ~(1 << bitindex);
1056 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1058 return IXGBE_SUCCESS;
1062 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
1063 * @hw: pointer to hardware structure
1065 * Clears the VLAN filter table, and the VMDq index associated with the filter
1067 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1072 DEBUGFUNC("ixgbe_clear_vfta_82598");
/* Zero every VFTA word, then zero all four VFTAVIND byte arrays */
1074 for (offset = 0; offset < hw->mac.vft_size; offset++)
1075 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1077 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1078 for (offset = 0; offset < hw->mac.vft_size; offset++)
1079 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1082 return IXGBE_SUCCESS;
1086 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1087 * @hw: pointer to hardware structure
1088 * @reg: analog register to read
1089 * @val: read value
1091 * Performs read operation to Atlas analog register specified.
1093 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1097 DEBUGFUNC("ixgbe_read_analog_reg8_82598");
/* Kick off the access by writing the register address to ATLASCTL */
1099 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1100 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1101 IXGBE_WRITE_FLUSH(hw);
/* The low byte of ATLASCTL now holds the analog register value */
1103 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1104 *val = (u8)atlas_ctl;
1106 return IXGBE_SUCCESS;
1110 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1111 * @hw: pointer to hardware structure
1112 * @reg: atlas register to write
1113 * @val: value to write
1115 * Performs write operation to Atlas analog register specified.
1117 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1121 DEBUGFUNC("ixgbe_write_analog_reg8_82598");
/* Register address in bits 15:8, data in bits 7:0 */
1123 atlas_ctl = (reg << 8) | val;
1124 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1125 IXGBE_WRITE_FLUSH(hw);
1128 return IXGBE_SUCCESS;
1132 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1133 * @hw: pointer to hardware structure
1134 * @byte_offset: EEPROM byte offset to read
1135 * @eeprom_data: value read
1137 * Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
1139 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1142 s32 status = IXGBE_SUCCESS;
1148 DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");
/* This path only works through a NetLogic (nl) PHY */
1150 if (hw->phy.type == ixgbe_phy_nl) {
1152 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1153 * 0xC30D. These registers are used to talk to the SFP+
1154 * module's EEPROM through the SDA/SCL (I2C) interface.
1156 sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
1157 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1158 hw->phy.ops.write_reg(hw,
1159 IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1160 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
/* Poll the status register until the EEPROM read finishes */
1164 for (i = 0; i < 100; i++) {
1165 hw->phy.ops.read_reg(hw,
1166 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1167 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1169 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1170 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1175 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1176 DEBUGOUT("EEPROM read did not pass.\n");
1177 status = IXGBE_ERR_SFP_NOT_PRESENT;
/* Read the EEPROM word; the data byte is in the upper 8 bits */
1182 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1183 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1185 *eeprom_data = (u8)(sfp_data >> 8);
/* Non-nl PHY: I2C EEPROM access is not available */
1187 status = IXGBE_ERR_PHY;
1196 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1197 * @hw: pointer to hardware structure
1199 * Determines physical layer capabilities of the current configuration.
1201 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1203 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1204 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1205 u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1206 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1207 u16 ext_ability = 0;
1209 DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
1211 hw->phy.ops.identify(hw);
1213 /* Copper PHY must be checked before AUTOC LMS to determine correct
1214 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1215 switch (hw->phy.type) {
1217 case ixgbe_phy_cu_unknown:
1218 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1219 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1220 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1221 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1222 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1223 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1224 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1225 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1231 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1232 case IXGBE_AUTOC_LMS_1G_AN:
1233 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1234 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1235 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1237 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1239 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1240 if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1241 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1242 else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1243 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1245 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1247 case IXGBE_AUTOC_LMS_KX4_AN:
1248 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1249 if (autoc & IXGBE_AUTOC_KX_SUPP)
1250 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1251 if (autoc & IXGBE_AUTOC_KX4_SUPP)
1252 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1258 if (hw->phy.type == ixgbe_phy_nl) {
1259 hw->phy.ops.identify_sfp(hw);
1261 switch (hw->phy.sfp_type) {
1262 case ixgbe_sfp_type_da_cu:
1263 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1265 case ixgbe_sfp_type_sr:
1266 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1268 case ixgbe_sfp_type_lr:
1269 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1272 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1277 switch (hw->device_id) {
1278 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1279 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1281 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1282 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1283 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1284 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1286 case IXGBE_DEV_ID_82598EB_XF_LR:
1287 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1294 return physical_layer;
1298 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1300 * @hw: pointer to the HW structure
1302 * Calls common function and corrects issue with some single port devices
1303 * that enable LAN1 but not LAN0.
1305 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1307 struct ixgbe_bus_info *bus = &hw->bus;
1311 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
1313 ixgbe_set_lan_id_multi_port_pcie(hw);
1315 /* check if LAN0 is disabled */
1316 hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1317 if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1319 hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1321 /* if LAN0 is completely disabled force function to 0 */
1322 if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1323 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1324 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1332 * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
1333 * @hw: pointer to hardware structure
1336 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
1341 DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
1343 /* Enable relaxed ordering */
1344 for (i = 0; ((i < hw->mac.max_tx_queues) &&
1345 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1346 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1347 regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1348 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
1351 for (i = 0; ((i < hw->mac.max_rx_queues) &&
1352 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1353 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
1354 regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
1355 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
1356 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
1362 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1363 * @hw: pointer to hardware structure
1364 * @num_pb: number of packet buffers to allocate
1365 * @headroom: reserve n KB of headroom
1366 * @strategy: packet buffer allocation strategy
1368 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1369 u32 headroom, int strategy)
1371 u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1373 UNREFERENCED_1PARAMETER(headroom);
1378 /* Setup Rx packet buffer sizes */
1380 case PBA_STRATEGY_WEIGHTED:
1381 /* Setup the first four at 80KB */
1382 rxpktsize = IXGBE_RXPBSIZE_80KB;
1384 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1385 /* Setup the last four at 48KB...don't re-init i */
1386 rxpktsize = IXGBE_RXPBSIZE_48KB;
1388 case PBA_STRATEGY_EQUAL:
1390 /* Divide the remaining Rx packet buffer evenly among the TCs */
1391 for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1392 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1396 /* Setup Tx packet buffer sizes */
1397 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1398 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);