* applicable to E822 devices.
*/
static enum ice_status
-ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
+ice_read_cgu_reg_e822(struct ice_hw *hw, u16 addr, u32 *val)
{
struct ice_sbq_msg_input cgu_msg;
enum ice_status status;
* applicable to E822 devices.
*/
static enum ice_status
-ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val)
+ice_write_cgu_reg_e822(struct ice_hw *hw, u16 addr, u32 val)
{
struct ice_sbq_msg_input cgu_msg;
enum ice_status status;
#define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */
/* Program the 10Gb/40Gb conversion ratio */
- uix = (tu_per_sec * LINE_UI_10G_40G) / 390625000;
+ uix = DIV_64BIT(tu_per_sec * LINE_UI_10G_40G, 390625000);
status = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L,
uix);
}
/* Program the 25Gb/100Gb conversion ratio */
- uix = (tu_per_sec * LINE_UI_25G_100G) / 390625000;
+ uix = DIV_64BIT(tu_per_sec * LINE_UI_25G_100G, 390625000);
status = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L,
uix);
/* P_REG_PAR_TX_TUS */
if (e822_vernier[link_spd].tx_par_clk)
- phy_tus = tu_per_sec / e822_vernier[link_spd].tx_par_clk;
+ phy_tus = DIV_64BIT(tu_per_sec,
+ e822_vernier[link_spd].tx_par_clk);
else
phy_tus = 0;
/* P_REG_PAR_RX_TUS */
if (e822_vernier[link_spd].rx_par_clk)
- phy_tus = tu_per_sec / e822_vernier[link_spd].rx_par_clk;
+ phy_tus = DIV_64BIT(tu_per_sec,
+ e822_vernier[link_spd].rx_par_clk);
else
phy_tus = 0;
/* P_REG_PCS_TX_TUS */
if (e822_vernier[link_spd].tx_pcs_clk)
- phy_tus = tu_per_sec / e822_vernier[link_spd].tx_pcs_clk;
+ phy_tus = DIV_64BIT(tu_per_sec,
+ e822_vernier[link_spd].tx_pcs_clk);
else
phy_tus = 0;
/* P_REG_PCS_RX_TUS */
if (e822_vernier[link_spd].rx_pcs_clk)
- phy_tus = tu_per_sec / e822_vernier[link_spd].rx_pcs_clk;
+ phy_tus = DIV_64BIT(tu_per_sec,
+ e822_vernier[link_spd].rx_pcs_clk);
else
phy_tus = 0;
/* P_REG_DESK_PAR_TX_TUS */
if (e822_vernier[link_spd].tx_desk_rsgb_par)
- phy_tus = tu_per_sec / e822_vernier[link_spd].tx_desk_rsgb_par;
+ phy_tus = DIV_64BIT(tu_per_sec,
+ e822_vernier[link_spd].tx_desk_rsgb_par);
else
phy_tus = 0;
/* P_REG_DESK_PAR_RX_TUS */
if (e822_vernier[link_spd].rx_desk_rsgb_par)
- phy_tus = tu_per_sec / e822_vernier[link_spd].rx_desk_rsgb_par;
+ phy_tus = DIV_64BIT(tu_per_sec,
+ e822_vernier[link_spd].rx_desk_rsgb_par);
else
phy_tus = 0;
/* P_REG_DESK_PCS_TX_TUS */
if (e822_vernier[link_spd].tx_desk_rsgb_pcs)
- phy_tus = tu_per_sec / e822_vernier[link_spd].tx_desk_rsgb_pcs;
+ phy_tus = DIV_64BIT(tu_per_sec,
+ e822_vernier[link_spd].tx_desk_rsgb_pcs);
else
phy_tus = 0;
/* P_REG_DESK_PCS_RX_TUS */
if (e822_vernier[link_spd].rx_desk_rsgb_pcs)
- phy_tus = tu_per_sec / e822_vernier[link_spd].rx_desk_rsgb_pcs;
+ phy_tus = DIV_64BIT(tu_per_sec,
+ e822_vernier[link_spd].rx_desk_rsgb_pcs);
else
phy_tus = 0;
* overflows 64 bit integer arithmetic, so break it up into two
* divisions by 1e4 first then by 1e7.
*/
- fixed_offset = tu_per_sec / 10000;
+ fixed_offset = DIV_64BIT(tu_per_sec, 10000);
fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
- fixed_offset /= 10000000;
+ fixed_offset = DIV_64BIT(fixed_offset, 10000000);
return fixed_offset;
}
enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
{
u64 cur_freq, clk_incval, tu_per_sec, mult, adj;
+ u32 pmd_adj_divisor, val;
enum ice_status status;
u8 pmd_align;
- u32 val;
status = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val);
if (status) {
/* Calculate TUs per second */
tu_per_sec = cur_freq * clk_incval;
+ /* Get the link speed dependent PMD adjustment divisor */
+ pmd_adj_divisor = e822_vernier[link_spd].pmd_adj_divisor;
+
/* The PMD alignment adjustment measurement depends on the link speed,
* and whether FEC is enabled. For each link speed, the alignment
* adjustment is calculated by dividing a value by the length of
* divide by 125, and then handle remaining divisor based on the link
* speed pmd_adj_divisor value.
*/
- adj = tu_per_sec / 125;
+ adj = DIV_64BIT(tu_per_sec, 125);
adj *= mult;
- adj /= e822_vernier[link_spd].pmd_adj_divisor;
+ adj = DIV_64BIT(adj, pmd_adj_divisor);
/* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx
* cycle count is necessary.
if (rx_cycle) {
mult = (4 - rx_cycle) * 40;
- cycle_adj = tu_per_sec / 125;
+ cycle_adj = DIV_64BIT(tu_per_sec, 125);
cycle_adj *= mult;
- cycle_adj /= e822_vernier[link_spd].pmd_adj_divisor;
+ cycle_adj = DIV_64BIT(cycle_adj, pmd_adj_divisor);
adj += cycle_adj;
}
if (rx_cycle) {
mult = rx_cycle * 40;
- cycle_adj = tu_per_sec / 125;
+ cycle_adj = DIV_64BIT(tu_per_sec, 125);
cycle_adj *= mult;
- cycle_adj /= e822_vernier[link_spd].pmd_adj_divisor;
+ cycle_adj = DIV_64BIT(cycle_adj, pmd_adj_divisor);
adj += cycle_adj;
}
* overflows 64 bit integer arithmetic, so break it up into two
* divisions by 1e4 first then by 1e7.
*/
- fixed_offset = tu_per_sec / 10000;
+ fixed_offset = DIV_64BIT(tu_per_sec, 10000);
fixed_offset *= e822_vernier[link_spd].rx_fixed_delay;
- fixed_offset /= 10000000;
+ fixed_offset = DIV_64BIT(fixed_offset, 10000000);
return fixed_offset;
}
return ICE_SUCCESS;
}
+/**
+ * ice_phy_exit_bypass_e822 - Exit bypass mode, after vernier calculations
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to configure
+ *
+ * After hardware finishes vernier calculations for the Tx and Rx offset, this
+ * function can be used to exit bypass mode by updating the total Tx and Rx
+ * offsets, and then disabling bypass. This will enable hardware to include
+ * the more precise offset calibrations, increasing precision of the generated
+ * timestamps.
+ *
+ * This cannot be done until hardware has measured the offsets, which requires
+ * waiting until at least one packet has been sent and received by the device.
+ */
+enum ice_status ice_phy_exit_bypass_e822(struct ice_hw *hw, u8 port)
+{
+	enum ice_status status;
+	u32 val;
+
+	/* Tx offset must be measured by hardware before bypass can exit */
+	status = ice_read_phy_reg_e822(hw, port, P_REG_TX_OV_STATUS, &val);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_OV_STATUS for port %u, status %d\n",
+			  port, status);
+		return status;
+	}
+
+	if (!(val & P_REG_TX_OV_STATUS_OV_M)) {
+		ice_debug(hw, ICE_DBG_PTP, "Tx offset is not yet valid for port %u\n",
+			  port);
+		return ICE_ERR_NOT_READY;
+	}
+
+	/* Likewise, the Rx offset must be valid before exiting bypass */
+	status = ice_read_phy_reg_e822(hw, port, P_REG_RX_OV_STATUS, &val);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_OV_STATUS for port %u, status %d\n",
+			  port, status);
+		return status;
+	}
+
+	/* Check the Rx status mask here; the Tx mask was a copy-paste error */
+	if (!(val & P_REG_RX_OV_STATUS_OV_M)) {
+		ice_debug(hw, ICE_DBG_PTP, "Rx offset is not yet valid for port %u\n",
+			  port);
+		return ICE_ERR_NOT_READY;
+	}
+
+	/* Program the total Tx offset now that vernier measurement is done */
+	status = ice_phy_cfg_tx_offset_e822(hw, port);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to program total Tx offset for port %u, status %d\n",
+			  port, status);
+		return status;
+	}
+
+	/* Program the total Rx offset as well */
+	status = ice_phy_cfg_rx_offset_e822(hw, port);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to program total Rx offset for port %u, status %d\n",
+			  port, status);
+		return status;
+	}
+
+	/* Exit bypass mode now that the offset has been updated */
+	status = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to read P_REG_PS for port %u, status %d\n",
+			  port, status);
+		return status;
+	}
+
+	if (!(val & P_REG_PS_BYPASS_MODE_M))
+		ice_debug(hw, ICE_DBG_PTP, "Port %u not in bypass mode\n",
+			  port);
+
+	val &= ~P_REG_PS_BYPASS_MODE_M;
+	status = ice_write_phy_reg_e822(hw, port, P_REG_PS, val);
+	if (status) {
+		ice_debug(hw, ICE_DBG_PTP, "Failed to disable bypass for port %u, status %d\n",
+			  port, status);
+		return status;
+	}
+
+	ice_info(hw, "Exiting bypass mode on PHY port %u\n", port);
+
+	return ICE_SUCCESS;
+}
+
/* E810 functions
*
* The following functions operate on the E810 series devices which use