* applicable to E822 devices.
*/
static enum ice_status
-ice_read_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 *val)
+ice_read_cgu_reg_e822(struct ice_hw *hw, u16 addr, u32 *val)
{
struct ice_sbq_msg_input cgu_msg;
enum ice_status status;
* applicable to E822 devices.
*/
static enum ice_status
-ice_write_cgu_reg_e822(struct ice_hw *hw, u32 addr, u32 val)
+ice_write_cgu_reg_e822(struct ice_hw *hw, u16 addr, u32 val)
{
struct ice_sbq_msg_input cgu_msg;
enum ice_status status;
#define LINE_UI_25G_100G 256 /* 6600 UIs is 256 nanoseconds at 25Gb/100Gb */
/* Program the 10Gb/40Gb conversion ratio */
- uix = (tu_per_sec * LINE_UI_10G_40G) / 390625000;
+ uix = DIV_64BIT(tu_per_sec * LINE_UI_10G_40G, 390625000);
status = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_10G_40G_L,
uix);
}
/* Program the 25Gb/100Gb conversion ratio */
- uix = (tu_per_sec * LINE_UI_25G_100G) / 390625000;
+ uix = DIV_64BIT(tu_per_sec * LINE_UI_25G_100G, 390625000);
status = ice_write_64b_phy_reg_e822(hw, port, P_REG_UIX66_25G_100G_L,
uix);
/* P_REG_PAR_TX_TUS */
if (e822_vernier[link_spd].tx_par_clk)
- phy_tus = tu_per_sec / e822_vernier[link_spd].tx_par_clk;
+ phy_tus = DIV_64BIT(tu_per_sec,
+ e822_vernier[link_spd].tx_par_clk);
else
phy_tus = 0;
/* P_REG_PAR_RX_TUS */
if (e822_vernier[link_spd].rx_par_clk)
- phy_tus = tu_per_sec / e822_vernier[link_spd].rx_par_clk;
+ phy_tus = DIV_64BIT(tu_per_sec,
+ e822_vernier[link_spd].rx_par_clk);
else
phy_tus = 0;
/* P_REG_PCS_TX_TUS */
if (e822_vernier[link_spd].tx_pcs_clk)
- phy_tus = tu_per_sec / e822_vernier[link_spd].tx_pcs_clk;
+ phy_tus = DIV_64BIT(tu_per_sec,
+ e822_vernier[link_spd].tx_pcs_clk);
else
phy_tus = 0;
/* P_REG_PCS_RX_TUS */
if (e822_vernier[link_spd].rx_pcs_clk)
- phy_tus = tu_per_sec / e822_vernier[link_spd].rx_pcs_clk;
+ phy_tus = DIV_64BIT(tu_per_sec,
+ e822_vernier[link_spd].rx_pcs_clk);
else
phy_tus = 0;
/* P_REG_DESK_PAR_TX_TUS */
if (e822_vernier[link_spd].tx_desk_rsgb_par)
- phy_tus = tu_per_sec / e822_vernier[link_spd].tx_desk_rsgb_par;
+ phy_tus = DIV_64BIT(tu_per_sec,
+ e822_vernier[link_spd].tx_desk_rsgb_par);
else
phy_tus = 0;
/* P_REG_DESK_PAR_RX_TUS */
if (e822_vernier[link_spd].rx_desk_rsgb_par)
- phy_tus = tu_per_sec / e822_vernier[link_spd].rx_desk_rsgb_par;
+ phy_tus = DIV_64BIT(tu_per_sec,
+ e822_vernier[link_spd].rx_desk_rsgb_par);
else
phy_tus = 0;
/* P_REG_DESK_PCS_TX_TUS */
if (e822_vernier[link_spd].tx_desk_rsgb_pcs)
- phy_tus = tu_per_sec / e822_vernier[link_spd].tx_desk_rsgb_pcs;
+ phy_tus = DIV_64BIT(tu_per_sec,
+ e822_vernier[link_spd].tx_desk_rsgb_pcs);
else
phy_tus = 0;
/* P_REG_DESK_PCS_RX_TUS */
if (e822_vernier[link_spd].rx_desk_rsgb_pcs)
- phy_tus = tu_per_sec / e822_vernier[link_spd].rx_desk_rsgb_pcs;
+ phy_tus = DIV_64BIT(tu_per_sec,
+ e822_vernier[link_spd].rx_desk_rsgb_pcs);
else
phy_tus = 0;
* overflows 64 bit integer arithmetic, so break it up into two
* divisions by 1e4 first then by 1e7.
*/
- fixed_offset = tu_per_sec / 10000;
+ fixed_offset = DIV_64BIT(tu_per_sec, 10000);
fixed_offset *= e822_vernier[link_spd].tx_fixed_delay;
- fixed_offset /= 10000000;
+ fixed_offset = DIV_64BIT(fixed_offset, 10000000);
return fixed_offset;
}
enum ice_ptp_fec_mode fec_mode, u64 *pmd_adj)
{
u64 cur_freq, clk_incval, tu_per_sec, mult, adj;
+ u32 pmd_adj_divisor, val;
enum ice_status status;
u8 pmd_align;
- u32 val;
status = ice_read_phy_reg_e822(hw, port, P_REG_PMD_ALIGNMENT, &val);
if (status) {
/* Calculate TUs per second */
tu_per_sec = cur_freq * clk_incval;
+ /* Get the link speed dependent PMD adjustment divisor */
+ pmd_adj_divisor = e822_vernier[link_spd].pmd_adj_divisor;
+
/* The PMD alignment adjustment measurement depends on the link speed,
* and whether FEC is enabled. For each link speed, the alignment
* adjustment is calculated by dividing a value by the length of
* divide by 125, and then handle remaining divisor based on the link
* speed pmd_adj_divisor value.
*/
- adj = tu_per_sec / 125;
+ adj = DIV_64BIT(tu_per_sec, 125);
adj *= mult;
- adj /= e822_vernier[link_spd].pmd_adj_divisor;
+ adj = DIV_64BIT(adj, pmd_adj_divisor);
/* Finally, for 25G-RS and 50G-RS, a further adjustment for the Rx
* cycle count is necessary.
if (rx_cycle) {
mult = (4 - rx_cycle) * 40;
- cycle_adj = tu_per_sec / 125;
+ cycle_adj = DIV_64BIT(tu_per_sec, 125);
cycle_adj *= mult;
- cycle_adj /= e822_vernier[link_spd].pmd_adj_divisor;
+ cycle_adj = DIV_64BIT(cycle_adj, pmd_adj_divisor);
adj += cycle_adj;
}
if (rx_cycle) {
mult = rx_cycle * 40;
- cycle_adj = tu_per_sec / 125;
+ cycle_adj = DIV_64BIT(tu_per_sec, 125);
cycle_adj *= mult;
- cycle_adj /= e822_vernier[link_spd].pmd_adj_divisor;
+ cycle_adj = DIV_64BIT(cycle_adj, pmd_adj_divisor);
adj += cycle_adj;
}
* overflows 64 bit integer arithmetic, so break it up into two
* divisions by 1e4 first then by 1e7.
*/
- fixed_offset = tu_per_sec / 10000;
+ fixed_offset = DIV_64BIT(tu_per_sec, 10000);
fixed_offset *= e822_vernier[link_spd].rx_fixed_delay;
- fixed_offset /= 10000000;
+ fixed_offset = DIV_64BIT(fixed_offset, 10000000);
return fixed_offset;
}
return ICE_SUCCESS;
}
+/* E810T SMA functions
+ *
+ * The following functions operate specifically on E810T hardware and are used
+ * to access the extended GPIOs available.
+ */
+
+/**
+ * ice_get_pca9575_handle
+ * @hw: pointer to the hw struct
+ * @pca9575_handle: GPIO controller's handle
+ *
+ * Find and return the GPIO controller's handle in the netlist.
+ * When found - the value will be cached in the hw structure and following calls
+ * will return cached value
+ */
+static enum ice_status
+ice_get_pca9575_handle(struct ice_hw *hw, __le16 *pca9575_handle)
+{
+	struct ice_aqc_get_link_topo *cmd;
+	struct ice_aq_desc desc;
+	enum ice_status status;
+	u8 idx;
+
+	/* NOTE(review): pca9575_handle is declared __le16 * here, but the
+	 * callers below (ice_read_sma_ctrl_e810t and friends) pass plain
+	 * u16 * — presumably fine on little-endian targets, but worth
+	 * confirming against the project's endianness annotations.
+	 */
+	if (!hw || !pca9575_handle)
+		return ICE_ERR_PARAM;
+
+	/* If handle was read previously return cached value */
+	if (hw->io_expander_handle) {
+		*pca9575_handle = hw->io_expander_handle;
+		return ICE_SUCCESS;
+	}
+
+	/* If handle was not detected read it from the netlist */
+	cmd = &desc.params.get_link_topo;
+	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+
+	/* Set node type to GPIO controller */
+	cmd->addr.topo_params.node_type_ctx =
+		(ICE_AQC_LINK_TOPO_NODE_TYPE_M &
+		 ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
+
+/* Netlist index of the PCA9575 node differs per board flavor */
+#define SW_PCA9575_SFP_TOPO_IDX		2
+#define SW_PCA9575_QSFP_TOPO_IDX	1
+
+	/* Check if the SW IO expander controlling SMA exists in the netlist. */
+	if (hw->device_id == ICE_DEV_ID_E810C_SFP)
+		idx = SW_PCA9575_SFP_TOPO_IDX;
+	else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
+		idx = SW_PCA9575_QSFP_TOPO_IDX;
+	else
+		return ICE_ERR_NOT_SUPPORTED;
+
+	cmd->addr.topo_params.index = idx;
+
+	/* An AQ failure here is folded into "not supported" so callers can
+	 * treat a missing expander uniformly instead of seeing raw AQ errors.
+	 */
+	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+	if (status)
+		return ICE_ERR_NOT_SUPPORTED;
+
+	/* Verify if we found the right IO expander type */
+	if (desc.params.get_link_topo.node_part_num !=
+	    ICE_ACQ_GET_LINK_TOPO_NODE_NR_PCA9575)
+		return ICE_ERR_NOT_SUPPORTED;
+
+	/* If present save the handle and return it */
+	hw->io_expander_handle = desc.params.get_link_topo.addr.handle;
+	*pca9575_handle = hw->io_expander_handle;
+
+	return ICE_SUCCESS;
+}
+
+/**
+ * ice_read_e810t_pca9575_reg
+ * @hw: pointer to the hw struct
+ * @offset: GPIO controller register offset
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Read the register from the GPIO controller
+ */
+enum ice_status
+ice_read_e810t_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
+{
+	struct ice_aqc_link_topo_addr link_topo;
+	enum ice_status status;
+	__le16 addr;
+
+	memset(&link_topo, 0, sizeof(link_topo));
+
+	/* Resolve (or use the cached) netlist handle of the PCA9575 */
+	status = ice_get_pca9575_handle(hw, &link_topo.handle);
+	if (status)
+		return status;
+
+	/* Address the node by the handle we just provided, not by lookup */
+	link_topo.topo_params.node_type_ctx =
+		(ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED <<
+		 ICE_AQC_LINK_TOPO_NODE_CTX_S);
+
+	addr = CPU_TO_LE16((u16)offset);
+
+	/* Single-byte I2C read of the expander register at 'offset' */
+	return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
+}
+
+/**
+ * ice_write_e810t_pca9575_reg
+ * @hw: pointer to the hw struct
+ * @offset: GPIO controller register offset
+ * @data: data to be written to the GPIO controller
+ *
+ * Write the data to the GPIO controller register
+ */
+enum ice_status
+ice_write_e810t_pca9575_reg(struct ice_hw *hw, u8 offset, u8 data)
+{
+	struct ice_aqc_link_topo_addr link_topo;
+	enum ice_status status;
+	__le16 addr;
+
+	memset(&link_topo, 0, sizeof(link_topo));
+
+	/* Resolve (or use the cached) netlist handle of the PCA9575 */
+	status = ice_get_pca9575_handle(hw, &link_topo.handle);
+	if (status)
+		return status;
+
+	/* Address the node by the handle we just provided, not by lookup */
+	link_topo.topo_params.node_type_ctx =
+		(ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED <<
+		 ICE_AQC_LINK_TOPO_NODE_CTX_S);
+
+	addr = CPU_TO_LE16((u16)offset);
+
+	/* Single-byte I2C write of the expander register at 'offset' */
+	return ice_aq_write_i2c(hw, link_topo, 0, addr, 1, &data, NULL);
+}
+
+/**
+ * ice_read_sma_ctrl_e810t
+ * @hw: pointer to the hw struct
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Read the SMA controller state. Only bits 3-7 in data are valid.
+ */
+enum ice_status ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
+{
+	enum ice_status status;
+	u16 handle;
+	u8 i;
+
+	status = ice_get_pca9575_handle(hw, &handle);
+	if (status)
+		return status;
+
+	*data = 0;
+
+	/* Read each SMA pin individually; pins live on the expander's
+	 * second port, hence the ICE_E810T_P1_OFFSET shift of the index.
+	 */
+	for (i = ICE_E810T_SMA_MIN_BIT; i <= ICE_E810T_SMA_MAX_BIT; i++) {
+		bool pin;
+
+		status = ice_aq_get_gpio(hw, handle, i + ICE_E810T_P1_OFFSET,
+					 &pin, NULL);
+		if (status)
+			break;
+		/* Pin state is inverted before being reported — presumably
+		 * the SMA GPIOs are active-low; confirm against the matching
+		 * inversion in ice_write_sma_ctrl_e810t and the board spec.
+		 */
+		*data |= (u8)(!pin) << i;
+	}
+
+	return status;
+}
+
+/**
+ * ice_write_sma_ctrl_e810t
+ * @hw: pointer to the hw struct
+ * @data: data to be written to the GPIO controller
+ *
+ * Write the data to the SMA controller. Only bits 3-7 in data are valid.
+ */
+enum ice_status ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
+{
+	enum ice_status status;
+	u16 handle;
+	u8 i;
+
+	status = ice_get_pca9575_handle(hw, &handle);
+	if (status)
+		return status;
+
+	/* Drive each SMA pin individually; pins live on the expander's
+	 * second port, hence the ICE_E810T_P1_OFFSET shift of the index.
+	 */
+	for (i = ICE_E810T_SMA_MIN_BIT; i <= ICE_E810T_SMA_MAX_BIT; i++) {
+		bool pin;
+
+		/* Inverted on write — mirrors the !pin inversion in
+		 * ice_read_sma_ctrl_e810t (presumably active-low pins).
+		 */
+		pin = !(data & (1 << i));
+		status = ice_aq_set_gpio(hw, handle, i + ICE_E810T_P1_OFFSET,
+					 pin, NULL);
+		if (status)
+			break;
+	}
+
+	return status;
+}
+
+/**
+ * ice_e810t_is_pca9575_present
+ * @hw: pointer to the hw struct
+ *
+ * Check if the SW IO expander is present in the netlist
+ */
+bool ice_e810t_is_pca9575_present(struct ice_hw *hw)
+{
+	enum ice_status status;
+	__le16 handle = 0;
+
+	/* Only E810T boards carry the PCA9575; skip the AQ lookup early
+	 * for everything else.
+	 */
+	if (!ice_is_e810t(hw))
+		return false;
+
+	/* Present only if the netlist lookup succeeds AND yields a
+	 * non-zero handle (handle stays 0 on the unsupported paths).
+	 */
+	status = ice_get_pca9575_handle(hw, &handle);
+	if (!status && handle)
+		return true;
+
+	return false;
+}
+
/* Device agnostic functions
*
* The following functions implement shared behavior common to both E822 and