struct cgx_link_user_info *link)
{
struct otx2_eth_dev *otx2_dev = (struct otx2_eth_dev *)dev;
- struct rte_eth_dev *eth_dev = otx2_dev->eth_dev;
struct rte_eth_link eth_link;
+ struct rte_eth_dev *eth_dev;
- if (!link || !dev || !eth_dev->data->dev_conf.intr_conf.lsc)
+ if (!link || !dev)
+ return;
+
+ eth_dev = otx2_dev->eth_dev;
+ if (!eth_dev || !eth_dev->data->dev_conf.intr_conf.lsc)
return;
if (nix_wait_for_link_cfg(otx2_dev)) {
_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
+/*
+ * Report link parameters for an LBK (loopback) interface, which has no
+ * physical PHY behind it: the link is unconditionally up at a fixed
+ * 100G, full duplex, with autonegotiation disabled.
+ *
+ * Always returns 0 (cannot fail).
+ */
+static int
+lbk_link_update(struct rte_eth_link *link)
+{
+ link->link_status = ETH_LINK_UP;
+ link->link_speed = ETH_SPEED_NUM_100G;
+ link->link_autoneg = ETH_LINK_FIXED;
+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ return 0;
+}
+
+/*
+ * Query the current CGX link state from the AF over the mailbox and
+ * fill @link with the reported status, speed and duplex.
+ *
+ * Returns 0 on success, or the (non-zero) error code from the mbox
+ * round trip, in which case @link is left untouched.
+ */
+static int
+cgx_link_update(struct otx2_eth_dev *dev, struct rte_eth_link *link)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct cgx_link_info_msg *rsp;
+ int rc;
+ /* Synchronous request/response: rsp points into the mbox region */
+ otx2_mbox_alloc_msg_cgx_get_linkinfo(mbox);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ link->link_status = rsp->link_info.link_up;
+ link->link_speed = rsp->link_info.speed;
+ link->link_autoneg = ETH_LINK_AUTONEG;
+
+ /* NOTE(review): assigns the firmware full_duplex flag straight into
+  * link_duplex — assumes its encoding matches ETH_LINK_*_DUPLEX
+  * (half-duplex case relies on the caller's zeroed link struct);
+  * confirm against the cgx_link_user_info definition.
+  */
+ if (rsp->link_info.full_duplex)
+ link->link_duplex = rsp->link_info.full_duplex;
+ return 0;
+}
+
+/*
+ * .link_update ethdev op: refresh the device's link status.
+ * SDP interfaces have no link to report; LBK interfaces report a fixed
+ * synthetic link; CGX-backed interfaces query the AF via mailbox.
+ * Returns 0 or a negative/non-zero error from the mbox query, or the
+ * result of rte_eth_linkstatus_set() (which reports whether the status
+ * changed).
+ */
int
otx2_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
- struct otx2_mbox *mbox = dev->mbox;
- struct cgx_link_info_msg *rsp;
struct rte_eth_link link;
int rc;
RTE_SET_USED(wait_to_complete);
+ /* Zero the struct so any field not set below reads as 0 */
+ memset(&link, 0, sizeof(struct rte_eth_link));
- if (otx2_dev_is_lbk(dev))
+ /* SDP interfaces do not have a link state to query */
+ if (otx2_dev_is_sdp(dev))
return 0;
- otx2_mbox_alloc_msg_cgx_get_linkinfo(mbox);
- rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ /* LBK: fixed synthetic link; CGX: ask the AF over mbox */
+ if (otx2_dev_is_lbk(dev))
+ rc = lbk_link_update(&link);
+ else
+ rc = cgx_link_update(dev, &link);
+
if (rc)
return rc;
- link.link_status = rsp->link_info.link_up;
- link.link_speed = rsp->link_info.speed;
- link.link_autoneg = ETH_LINK_AUTONEG;
+ return rte_eth_linkstatus_set(eth_dev, &link);
+}
- if (rsp->link_info.full_duplex)
- link.link_duplex = rsp->link_info.full_duplex;
+/*
+ * Ask the AF (via mbox) to force the CGX link up (@enable = 1) or
+ * down (@enable = 0).  Returns the mbox processing result (0 on
+ * success).
+ */
+static int
+nix_dev_set_link_state(struct rte_eth_dev *eth_dev, uint8_t enable)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct cgx_set_link_state_msg *req;
- return rte_eth_linkstatus_set(eth_dev, &link);
+ /* NOTE(review): alloc return is not NULL-checked before the write to
+  * req->enable — presumably mbox message allocation cannot fail here;
+  * confirm against otx2_mbox_alloc_msg_* semantics.
+  */
+ req = otx2_mbox_alloc_msg_cgx_set_link_state(mbox);
+ req->enable = enable;
+ return otx2_mbox_process(mbox);
+}
+
+/*
+ * .dev_set_link_up ethdev op: force the physical link up, then start
+ * all Tx queues.  Not supported on VFs or SDP interfaces (-ENOTSUP).
+ * Returns 0 on success or the error from the link-state mbox request.
+ */
+int
+otx2_nix_dev_set_link_up(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int rc, i;
+
+ /* Only the PF with a real CGX link can force link state */
+ if (otx2_dev_is_vf_or_sdp(dev))
+ return -ENOTSUP;
+
+ rc = nix_dev_set_link_state(eth_dev, 1);
+ if (rc)
+ goto done;
+
+ /* Start tx queues */
+ /* NOTE(review): per-queue start return codes are ignored — a queue
+  * that fails to start is not reported to the caller; confirm this
+  * best-effort behavior is intended.
+  */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ otx2_nix_tx_queue_start(eth_dev, i);
+
+done:
+ return rc;
+}
+
+/*
+ * .dev_set_link_down ethdev op: stop all Tx queues first, then force
+ * the physical link down (reverse order of set_link_up, so no queue
+ * transmits on a dead link).  Not supported on VFs or SDP interfaces
+ * (-ENOTSUP).  Returns the result of the link-state mbox request.
+ */
+int
+otx2_nix_dev_set_link_down(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int i;
+
+ /* Only the PF with a real CGX link can force link state */
+ if (otx2_dev_is_vf_or_sdp(dev))
+ return -ENOTSUP;
+
+ /* Stop tx queues */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ otx2_nix_tx_queue_stop(eth_dev, i);
+
+ return nix_dev_set_link_state(eth_dev, 0);
}