return NIX_TX_OFFLOAD_CAPA;
}
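+/* Device callbacks registered with the common otx2 dev layer;
+ * link_status_update is invoked on CGX link change events.
+ */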
+static const struct otx2_dev_ops otx2_dev_ops = {
+ .link_status_update = otx2_eth_dev_link_status_update,
+};
+
static int
nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
{
static const struct eth_dev_ops otx2_eth_dev_ops = {
.dev_infos_get = otx2_nix_info_get,
.dev_configure = otx2_nix_configure,
+ .link_update = otx2_nix_link_update,
.get_reg = otx2_nix_dev_get_reg,
};
goto error;
}
}
+ /* Device generic callbacks */
+ dev->ops = &otx2_dev_ops;
+ dev->eth_dev = eth_dev;
/* Grab the NPA LF if required */
rc = otx2_npa_lf_init(pci_dev, dev);
uint8_t max_mac_entries;
uint8_t lf_tx_stats;
uint8_t lf_rx_stats;
+ uint16_t flags;
uint16_t cints;
uint16_t qints;
uint8_t configured;
struct otx2_qint qints_mem[RTE_MAX_QUEUES_PER_PORT];
struct otx2_rss_info rss_info;
struct otx2_npc_flow_info npc_flow;
+ struct rte_eth_dev *eth_dev;
} __rte_cache_aligned;
static inline struct otx2_eth_dev *
void otx2_nix_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *dev_info);
+/* Link */
+void otx2_nix_toggle_flag_link_cfg(struct otx2_eth_dev *dev, bool set);
+int otx2_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete);
+void otx2_eth_dev_link_status_update(struct otx2_dev *dev,
+ struct cgx_link_user_info *link);
+
/* IRQ */
int otx2_nix_register_irqs(struct rte_eth_dev *eth_dev);
int oxt2_nix_register_queue_irqs(struct rte_eth_dev *eth_dev);
--- /dev/null
+++ b/drivers/net/octeontx2/otx2_link.c
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include <rte_common.h>
+#include <rte_ethdev_pci.h>
+
+#include "otx2_ethdev.h"
+
+void
+otx2_nix_toggle_flag_link_cfg(struct otx2_eth_dev *dev, bool set)
+{
+ if (set)
+ dev->flags |= OTX2_LINK_CFG_IN_PROGRESS_F;
+ else
+ dev->flags &= ~OTX2_LINK_CFG_IN_PROGRESS_F;
+
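+	/* Ensure the flag update is visible to readers polling in
+	 * nix_wait_for_link_cfg() before any further link config is done.
+	 */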
+ rte_wmb();
+}
+
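+/* Poll for up to ~1s for any in-progress link configuration to finish */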
+static inline int
+nix_wait_for_link_cfg(struct otx2_eth_dev *dev)
+{
+ uint16_t wait = 1000;
+
+ do {
+ rte_rmb();
+ if (!(dev->flags & OTX2_LINK_CFG_IN_PROGRESS_F))
+ break;
+ wait--;
+ rte_delay_ms(1);
+ } while (wait);
+
+ return wait ? 0 : -1;
+}
+
+static void
+nix_link_status_print(struct rte_eth_dev *eth_dev, struct rte_eth_link *link)
+{
+ if (link && link->link_status)
+ otx2_info("Port %d: Link Up - speed %u Mbps - %s",
+ (int)(eth_dev->data->port_id),
+ (uint32_t)link->link_speed,
+ link->link_duplex == ETH_LINK_FULL_DUPLEX ?
+ "full-duplex" : "half-duplex");
+ else
+ otx2_info("Port %d: Link Down", (int)(eth_dev->data->port_id));
+}
+
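+/*
+ * Called by the common otx2 dev layer when the AF notifies a CGX link
+ * change. Publishes the new state to ethdev and runs the application's
+ * LSC callback when link state interrupts are enabled.
+ */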
+void
+otx2_eth_dev_link_status_update(struct otx2_dev *dev,
+ struct cgx_link_user_info *link)
+{
+	struct otx2_eth_dev *otx2_dev = (struct otx2_eth_dev *)dev;
+	struct rte_eth_dev *eth_dev;
+	struct rte_eth_link eth_link;
+
+	if (!link || !dev)
+		return;
+
+	eth_dev = otx2_dev->eth_dev;
+	if (!eth_dev || !eth_dev->data->dev_conf.intr_conf.lsc)
+		return;
+
+ if (nix_wait_for_link_cfg(otx2_dev)) {
+ otx2_err("Timeout waiting for link_cfg to complete");
+ return;
+ }
+
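+	/* Translate the CGX link info into the ethdev link format */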
+ eth_link.link_status = link->link_up;
+ eth_link.link_speed = link->speed;
+ eth_link.link_autoneg = ETH_LINK_AUTONEG;
+ eth_link.link_duplex = link->full_duplex;
+
+ /* Print link info */
+	nix_link_status_print(eth_dev, &eth_link);
+
+ /* Update link info */
+	rte_eth_linkstatus_set(eth_dev, &eth_link);
+
+ /* Set the flag and execute application callbacks */
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+}
+
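+/* The link_update eth_dev op; queries the current CGX link state
+ * from the AF over mbox.
+ */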
+int
+otx2_nix_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct otx2_mbox *mbox = dev->mbox;
+ struct cgx_link_info_msg *rsp;
+ struct rte_eth_link link;
+ int rc;
+
+	RTE_SET_USED(wait_to_complete);
+	memset(&link, 0, sizeof(struct rte_eth_link));
+
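+	/* LBK (loopback) devices have no CGX port, hence no link state */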
+ if (otx2_dev_is_lbk(dev))
+ return 0;
+
+ otx2_mbox_alloc_msg_cgx_get_linkinfo(mbox);
+ rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
+ if (rc)
+ return rc;
+
+ link.link_status = rsp->link_info.link_up;
+ link.link_speed = rsp->link_info.speed;
+ link.link_autoneg = ETH_LINK_AUTONEG;
+
+ if (rsp->link_info.full_duplex)
+ link.link_duplex = rsp->link_info.full_duplex;
+
+ return rte_eth_linkstatus_set(eth_dev, &link);
+}