#include "otx2_ethdev.h"
-static inline void
-otx2_eth_set_rx_function(struct rte_eth_dev *eth_dev)
-{
- RTE_SET_USED(eth_dev);
-}
-
-static inline void
-otx2_eth_set_tx_function(struct rte_eth_dev *eth_dev)
-{
- RTE_SET_USED(eth_dev);
-}
-
static inline uint64_t
nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
{
return otx2_mbox_process(mbox);
}
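+/* Mailbox requests to the AF to start/stop packet reception on this NIX LF */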
+static int
+npc_rx_enable(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+
+ otx2_mbox_alloc_msg_nix_lf_start_rx(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
+static int
+npc_rx_disable(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+
+ otx2_mbox_alloc_msg_nix_lf_stop_rx(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
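+/* CGX is managed by the AF; VFs have no CGX mapping, so the CGX
+ * helpers below are no-ops for VFs.
+ */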
+static int
+nix_cgx_start_link_event(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+
+ if (otx2_dev_is_vf(dev))
+ return 0;
+
+ otx2_mbox_alloc_msg_cgx_start_linkevents(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
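+/* CGX-level internal loopback, requested via dev_conf.lpbk_mode */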
+static int
+cgx_intlbk_enable(struct otx2_eth_dev *dev, bool en)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+
+ if (otx2_dev_is_vf(dev))
+ return 0;
+
+ if (en)
+ otx2_mbox_alloc_msg_cgx_intlbk_enable(mbox);
+ else
+ otx2_mbox_alloc_msg_cgx_intlbk_disable(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
+static int
+nix_cgx_stop_link_event(struct otx2_eth_dev *dev)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+
+ if (otx2_dev_is_vf(dev))
+ return 0;
+
+ otx2_mbox_alloc_msg_cgx_stop_linkevents(mbox);
+
+ return otx2_mbox_process(mbox);
+}
+
static inline void
nix_rx_queue_reset(struct otx2_eth_rxq *rxq)
{
/* Many to one reduction */
aq->cq.qint_idx = qid % dev->qints;
+ /* Map CQ0 [RQ0] to CINT0 and so on, up to a max of 64 IRQs */
+ aq->cq.cint_idx = qid;
if (otx2_ethdev_fixup_is_limit_cq_full(dev)) {
uint16_t min_rx_drop;
return NIX_MAXSQESZ_W8;
}
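+/* Translate the ethdev Rx offload config into fastpath NIX_RX_* flags */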
+static uint16_t
+nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct rte_eth_conf *conf = &data->dev_conf;
+ struct rte_eth_rxmode *rxmode = &conf->rxmode;
+ uint16_t flags = 0;
+
+ if (rxmode->mq_mode == ETH_MQ_RX_RSS)
+ flags |= NIX_RX_OFFLOAD_RSS_F;
+
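+ /* Both the L4 and the inner/outer L3 checksum offloads map to the
+ * same fastpath checksum flag.
+ */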
+ if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM))
+ flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
+
+ if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+ flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
+
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ flags |= NIX_RX_MULTI_SEG_F;
+
+ if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_QINQ_STRIP))
+ flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
+
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+ flags |= NIX_RX_OFFLOAD_TSTAMP_F;
+
+ return flags;
+}
+
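+/* Translate the ethdev Tx offload config into fastpath NIX_TX_* flags */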
+static uint16_t
+nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ uint64_t conf = dev->tx_offloads;
+ uint16_t flags = 0;
+
+ /* The fastpath depends on the exact values of these mbuf flags */
+ RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
+ RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
+ RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
+
+ if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
+ conf & DEV_TX_OFFLOAD_QINQ_INSERT)
+ flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
+
+ if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
+ conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
+ flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
+
+ if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
+ conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
+ conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
+ conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
+ flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
+
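+ /* NOFF (no fast free) is set whenever fast free is not requested */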
+ if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
+ flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
+
+ if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
+ flags |= NIX_TX_MULTI_SEG_F;
+
+ return flags;
+}
+
static int
nix_sq_init(struct otx2_eth_txq *txq)
{
goto fail;
}
- if (rte_eal_iova_mode() != RTE_IOVA_VA) {
- otx2_err("iova mode should be va");
- goto fail;
- }
-
if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
otx2_err("Setting link speed/duplex not supported");
goto fail;
if (dev->configured == 1) {
otx2_nix_rxchan_bpid_cfg(eth_dev, false);
otx2_nix_vlan_fini(eth_dev);
+ otx2_flow_free_all_resources(dev);
oxt2_nix_unregister_queue_irqs(eth_dev);
+ if (eth_dev->data->dev_conf.intr_conf.rxq)
+ oxt2_nix_unregister_cq_irqs(eth_dev);
nix_set_nop_rxtx_function(eth_dev);
rc = nix_store_queue_cfg_and_then_release(eth_dev);
if (rc)
nix_lf_free(dev);
}
- if (otx2_dev_is_A0(dev) &&
+ if (otx2_dev_is_Ax(dev) &&
(txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
(txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
dev->rx_offloads = rxmode->offloads;
dev->tx_offloads = txmode->offloads;
+ dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
+ dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);
dev->rss_info.rss_grps = NIX_RSS_GRPS;
nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
goto free_nix_lf;
}
+ /* Register cq IRQs */
+ if (eth_dev->data->dev_conf.intr_conf.rxq) {
+ if (eth_dev->data->nb_rx_queues > dev->cints) {
+ otx2_err("Rx interrupt cannot be enabled, rxq > %d",
+ dev->cints);
+ goto free_nix_lf;
+ }
+ /* The Rx interrupt feature cannot work with vector mode because
+ * vector mode does not process packets unless at least 4 have been
+ * received, while CQ interrupts are generated even for a single
+ * packet in the CQ.
+ */
+ dev->scalar_ena = true;
+
+ rc = oxt2_nix_register_cq_irqs(eth_dev);
+ if (rc) {
+ otx2_err("Failed to register CQ interrupts rc=%d", rc);
+ goto free_nix_lf;
+ }
+ }
+
+ /* Configure loopback mode */
+ rc = cgx_intlbk_enable(dev, eth_dev->data->dev_conf.lpbk_mode);
+ if (rc) {
+ otx2_err("Failed to configure cgx loop back mode rc=%d", rc);
+ goto free_nix_lf;
+ }
+
rc = otx2_nix_rxchan_bpid_cfg(eth_dev, true);
if (rc) {
otx2_err("Failed to configure nix rx chan bpid cfg rc=%d", rc);
return rc;
}
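+/* Stop: disable link events and NPC Rx first, then drain and stop the queues */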
+static void
+otx2_nix_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ struct rte_mbuf *rx_pkts[32];
+ struct otx2_eth_rxq *rxq;
+ int count, i, j, rc;
+
+ nix_cgx_stop_link_event(dev);
+ npc_rx_disable(dev);
+
+ /* Stop rx queues and free any pending packets */
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ rc = otx2_nix_rx_queue_stop(eth_dev, i);
+ if (rc)
+ continue;
+
+ rxq = eth_dev->data->rx_queues[i];
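+ /* Drain remaining packets using the no-offload burst handler */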
+ count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
+ while (count) {
+ for (j = 0; j < count; j++)
+ rte_pktmbuf_free(rx_pkts[j]);
+ count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
+ }
+ }
+
+ /* Stop tx queues */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ otx2_nix_tx_queue_stop(eth_dev, i);
+}
+
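+/* Start: enable queues and flow control, then NPC Rx and CGX link events */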
+static int
+otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+ int rc, i;
+
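+ /* Revalidate the MTU before starting the Rx queues */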
+ if (eth_dev->data->nb_rx_queues != 0) {
+ rc = otx2_nix_recalc_mtu(eth_dev);
+ if (rc)
+ return rc;
+ }
+
+ /* Start rx queues */
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ rc = otx2_nix_rx_queue_start(eth_dev, i);
+ if (rc)
+ return rc;
+ }
+
+ /* Start tx queues */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ rc = otx2_nix_tx_queue_start(eth_dev, i);
+ if (rc)
+ return rc;
+ }
+
+ rc = otx2_nix_update_flow_ctrl_mode(eth_dev);
+ if (rc) {
+ otx2_err("Failed to update flow ctrl mode %d", rc);
+ return rc;
+ }
+
+ rc = npc_rx_enable(dev);
+ if (rc) {
+ otx2_err("Failed to enable NPC rx %d", rc);
+ return rc;
+ }
+
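+ /* Mark link config as in progress while CGX link events are started */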
+ otx2_nix_toggle_flag_link_cfg(dev, true);
+
+ rc = nix_cgx_start_link_event(dev);
+ if (rc) {
+ otx2_err("Failed to start cgx link event %d", rc);
+ goto rx_disable;
+ }
+
+ otx2_nix_toggle_flag_link_cfg(dev, false);
+ otx2_eth_set_tx_function(eth_dev);
+ otx2_eth_set_rx_function(eth_dev);
+
+ return 0;
+
+rx_disable:
+ npc_rx_disable(dev);
+ otx2_nix_toggle_flag_link_cfg(dev, false);
+ return rc;
+}
+
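+/* Forward declarations: definitions follow the ops table that references them */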
+static int otx2_nix_dev_reset(struct rte_eth_dev *eth_dev);
+static void otx2_nix_dev_close(struct rte_eth_dev *eth_dev);
+
/* Initialize and register driver with DPDK Application */
static const struct eth_dev_ops otx2_eth_dev_ops = {
.dev_infos_get = otx2_nix_info_get,
.tx_queue_release = otx2_nix_tx_queue_release,
.rx_queue_setup = otx2_nix_rx_queue_setup,
.rx_queue_release = otx2_nix_rx_queue_release,
+ .dev_start = otx2_nix_dev_start,
+ .dev_stop = otx2_nix_dev_stop,
+ .dev_close = otx2_nix_dev_close,
.tx_queue_start = otx2_nix_tx_queue_start,
.tx_queue_stop = otx2_nix_tx_queue_stop,
.rx_queue_start = otx2_nix_rx_queue_start,
.rx_queue_stop = otx2_nix_rx_queue_stop,
+ .dev_set_link_up = otx2_nix_dev_set_link_up,
+ .dev_set_link_down = otx2_nix_dev_set_link_down,
.dev_supported_ptypes_get = otx2_nix_supported_ptypes_get,
+ .dev_reset = otx2_nix_dev_reset,
.stats_get = otx2_nix_dev_stats_get,
.stats_reset = otx2_nix_dev_stats_reset,
.get_reg = otx2_nix_dev_get_reg,
+ .mtu_set = otx2_nix_mtu_set,
.mac_addr_add = otx2_nix_mac_addr_add,
.mac_addr_remove = otx2_nix_mac_addr_del,
.mac_addr_set = otx2_nix_mac_addr_set,
.filter_ctrl = otx2_nix_dev_filter_ctrl,
.get_module_info = otx2_nix_get_module_info,
.get_module_eeprom = otx2_nix_get_module_eeprom,
+ .fw_version_get = otx2_nix_fw_version_get,
.flow_ctrl_get = otx2_nix_flow_ctrl_get,
.flow_ctrl_set = otx2_nix_flow_ctrl_set,
.timesync_enable = otx2_nix_timesync_enable,
.vlan_offload_set = otx2_nix_vlan_offload_set,
.vlan_filter_set = otx2_nix_vlan_filter_set,
.vlan_strip_queue_set = otx2_nix_vlan_strip_queue_set,
+ .vlan_tpid_set = otx2_nix_vlan_tpid_set,
+ .vlan_pvid_set = otx2_nix_vlan_pvid_set,
+ .rx_queue_intr_enable = otx2_nix_rx_queue_intr_enable,
+ .rx_queue_intr_disable = otx2_nix_rx_queue_intr_disable,
};
static inline int
dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
- if (otx2_dev_is_A0(dev)) {
+ if (otx2_dev_is_Ax(dev)) {
dev->hwcap |= OTX2_FIXUP_F_MIN_4K_Q;
dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL;
}
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ /* Clear the flag since we are closing down */
+ dev->configured = 0;
+
/* Disable nix bpid config */
otx2_nix_rxchan_bpid_cfg(eth_dev, false);
+ npc_rx_disable(dev);
+
/* Disable vlan offloads */
otx2_nix_vlan_fini(eth_dev);
if (otx2_ethdev_is_ptp_en(dev))
otx2_nix_timesync_disable(eth_dev);
+ nix_cgx_stop_link_event(dev);
+
/* Free up SQs */
for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
otx2_nix_tx_queue_release(eth_dev->data->tx_queues[i]);
/* Unregister queue irqs */
oxt2_nix_unregister_queue_irqs(eth_dev);
+ /* Unregister cq irqs */
+ if (eth_dev->data->dev_conf.intr_conf.rxq)
+ oxt2_nix_unregister_cq_irqs(eth_dev);
+
rc = nix_lf_free(dev);
if (rc)
otx2_err("Failed to free nix lf, rc=%d", rc);
return 0;
}
+static void
+otx2_nix_dev_close(struct rte_eth_dev *eth_dev)
+{
+ otx2_eth_dev_uninit(eth_dev, true);
+}
+
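+/* Reset: uninit followed by a fresh init of the same device */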
+static int
+otx2_nix_dev_reset(struct rte_eth_dev *eth_dev)
+{
+ int rc;
+
+ rc = otx2_eth_dev_uninit(eth_dev, false);
+ if (rc)
+ return rc;
+
+ return otx2_eth_dev_init(eth_dev);
+}
+
static int
nix_remove(struct rte_pci_device *pci_dev)
{
static struct rte_pci_driver pci_nix = {
.id_table = pci_nix_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA |
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
RTE_PCI_DRV_INTR_LSC,
.probe = nix_probe,
.remove = nix_remove,