net/cnxk: add device configuration operation
author Nithin Dabilpuram <ndabilpuram@marvell.com>
Wed, 23 Jun 2021 04:46:13 +0000 (10:16 +0530)
committer Jerin Jacob <jerinj@marvell.com>
Tue, 29 Jun 2021 19:41:53 +0000 (21:41 +0200)
Add the device configuration op for CN9K and CN10K. Most of the
device configuration is common between the two platforms, except for
some supported offloads.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
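For context, a minimal application-side sketch of the path this op serves (not part of the patch; the port id, queue counts and RSS fields are arbitrary example values). rte_eth_dev_configure() on a cnxk port dispatches to the per-platform dev_configure callback, which lands in the common cnxk_nix_configure():

#include <rte_ethdev.h>

static int
setup_port(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = { .mq_mode = ETH_MQ_RX_RSS },
		.rx_adv_conf.rss_conf = {
			/* Inner RSS; the PMD maps this to NIX flow key types */
			.rss_hf = ETH_RSS_IP | ETH_RSS_TCP |
				  ETH_RSS_LEVEL_INNERMOST,
		},
	};

	/* 4 Rx and 4 Tx queues are arbitrary example counts */
	return rte_eth_dev_configure(port_id, 4, 4, &conf);
}
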
doc/guides/nics/cnxk.rst
doc/guides/nics/features/cnxk.ini
doc/guides/nics/features/cnxk_vec.ini
doc/guides/nics/features/cnxk_vf.ini
drivers/net/cnxk/cn10k_ethdev.c
drivers/net/cnxk/cn9k_ethdev.c
drivers/net/cnxk/cnxk_ethdev.c
drivers/net/cnxk/cnxk_ethdev.h

doc/guides/nics/cnxk.rst
index ce57325..62fd1bb 100644
@@ -18,6 +18,8 @@ Features of the CNXK Ethdev PMD are:
 
 - SR-IOV VF
 - Lock-free Tx queue
+- Multiple queues for TX and RX
+- Receive Side Scaling (RSS)
 
 Prerequisites
 -------------
doc/guides/nics/features/cnxk.ini
index b426340..96dba2a 100644
@@ -8,6 +8,8 @@ Speed capabilities   = Y
 Lock-free Tx queue   = Y
 SR-IOV               = Y
 Multiprocess aware   = Y
+RSS hash             = Y
+Inner RSS            = Y
 Linux                = Y
 ARMv8                = Y
 Usage doc            = Y
doc/guides/nics/features/cnxk_vec.ini
index 292ac1e..616991c 100644
@@ -8,6 +8,8 @@ Speed capabilities   = Y
 Lock-free Tx queue   = Y
 SR-IOV               = Y
 Multiprocess aware   = Y
+RSS hash             = Y
+Inner RSS            = Y
 Linux                = Y
 ARMv8                = Y
 Usage doc            = Y
doc/guides/nics/features/cnxk_vf.ini
index bc2eb8a..a0bd2f1 100644
@@ -7,6 +7,8 @@
 Speed capabilities   = Y
 Lock-free Tx queue   = Y
 Multiprocess aware   = Y
+RSS hash             = Y
+Inner RSS            = Y
 Linux                = Y
 ARMv8                = Y
 Usage doc            = Y
drivers/net/cnxk/cn10k_ethdev.c
index ff8ce31..d971bbd 100644
@@ -3,6 +3,38 @@
  */
 #include "cn10k_ethdev.h"
 
+static int
+cn10k_nix_configure(struct rte_eth_dev *eth_dev)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       int rc;
+
+       /* Common nix configure */
+       rc = cnxk_nix_configure(eth_dev);
+       if (rc)
+               return rc;
+
+       plt_nix_dbg("Configured port%d platform specific rx_offload_flags=0x%x"
+                   " tx_offload_flags=0x%x",
+                   eth_dev->data->port_id, dev->rx_offload_flags,
+                   dev->tx_offload_flags);
+       return 0;
+}
+
+/* Update platform specific eth dev ops */
+static void
+nix_eth_dev_ops_override(void)
+{
+       static int init_once;
+
+       if (init_once)
+               return;
+       init_once = 1;
+
+       /* Update platform specific ops */
+       cnxk_eth_dev_ops.dev_configure = cn10k_nix_configure;
+}
+
 static int
 cn10k_nix_remove(struct rte_pci_device *pci_dev)
 {
@@ -26,6 +58,8 @@ cn10k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                return rc;
        }
 
+       nix_eth_dev_ops_override();
+
        /* Common probe */
        rc = cnxk_nix_probe(pci_drv, pci_dev);
        if (rc)
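A note on ordering: nix_eth_dev_ops_override() must run before the common probe, since cnxk_nix_probe() installs cnxk_eth_dev_ops into the newly created ethdev. A standalone sketch of this patch-the-shared-table-once pattern (the types and names below are illustrative, not driver code):

#include <stdio.h>

struct dev_ops {
	int (*dev_configure)(void);
};

static int common_configure(void) { puts("common cfg"); return 0; }

/* Platform hook wraps the common one, as cn10k_nix_configure() does */
static int plat_configure(void) { puts("plat cfg"); return common_configure(); }

/* Stands in for the shared, mutable cnxk_eth_dev_ops table */
static struct dev_ops shared_ops = { .dev_configure = common_configure };

static void ops_override(void)
{
	static int init_once;

	if (init_once)
		return;
	init_once = 1;
	shared_ops.dev_configure = plat_configure;
}

int main(void)
{
	ops_override();                    /* before "probe"... */
	return shared_ops.dev_configure(); /* ...which publishes the table */
}
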
drivers/net/cnxk/cn9k_ethdev.c
index 98d2d3a..2fb7c14 100644
@@ -3,6 +3,49 @@
  */
 #include "cn9k_ethdev.h"
 
+static int
+cn9k_nix_configure(struct rte_eth_dev *eth_dev)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct rte_eth_conf *conf = &eth_dev->data->dev_conf;
+       struct rte_eth_txmode *txmode = &conf->txmode;
+       int rc;
+
+       /* Platform specific checks */
+       if ((roc_model_is_cn96_a0() || roc_model_is_cn95_a0()) &&
+           (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
+           ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
+            (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
+               plt_err("Outer IP and SCTP checksum unsupported");
+               return -EINVAL;
+       }
+
+       /* Common nix configure */
+       rc = cnxk_nix_configure(eth_dev);
+       if (rc)
+               return rc;
+
+       plt_nix_dbg("Configured port%d platform specific rx_offload_flags=0x%x"
+                   " tx_offload_flags=0x%x",
+                   eth_dev->data->port_id, dev->rx_offload_flags,
+                   dev->tx_offload_flags);
+       return 0;
+}
+
+/* Update platform specific eth dev ops */
+static void
+nix_eth_dev_ops_override(void)
+{
+       static int init_once;
+
+       if (init_once)
+               return;
+       init_once = 1;
+
+       /* Update platform specific ops */
+       cnxk_eth_dev_ops.dev_configure = cn9k_nix_configure;
+}
+
 static int
 cn9k_nix_remove(struct rte_pci_device *pci_dev)
 {
@@ -27,6 +70,8 @@ cn9k_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                return rc;
        }
 
+       nix_eth_dev_ops_override();
+
        /* Common probe */
        rc = cnxk_nix_probe(pci_drv, pci_dev);
        if (rc)
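The platform check above encodes a CN96xx A0 / CN95xx A0 restriction: SCTP checksum offload cannot be combined with either outer checksum offload. A small predicate mirroring that check, usable for pre-validating a Tx offload mask (the helper name is ours; only the flag arithmetic comes from the driver):

#include <stdbool.h>
#include <stdint.h>
#include <rte_ethdev.h>

/* True if this Tx offload mask would make cn9k_nix_configure()
 * return -EINVAL on cn96xx A0 / cn95xx A0.
 */
static bool
tx_offloads_conflict_on_a0(uint64_t offloads)
{
	return (offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
	       (offloads & (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
			    DEV_TX_OFFLOAD_OUTER_UDP_CKSUM));
}
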
drivers/net/cnxk/cnxk_ethdev.c
index 066e01c..251d6eb 100644
@@ -37,6 +37,567 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
        return speed_capa;
 }
 
+uint32_t
+cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
+                      uint8_t rss_level)
+{
+       uint32_t flow_key_type[RSS_MAX_LEVELS][6] = {
+               {FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, FLOW_KEY_TYPE_TCP,
+                FLOW_KEY_TYPE_UDP, FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC},
+               {FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6,
+                FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP,
+                FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC},
+               {FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4,
+                FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6,
+                FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP,
+                FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP,
+                FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP,
+                FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC}
+       };
+       uint32_t flowkey_cfg = 0;
+
+       dev->ethdev_rss_hf = ethdev_rss;
+
+       if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+               flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
+
+       if (ethdev_rss & ETH_RSS_C_VLAN)
+               flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
+
+       if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
+               flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
+
+       if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
+               flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
+
+       if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
+               flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
+
+       if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
+               flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
+
+       if (ethdev_rss & RSS_IPV4_ENABLE)
+               flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX];
+
+       if (ethdev_rss & RSS_IPV6_ENABLE)
+               flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
+
+       if (ethdev_rss & ETH_RSS_TCP)
+               flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
+
+       if (ethdev_rss & ETH_RSS_UDP)
+               flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
+
+       if (ethdev_rss & ETH_RSS_SCTP)
+               flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
+
+       if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
+               flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
+
+       if (ethdev_rss & RSS_IPV6_EX_ENABLE)
+               flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
+
+       if (ethdev_rss & ETH_RSS_PORT)
+               flowkey_cfg |= FLOW_KEY_TYPE_PORT;
+
+       if (ethdev_rss & ETH_RSS_NVGRE)
+               flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
+
+       if (ethdev_rss & ETH_RSS_VXLAN)
+               flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
+
+       if (ethdev_rss & ETH_RSS_GENEVE)
+               flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
+
+       if (ethdev_rss & ETH_RSS_GTPU)
+               flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
+
+       return flowkey_cfg;
+}
+
+static void
+nix_free_queue_mem(struct cnxk_eth_dev *dev)
+{
+       plt_free(dev->rqs);
+       plt_free(dev->cqs);
+       plt_free(dev->sqs);
+       dev->rqs = NULL;
+       dev->cqs = NULL;
+       dev->sqs = NULL;
+}
+
+static int
+nix_rss_default_setup(struct cnxk_eth_dev *dev)
+{
+       struct rte_eth_dev *eth_dev = dev->eth_dev;
+       uint8_t rss_hash_level;
+       uint32_t flowkey_cfg;
+       uint64_t rss_hf;
+
+       rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
+       rss_hash_level = ETH_RSS_LEVEL(rss_hf);
+       if (rss_hash_level)
+               rss_hash_level -= 1;
+
+       flowkey_cfg = cnxk_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
+       return roc_nix_rss_default_setup(&dev->nix, flowkey_cfg);
+}
+
+static int
+nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+       struct cnxk_eth_qconf *tx_qconf = NULL;
+       struct cnxk_eth_qconf *rx_qconf = NULL;
+       struct cnxk_eth_rxq_sp *rxq_sp;
+       struct cnxk_eth_txq_sp *txq_sp;
+       int i, nb_rxq, nb_txq;
+       void **txq, **rxq;
+
+       nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
+       nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
+
+       tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
+       if (tx_qconf == NULL) {
+               plt_err("Failed to allocate memory for tx_qconf");
+               goto fail;
+       }
+
+       rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
+       if (rx_qconf == NULL) {
+               plt_err("Failed to allocate memory for rx_qconf");
+               goto fail;
+       }
+
+       txq = eth_dev->data->tx_queues;
+       for (i = 0; i < nb_txq; i++) {
+               if (txq[i] == NULL) {
+                       tx_qconf[i].valid = false;
+                       plt_info("txq[%d] is already released", i);
+                       continue;
+               }
+               txq_sp = cnxk_eth_txq_to_sp(txq[i]);
+               memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
+               tx_qconf[i].valid = true;
+               dev_ops->tx_queue_release(txq[i]);
+               eth_dev->data->tx_queues[i] = NULL;
+       }
+
+       rxq = eth_dev->data->rx_queues;
+       for (i = 0; i < nb_rxq; i++) {
+               if (rxq[i] == NULL) {
+                       rx_qconf[i].valid = false;
+                       plt_info("rxq[%d] is already released", i);
+                       continue;
+               }
+               rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
+               memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
+               rx_qconf[i].valid = true;
+               dev_ops->rx_queue_release(rxq[i]);
+               eth_dev->data->rx_queues[i] = NULL;
+       }
+
+       dev->tx_qconf = tx_qconf;
+       dev->rx_qconf = rx_qconf;
+       return 0;
+
+fail:
+       free(tx_qconf);
+       free(rx_qconf);
+       return -ENOMEM;
+}
+
+static int
+nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+       struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
+       struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
+       int rc, i, nb_rxq, nb_txq;
+       void **txq, **rxq;
+
+       nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
+       nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
+
+       rc = -ENOMEM;
+       /* Set up tx & rx queues with the previous configuration so
+        * that the queues can be functional in cases where ports
+        * are started without reconfiguring queues.
+        *
+        * The usual reconfig sequence is as below:
+        * port_configure() {
+        *      if(reconfigure) {
+        *              queue_release()
+        *              queue_setup()
+        *      }
+        *      queue_configure() {
+        *              queue_release()
+        *              queue_setup()
+        *      }
+        * }
+        * port_start()
+        *
+        * In some applications' control path, queue_configure() is
+        * NOT invoked for TXQs/RXQs after port_configure().
+        * In such cases, the queues can still be functional after start,
+        * as they were already set up during port_configure().
+        */
+       for (i = 0; i < nb_txq; i++) {
+               if (!tx_qconf[i].valid)
+                       continue;
+               rc = dev_ops->tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, 0,
+                                            &tx_qconf[i].conf.tx);
+               if (rc) {
+                       plt_err("Failed to setup tx queue rc=%d", rc);
+                       txq = eth_dev->data->tx_queues;
+                       for (i -= 1; i >= 0; i--)
+                               dev_ops->tx_queue_release(txq[i]);
+                       goto fail;
+               }
+       }
+
+       free(tx_qconf);
+       tx_qconf = NULL;
+
+       for (i = 0; i < nb_rxq; i++) {
+               if (!rx_qconf[i].valid)
+                       continue;
+               rc = dev_ops->rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, 0,
+                                            &rx_qconf[i].conf.rx,
+                                            rx_qconf[i].mp);
+               if (rc) {
+                       plt_err("Failed to setup rx queue rc=%d", rc);
+                       rxq = eth_dev->data->rx_queues;
+                       for (i -= 1; i >= 0; i--)
+                               dev_ops->rx_queue_release(rxq[i]);
+                       goto tx_queue_release;
+               }
+       }
+
+       free(rx_qconf);
+       rx_qconf = NULL;
+
+       return 0;
+
+tx_queue_release:
+       txq = eth_dev->data->tx_queues;
+       for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+               dev_ops->tx_queue_release(txq[i]);
+fail:
+       if (tx_qconf)
+               free(tx_qconf);
+       if (rx_qconf)
+               free(rx_qconf);
+
+       return rc;
+}
+
+static uint16_t
+nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
+{
+       RTE_SET_USED(queue);
+       RTE_SET_USED(mbufs);
+       RTE_SET_USED(pkts);
+
+       return 0;
+}
+
+static void
+nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
+{
+       /* These dummy functions are required to support
+        * applications which reconfigure queues without
+        * stopping the tx and rx burst threads (e.g. the kni app).
+        * When the queue context is saved, the txqs/rxqs are
+        * released, which would crash the app since rx/tx burst
+        * is still running on different lcores.
+        */
+       eth_dev->tx_pkt_burst = nix_eth_nop_burst;
+       eth_dev->rx_pkt_burst = nix_eth_nop_burst;
+       rte_mb();
+}
+
+static int
+nix_lso_tun_fmt_update(struct cnxk_eth_dev *dev)
+{
+       uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX];
+       uint8_t tun[ROC_NIX_LSO_TUN_MAX];
+       struct roc_nix *nix = &dev->nix;
+       int rc;
+
+       rc = roc_nix_lso_fmt_get(nix, udp_tun, tun);
+       if (rc)
+               return rc;
+
+       dev->lso_tun_fmt = ((uint64_t)tun[ROC_NIX_LSO_TUN_V4V4] |
+                           (uint64_t)tun[ROC_NIX_LSO_TUN_V4V6] << 8 |
+                           (uint64_t)tun[ROC_NIX_LSO_TUN_V6V4] << 16 |
+                           (uint64_t)tun[ROC_NIX_LSO_TUN_V6V6] << 24);
+
+       dev->lso_tun_fmt |= ((uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V4] << 32 |
+                            (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V6] << 40 |
+                            (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V4] << 48 |
+                            (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V6] << 56);
+       return 0;
+}
+
+static int
+nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
+{
+       struct roc_nix *nix = &dev->nix;
+       int rc;
+
+       /* Nothing much to do if offload is not enabled */
+       if (!(dev->tx_offloads &
+             (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+              DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))
+               return 0;
+
+       /* Set up LSO formats in AF. It is a no-op if another ethdev
+        * has already set them up.
+        */
+       rc = roc_nix_lso_fmt_setup(nix);
+       if (rc)
+               return rc;
+
+       return nix_lso_tun_fmt_update(dev);
+}
+
+int
+cnxk_nix_configure(struct rte_eth_dev *eth_dev)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct rte_eth_conf *conf = &data->dev_conf;
+       struct rte_eth_rxmode *rxmode = &conf->rxmode;
+       struct rte_eth_txmode *txmode = &conf->txmode;
+       char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
+       struct roc_nix *nix = &dev->nix;
+       struct rte_ether_addr *ea;
+       uint8_t nb_rxq, nb_txq;
+       uint64_t rx_cfg;
+       void *qs;
+       int rc;
+
+       rc = -EINVAL;
+
+       /* Sanity checks */
+       if (rte_eal_has_hugepages() == 0) {
+               plt_err("Huge page is not configured");
+               goto fail_configure;
+       }
+
+       if (conf->dcb_capability_en == 1) {
+               plt_err("dcb enable is not supported");
+               goto fail_configure;
+       }
+
+       if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+               plt_err("Flow director is not supported");
+               goto fail_configure;
+       }
+
+       if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
+           rxmode->mq_mode != ETH_MQ_RX_RSS) {
+               plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
+               goto fail_configure;
+       }
+
+       if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+               plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
+               goto fail_configure;
+       }
+
+       /* Free the resources allocated from the previous configure */
+       if (dev->configured == 1) {
+               /* Unregister queue irq's */
+               roc_nix_unregister_queue_irqs(nix);
+
+               /* Unregister CQ irqs if present */
+               if (eth_dev->data->dev_conf.intr_conf.rxq)
+                       roc_nix_unregister_cq_irqs(nix);
+
+               /* Set no-op functions */
+               nix_set_nop_rxtx_function(eth_dev);
+               /* Store queue config for later */
+               rc = nix_store_queue_cfg_and_then_release(eth_dev);
+               if (rc)
+                       goto fail_configure;
+               roc_nix_tm_fini(nix);
+               roc_nix_lf_free(nix);
+       }
+
+       dev->rx_offloads = rxmode->offloads;
+       dev->tx_offloads = txmode->offloads;
+
+       /* Prepare rx cfg */
+       rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
+       if (dev->rx_offloads &
+           (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
+               rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
+               rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
+       }
+       rx_cfg |= (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |
+                  ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
+                  ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
+
+       nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
+       nb_txq = RTE_MAX(data->nb_tx_queues, 1);
+
+       /* Alloc a nix lf */
+       rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
+       if (rc) {
+               plt_err("Failed to init nix_lf rc=%d", rc);
+               goto fail_configure;
+       }
+
+       nb_rxq = data->nb_rx_queues;
+       nb_txq = data->nb_tx_queues;
+       rc = -ENOMEM;
+       if (nb_rxq) {
+               /* Allocate memory for roc rqs and cqs */
+               qs = plt_zmalloc(sizeof(struct roc_nix_rq) * nb_rxq, 0);
+               if (!qs) {
+                       plt_err("Failed to alloc rqs");
+                       goto free_nix_lf;
+               }
+               dev->rqs = qs;
+
+               qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_rxq, 0);
+               if (!qs) {
+                       plt_err("Failed to alloc cqs");
+                       goto free_nix_lf;
+               }
+               dev->cqs = qs;
+       }
+
+       if (nb_txq) {
+               /* Allocate memory for roc sqs */
+               qs = plt_zmalloc(sizeof(struct roc_nix_sq) * nb_txq, 0);
+               if (!qs) {
+                       plt_err("Failed to alloc sqs");
+                       goto free_nix_lf;
+               }
+               dev->sqs = qs;
+       }
+
+       /* Re-enable NIX LF error interrupts */
+       roc_nix_err_intr_ena_dis(nix, true);
+       roc_nix_ras_intr_ena_dis(nix, true);
+       rc = -EINVAL;
+       if (nix->rx_ptp_ena) {
+               plt_err("Both PTP and switch header enabled");
+               goto free_nix_lf;
+       }
+
+       /* Setup LSO if needed */
+       rc = nix_lso_fmt_setup(dev);
+       if (rc) {
+               plt_err("Failed to setup nix lso format fields, rc=%d", rc);
+               goto free_nix_lf;
+       }
+
+       /* Configure RSS */
+       rc = nix_rss_default_setup(dev);
+       if (rc) {
+               plt_err("Failed to configure rss rc=%d", rc);
+               goto free_nix_lf;
+       }
+
+       /* Init the default TM scheduler hierarchy */
+       rc = roc_nix_tm_init(nix);
+       if (rc) {
+               plt_err("Failed to init traffic manager, rc=%d", rc);
+               goto free_nix_lf;
+       }
+
+       rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
+       if (rc) {
+               plt_err("Failed to enable default tm hierarchy, rc=%d", rc);
+               goto tm_fini;
+       }
+
+       /* Register queue IRQs */
+       rc = roc_nix_register_queue_irqs(nix);
+       if (rc) {
+               plt_err("Failed to register queue interrupts rc=%d", rc);
+               goto tm_fini;
+       }
+
+       /* Register cq IRQs */
+       if (eth_dev->data->dev_conf.intr_conf.rxq) {
+               if (eth_dev->data->nb_rx_queues > dev->nix.cints) {
+                       plt_err("Rx interrupt cannot be enabled, rxq > %d",
+                               dev->nix.cints);
+                       goto q_irq_fini;
+               }
+               /* The Rx interrupt feature cannot work with vector mode
+                * because vector mode does not process packets until a
+                * minimum of 4 pkts is received, while CQ interrupts are
+                * generated even for a single pkt in the CQ.
+                */
+               dev->scalar_ena = true;
+
+               rc = roc_nix_register_cq_irqs(nix);
+               if (rc) {
+                       plt_err("Failed to register CQ interrupts rc=%d", rc);
+                       goto q_irq_fini;
+               }
+       }
+
+       /* Configure loopback mode */
+       rc = roc_nix_mac_loopback_enable(nix,
+                                        eth_dev->data->dev_conf.lpbk_mode);
+       if (rc) {
+               plt_err("Failed to configure cgx loop back mode rc=%d", rc);
+               goto cq_fini;
+       }
+
+       /*
+        * Restore queue config for the reconfigure case, where the
+        * application did not invoke queue setup between configures.
+        */
+       if (dev->configured == 1) {
+               rc = nix_restore_queue_cfg(eth_dev);
+               if (rc)
+                       goto cq_fini;
+       }
+
+       /* Update the mac address */
+       ea = eth_dev->data->mac_addrs;
+       memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
+       if (rte_is_zero_ether_addr(ea))
+               rte_eth_random_addr((uint8_t *)ea);
+
+       rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
+
+       plt_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
+                   " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 "",
+                   eth_dev->data->port_id, ea_fmt, nb_rxq, nb_txq,
+                   dev->rx_offloads, dev->tx_offloads);
+
+       /* All good */
+       dev->configured = 1;
+       dev->nb_rxq = data->nb_rx_queues;
+       dev->nb_txq = data->nb_tx_queues;
+       return 0;
+
+cq_fini:
+       roc_nix_unregister_cq_irqs(nix);
+q_irq_fini:
+       roc_nix_unregister_queue_irqs(nix);
+tm_fini:
+       roc_nix_tm_fini(nix);
+free_nix_lf:
+       nix_free_queue_mem(dev);
+       rc |= roc_nix_lf_free(nix);
+fail_configure:
+       dev->configured = 0;
+       return rc;
+}
+
 /* CNXK platform independent eth dev ops */
 struct eth_dev_ops cnxk_eth_dev_ops = {
        .dev_infos_get = cnxk_nix_info_get,
@@ -75,6 +636,7 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
        }
 
        dev->eth_dev = eth_dev;
+       dev->configured = 0;
 
        /* For vfs, returned max_entries will be 0. but to keep default mac
         * address, one entry must be allocated. so setting up to 1.
@@ -156,6 +718,9 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
+       /* Clear the flag since we are closing down */
+       dev->configured = 0;
+
        roc_nix_npc_rx_ena_dis(nix, false);
 
        /* Free up SQs */
@@ -182,6 +747,9 @@ cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
        if (eth_dev->data->dev_conf.intr_conf.rxq)
                roc_nix_unregister_cq_irqs(nix);
 
+       /* Free ROC RQ, SQ and CQ memory */
+       nix_free_queue_mem(dev);
+
        /* Free nix lf resources */
        rc = roc_nix_lf_free(nix);
        if (rc)
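Two details of the common configure path are worth spelling out. First, cnxk_rss_ethdev_to_nix() picks a row of flow_key_type[] by RSS level: row 0 holds outer flow keys, row 1 inner keys, row 2 both. nix_rss_default_setup() derives the row from the ethdev level bits; a standalone sketch of just that arithmetic (using the ethdev macros of this release):

#include <stdint.h>
#include <rte_ethdev.h>

/* ETH_RSS_LEVEL() yields 0 (PMD default), 1 (outermost) or 2 (innermost).
 * Default and "outermost" both select row 0 (outer keys); "innermost"
 * selects row 1 (inner keys), matching nix_rss_default_setup().
 */
static uint8_t
rss_flow_key_row(uint64_t rss_hf)
{
	uint8_t level = ETH_RSS_LEVEL(rss_hf);

	return level ? level - 1 : 0;
}

/* e.g. rss_flow_key_row(ETH_RSS_IP | ETH_RSS_LEVEL_INNERMOST) == 1 */

Second, nix_lso_tun_fmt_update() packs eight 8-bit LSO format indices into the 64-bit lso_tun_fmt word: bytes 0-3 carry the tunnel formats (v4v4, v4v6, v6v4, v6v6) and bytes 4-7 their UDP-tunnel counterparts. A hypothetical accessor showing the inverse of that packing:

#include <stdint.h>

/* slot 0..3 = tunnel v4v4..v6v6, slot 4..7 = UDP-tunnel equivalents */
static uint8_t
lso_tun_fmt_idx(uint64_t lso_tun_fmt, unsigned int slot)
{
	return (uint8_t)(lso_tun_fmt >> (slot * 8));
}
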
drivers/net/cnxk/cnxk_ethdev.h
index 8d9a7e0..291f5f9 100644
         DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |         \
         DEV_RX_OFFLOAD_RSS_HASH)
 
+#define RSS_IPV4_ENABLE                                                        \
+       (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP |         \
+        ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_SCTP)
+
+#define RSS_IPV6_ENABLE                                                        \
+       (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_UDP |         \
+        ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_SCTP)
+
+#define RSS_IPV6_EX_ENABLE                                                     \
+       (ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | ETH_RSS_IPV6_UDP_EX)
+
+#define RSS_MAX_LEVELS 3
+
+#define RSS_IPV4_INDEX 0
+#define RSS_IPV6_INDEX 1
+#define RSS_TCP_INDEX  2
+#define RSS_UDP_INDEX  3
+#define RSS_SCTP_INDEX 4
+#define RSS_DMAC_INDEX 5
+
+struct cnxk_eth_qconf {
+       union {
+               struct rte_eth_txconf tx;
+               struct rte_eth_rxconf rx;
+       } conf;
+       struct rte_mempool *mp;
+       uint16_t nb_desc;
+       uint8_t valid;
+};
+
 struct cnxk_eth_dev {
        /* ROC NIX */
        struct roc_nix nix;
 
+       /* ROC RQs, SQs and CQs */
+       struct roc_nix_rq *rqs;
+       struct roc_nix_sq *sqs;
+       struct roc_nix_cq *cqs;
+
+       /* Configured queue count */
+       uint16_t nb_rxq;
+       uint16_t nb_txq;
+       uint8_t configured;
+
        /* Max macfilter entries */
        uint8_t max_mac_entries;
 
@@ -90,17 +130,57 @@ struct cnxk_eth_dev {
        uint64_t rx_offload_capa;
        uint64_t tx_offload_capa;
        uint32_t speed_capa;
+       /* Configured Rx and Tx offloads */
+       uint64_t rx_offloads;
+       uint64_t tx_offloads;
+       /* Platform specific offload flags */
+       uint16_t rx_offload_flags;
+       uint16_t tx_offload_flags;
+
+       /* ETHDEV RSS HF bitmask */
+       uint64_t ethdev_rss_hf;
+
+       /* Saved qconf before lf realloc */
+       struct cnxk_eth_qconf *tx_qconf;
+       struct cnxk_eth_qconf *rx_qconf;
 
        /* Default mac address */
        uint8_t mac_addr[RTE_ETHER_ADDR_LEN];
+
+       /* LSO Tunnel format indices */
+       uint64_t lso_tun_fmt;
 };
 
+struct cnxk_eth_rxq_sp {
+       struct cnxk_eth_dev *dev;
+       struct cnxk_eth_qconf qconf;
+       uint16_t qid;
+} __plt_cache_aligned;
+
+struct cnxk_eth_txq_sp {
+       struct cnxk_eth_dev *dev;
+       struct cnxk_eth_qconf qconf;
+       uint16_t qid;
+} __plt_cache_aligned;
+
 static inline struct cnxk_eth_dev *
 cnxk_eth_pmd_priv(struct rte_eth_dev *eth_dev)
 {
        return eth_dev->data->dev_private;
 }
 
+static inline struct cnxk_eth_rxq_sp *
+cnxk_eth_rxq_to_sp(void *__rxq)
+{
+       return ((struct cnxk_eth_rxq_sp *)__rxq) - 1;
+}
+
+static inline struct cnxk_eth_txq_sp *
+cnxk_eth_txq_to_sp(void *__txq)
+{
+       return ((struct cnxk_eth_txq_sp *)__txq) - 1;
+}
+
 /* Common ethdev ops */
 extern struct eth_dev_ops cnxk_eth_dev_ops;
 
@@ -110,6 +190,11 @@ int cnxk_nix_probe(struct rte_pci_driver *pci_drv,
 int cnxk_nix_remove(struct rte_pci_device *pci_dev);
 int cnxk_nix_info_get(struct rte_eth_dev *eth_dev,
                      struct rte_eth_dev_info *dev_info);
+int cnxk_nix_configure(struct rte_eth_dev *eth_dev);
+
+/* RSS */
+uint32_t cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
+                               uint8_t rss_level);
 
 /* Devargs */
 int cnxk_ethdev_parse_devargs(struct rte_devargs *devargs,
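As an aside on the helpers in cnxk_ethdev.h: cnxk_eth_rxq_to_sp() and cnxk_eth_txq_to_sp() assume the slow-path struct sits immediately before the fast-path queue struct in one allocation, so stepping the typed pointer back one element recovers it. A minimal layout sketch under that assumption (the fast-path struct here is hypothetical; the real one is platform specific):

#include <stdint.h>
#include <stdlib.h>

struct rxq_sp { void *dev; uint16_t qid; };  /* slow-path header */
struct rxq_fp { int tag; };                  /* hypothetical fast-path struct */

/* Allocate [sp][fp] back to back and hand out only the fp pointer,
 * as queue setup is expected to do.
 */
static struct rxq_fp *
rxq_alloc(uint16_t qid)
{
	struct rxq_sp *sp = calloc(1, sizeof(*sp) + sizeof(struct rxq_fp));

	if (sp == NULL)
		return NULL;
	sp->qid = qid;
	return (struct rxq_fp *)(sp + 1);
}

/* Mirror of cnxk_eth_rxq_to_sp(): step back one sp-sized element */
static struct rxq_sp *
rxq_to_sp(void *rxq)
{
	return ((struct rxq_sp *)rxq) - 1;
}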