net/cnxk: support Rx/Tx burst mode query
diff --git a/drivers/net/cnxk/cnxk_ethdev.c b/drivers/net/cnxk/cnxk_ethdev.c
index 424512c..0311df3 100644
--- a/drivers/net/cnxk/cnxk_ethdev.c
+++ b/drivers/net/cnxk/cnxk_ethdev.c
@@ -37,6 +37,50 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
        return speed_capa;
 }
 
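+/* Enable scatter Rx and multi-seg Tx when jumbo frames exceed the mbuf size */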
+static void
+nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
+{
+       struct rte_pktmbuf_pool_private *mbp_priv;
+       struct rte_eth_dev *eth_dev;
+       struct cnxk_eth_dev *dev;
+       uint32_t buffsz;
+
+       dev = rxq->dev;
+       eth_dev = dev->eth_dev;
+
+       /* Get rx buffer size */
+       mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
+       buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+
+       if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
+               dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+               dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+       }
+}
+
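+/* Re-apply the MTU derived from max_rx_pkt_len when the device starts */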
+static int
+nix_recalc_mtu(struct rte_eth_dev *eth_dev)
+{
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct cnxk_eth_rxq_sp *rxq;
+       uint16_t mtu;
+       int rc;
+
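+       /* Slow-path queue context sits just before the fast-path queue data */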
+       rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
+       /* Setup scatter mode if needed by jumbo */
+       nix_enable_mseg_on_jumbo(rxq);
+
+       /* Setup MTU based on max_rx_pkt_len */
+       mtu = data->dev_conf.rxmode.max_rx_pkt_len - CNXK_NIX_L2_OVERHEAD +
+                               CNXK_NIX_MAX_VTAG_ACT_SIZE;
+
+       rc = cnxk_nix_mtu_set(eth_dev, mtu);
+       if (rc)
+               plt_err("Failed to set default MTU size, rc=%d", rc);
+
+       return rc;
+}
+
 uint64_t
 cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
 {
@@ -866,12 +910,211 @@ fail_configure:
        return rc;
 }
 
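+/* Start a Tx queue by enabling flow control on its SQB aura */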
+static int
+cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct roc_nix_sq *sq = &dev->sqs[qid];
+       int rc = -EINVAL;
+
+       if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
+               return 0;
+
+       rc = roc_nix_tm_sq_aura_fc(sq, true);
+       if (rc) {
+               plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
+               goto done;
+       }
+
+       data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
+done:
+       return rc;
+}
+
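+/* Stop a Tx queue by disabling flow control on its SQB aura */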
+int
+cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct roc_nix_sq *sq = &dev->sqs[qid];
+       int rc;
+
+       if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
+               return 0;
+
+       rc = roc_nix_tm_sq_aura_fc(sq, false);
+       if (rc) {
+               plt_err("Failed to disable sq aura fc, txq=%u, rc=%d", qid,
+                       rc);
+               goto done;
+       }
+
+       data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+done:
+       return rc;
+}
+
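+/* Start an Rx queue by enabling its RQ in hardware */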
+static int
+cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct roc_nix_rq *rq = &dev->rqs[qid];
+       int rc;
+
+       if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
+               return 0;
+
+       rc = roc_nix_rq_ena_dis(rq, true);
+       if (rc) {
+               plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
+               goto done;
+       }
+
+       data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
+done:
+       return rc;
+}
+
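+/* Stop an Rx queue by disabling its RQ in hardware */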
+static int
+cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct rte_eth_dev_data *data = eth_dev->data;
+       struct roc_nix_rq *rq = &dev->rqs[qid];
+       int rc;
+
+       if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
+               return 0;
+
+       rc = roc_nix_rq_ena_dis(rq, false);
+       if (rc) {
+               plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
+               goto done;
+       }
+
+       data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+done:
+       return rc;
+}
+
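+/* Stop the port: halt link events, block Rx at NPC and drain pending pkts */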
+static int
+cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+       struct rte_mbuf *rx_pkts[32];
+       int count, i, j, rc;
+       void *rxq;
+
+       /* Disable switch hdr pkind */
+       roc_nix_switch_hdr_set(&dev->nix, 0);
+
+       /* Stop link change events */
+       if (!roc_nix_is_vf_or_sdp(&dev->nix))
+               roc_nix_mac_link_event_start_stop(&dev->nix, false);
+
+       /* Disable Rx via NPC */
+       roc_nix_npc_rx_ena_dis(&dev->nix, false);
+
+       /* Stop rx queues and free up pkts pending */
+       for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+               rc = dev_ops->rx_queue_stop(eth_dev, i);
+               if (rc)
+                       continue;
+
+               rxq = eth_dev->data->rx_queues[i];
+               count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
+               while (count) {
+                       for (j = 0; j < count; j++)
+                               rte_pktmbuf_free(rx_pkts[j]);
+                       count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
+               }
+       }
+
+       /* Stop tx queues */
+       for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+               dev_ops->tx_queue_stop(eth_dev, i);
+
+       return 0;
+}
+
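+/* Start the port: bring up all queues, then enable Rx at NPC and link events */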
+int
+cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       int rc, i;
+
+       if (eth_dev->data->nb_rx_queues != 0) {
+               rc = nix_recalc_mtu(eth_dev);
+               if (rc)
+                       return rc;
+       }
+
+       /* Start rx queues */
+       for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+               rc = cnxk_nix_rx_queue_start(eth_dev, i);
+               if (rc)
+                       return rc;
+       }
+
+       /* Start tx queues */
+       for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+               rc = cnxk_nix_tx_queue_start(eth_dev, i);
+               if (rc)
+                       return rc;
+       }
+
+       /* Enable Rx in NPC */
+       rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
+       if (rc) {
+               plt_err("Failed to enable NPC rx %d", rc);
+               return rc;
+       }
+
+       cnxk_nix_toggle_flag_link_cfg(dev, true);
+
+       /* Start link change events */
+       if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
+               rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
+               if (rc) {
+                       plt_err("Failed to start cgx link event %d", rc);
+                       goto rx_disable;
+               }
+       }
+
+       cnxk_nix_toggle_flag_link_cfg(dev, false);
+
+       return 0;
+
+rx_disable:
+       roc_nix_npc_rx_ena_dis(&dev->nix, false);
+       cnxk_nix_toggle_flag_link_cfg(dev, false);
+       return rc;
+}
+
 /* CNXK platform independent eth dev ops */
 struct eth_dev_ops cnxk_eth_dev_ops = {
+       .mtu_set = cnxk_nix_mtu_set,
+       .mac_addr_add = cnxk_nix_mac_addr_add,
+       .mac_addr_remove = cnxk_nix_mac_addr_del,
+       .mac_addr_set = cnxk_nix_mac_addr_set,
        .dev_infos_get = cnxk_nix_info_get,
        .link_update = cnxk_nix_link_update,
        .tx_queue_release = cnxk_nix_tx_queue_release,
        .rx_queue_release = cnxk_nix_rx_queue_release,
+       .dev_stop = cnxk_nix_dev_stop,
+       .tx_queue_start = cnxk_nix_tx_queue_start,
+       .rx_queue_start = cnxk_nix_rx_queue_start,
+       .rx_queue_stop = cnxk_nix_rx_queue_stop,
+       .dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
+       .promiscuous_enable = cnxk_nix_promisc_enable,
+       .promiscuous_disable = cnxk_nix_promisc_disable,
+       .allmulticast_enable = cnxk_nix_allmulticast_enable,
+       .allmulticast_disable = cnxk_nix_allmulticast_disable,
+       .rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
+       .tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
 };
 
 static int
@@ -911,6 +1154,7 @@ cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
 
        dev->eth_dev = eth_dev;
        dev->configured = 0;
+       dev->ptype_disable = 0;
 
        /* For VFs, the returned max_entries will be 0, but one entry must
         * still be allocated to keep the default MAC address.