return speed_capa;
}
+static void
+nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
+{
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ struct rte_eth_dev *eth_dev;
+ struct cnxk_eth_dev *dev;
+ uint32_t buffsz;
+
+ dev = rxq->dev;
+ eth_dev = dev->eth_dev;
+
+ /* Get rx buffer size */
+ mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
+ buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+
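+ /* Frames larger than one mbuf need Rx scatter and Tx multi-seg */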
+ if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
+ dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+ dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ }
+}
+
+static int
+nix_recalc_mtu(struct rte_eth_dev *eth_dev)
+{
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct cnxk_eth_rxq_sp *rxq;
+ uint16_t mtu;
+ int rc;
+
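+ /* Slow path queue struct is stored just before the fast path area */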
+ rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
+ /* Setup scatter mode if needed by jumbo */
+ nix_enable_mseg_on_jumbo(rxq);
+
+ /* Setup MTU based on max_rx_pkt_len */
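+ /* Exclude L2 overhead; retain room for VLAN tags added by vtag actions */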
+ mtu = data->dev_conf.rxmode.max_rx_pkt_len - CNXK_NIX_L2_OVERHEAD +
+ CNXK_NIX_MAX_VTAG_ACT_SIZE;
+
+ rc = cnxk_nix_mtu_set(eth_dev, mtu);
+ if (rc)
+ plt_err("Failed to set default MTU size, rc=%d", rc);
+
+ return rc;
+}
+
uint64_t
cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
{
return *tmp;
}
+static inline uint8_t
+nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
+{
+ /*
+ * A maximum of three segments can be supported with W8; choose
+ * NIX_MAXSQESZ_W16 for multi-segment offload.
+ */
+ if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ return NIX_MAXSQESZ_W16;
+ else
+ return NIX_MAXSQESZ_W8;
+}
+
+int
+cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+ uint16_t nb_desc, uint16_t fp_tx_q_sz,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+ struct cnxk_eth_txq_sp *txq_sp;
+ struct roc_nix_sq *sq;
+ size_t txq_sz;
+ int rc;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (eth_dev->data->tx_queues[qid] != NULL) {
+ plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
+ dev_ops->tx_queue_release(eth_dev->data->tx_queues[qid]);
+ eth_dev->data->tx_queues[qid] = NULL;
+ }
+
+ /* Setup ROC SQ */
+ sq = &dev->sqs[qid];
+ sq->qid = qid;
+ sq->nb_desc = nb_desc;
+ sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
+
+ rc = roc_nix_sq_init(&dev->nix, sq);
+ if (rc) {
+ plt_err("Failed to init sq=%d, rc=%d", qid, rc);
+ return rc;
+ }
+
+ rc = -ENOMEM;
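+ /* One allocation: slow path struct followed by the fast path area */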
+ txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
+ txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
+ if (!txq_sp) {
+ plt_err("Failed to alloc tx queue mem");
+ rc |= roc_nix_sq_fini(sq);
+ return rc;
+ }
+
+ txq_sp->dev = dev;
+ txq_sp->qid = qid;
+ txq_sp->qconf.conf.tx = *tx_conf;
+ txq_sp->qconf.nb_desc = nb_desc;
+
+ plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
+ " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
+ qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
+ sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);
+
+ /* Store start of fast path area */
+ eth_dev->data->tx_queues[qid] = txq_sp + 1;
+ eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
+static void
+cnxk_nix_tx_queue_release(void *txq)
+{
+ struct cnxk_eth_txq_sp *txq_sp;
+ struct cnxk_eth_dev *dev;
+ struct roc_nix_sq *sq;
+ uint16_t qid;
+ int rc;
+
+ if (!txq)
+ return;
+
+ txq_sp = cnxk_eth_txq_to_sp(txq);
+ dev = txq_sp->dev;
+ qid = txq_sp->qid;
+
+ plt_nix_dbg("Releasing txq %u", qid);
+
+ /* Cleanup ROC SQ */
+ sq = &dev->sqs[qid];
+ rc = roc_nix_sq_fini(sq);
+ if (rc)
+ plt_err("Failed to cleanup sq, rc=%d", rc);
+
+ /* Finally free */
+ plt_free(txq_sp);
+}
+
int
cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
uint16_t nb_desc, uint16_t fp_rx_q_sz,
return rc;
}
+static int
+cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct roc_nix_sq *sq = &dev->sqs[qid];
+ int rc = -EINVAL;
+
+ if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+
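+ /* Enable SQB aura flow control before marking the queue started */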
+ rc = roc_nix_tm_sq_aura_fc(sq, true);
+ if (rc) {
+ plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
+ goto done;
+ }
+
+ data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
+done:
+ return rc;
+}
+
+int
+cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct roc_nix_sq *sq = &dev->sqs[qid];
+ int rc;
+
+ if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
+ return 0;
+
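+ /* Disable SQB aura flow control so no further packets are accepted */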
+ rc = roc_nix_tm_sq_aura_fc(sq, false);
+ if (rc) {
+ plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
+ rc);
+ goto done;
+ }
+
+ data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+done:
+ return rc;
+}
+
+static int
+cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct roc_nix_rq *rq = &dev->rqs[qid];
+ int rc;
+
+ if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+
+ rc = roc_nix_rq_ena_dis(rq, true);
+ if (rc) {
+ plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
+ goto done;
+ }
+
+ data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
+done:
+ return rc;
+}
+
+static int
+cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct roc_nix_rq *rq = &dev->rqs[qid];
+ int rc;
+
+ if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
+ return 0;
+
+ rc = roc_nix_rq_ena_dis(rq, false);
+ if (rc) {
+ plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
+ goto done;
+ }
+
+ data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+done:
+ return rc;
+}
+
+static int
+cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+ struct rte_mbuf *rx_pkts[32];
+ int count, i, j, rc;
+ void *rxq;
+
+ /* Disable switch hdr pkind */
+ roc_nix_switch_hdr_set(&dev->nix, 0);
+
+ /* Stop link change events */
+ if (!roc_nix_is_vf_or_sdp(&dev->nix))
+ roc_nix_mac_link_event_start_stop(&dev->nix, false);
+
+ /* Disable Rx via NPC */
+ roc_nix_npc_rx_ena_dis(&dev->nix, false);
+
+ /* Stop rx queues and free up pending pkts */
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ rc = dev_ops->rx_queue_stop(eth_dev, i);
+ if (rc)
+ continue;
+
+ rxq = eth_dev->data->rx_queues[i];
+ count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
+ while (count) {
+ for (j = 0; j < count; j++)
+ rte_pktmbuf_free(rx_pkts[j]);
+ count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
+ }
+ }
+
+ /* Stop tx queues */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ dev_ops->tx_queue_stop(eth_dev, i);
+
+ return 0;
+}
+
+int
+cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ int rc, i;
+
+ if (eth_dev->data->nb_rx_queues != 0) {
+ rc = nix_recalc_mtu(eth_dev);
+ if (rc)
+ return rc;
+ }
+
+ /* Start rx queues */
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ rc = cnxk_nix_rx_queue_start(eth_dev, i);
+ if (rc)
+ return rc;
+ }
+
+ /* Start tx queues */
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ rc = cnxk_nix_tx_queue_start(eth_dev, i);
+ if (rc)
+ return rc;
+ }
+
+ /* Enable Rx in NPC */
+ rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
+ if (rc) {
+ plt_err("Failed to enable NPC rx %d", rc);
+ return rc;
+ }
+
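+ /* Mark link configuration in progress while link events are started */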
+ cnxk_nix_toggle_flag_link_cfg(dev, true);
+
+ /* Start link change events */
+ if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
+ rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
+ if (rc) {
+ plt_err("Failed to start cgx link event %d", rc);
+ goto rx_disable;
+ }
+ }
+
+ cnxk_nix_toggle_flag_link_cfg(dev, false);
+
+ return 0;
+
+rx_disable:
+ roc_nix_npc_rx_ena_dis(&dev->nix, false);
+ cnxk_nix_toggle_flag_link_cfg(dev, false);
+ return rc;
+}
+
/* CNXK platform independent eth dev ops */
struct eth_dev_ops cnxk_eth_dev_ops = {
+ .mtu_set = cnxk_nix_mtu_set,
+ .mac_addr_add = cnxk_nix_mac_addr_add,
+ .mac_addr_remove = cnxk_nix_mac_addr_del,
+ .mac_addr_set = cnxk_nix_mac_addr_set,
.dev_infos_get = cnxk_nix_info_get,
.link_update = cnxk_nix_link_update,
+ .tx_queue_release = cnxk_nix_tx_queue_release,
.rx_queue_release = cnxk_nix_rx_queue_release,
+ .dev_stop = cnxk_nix_dev_stop,
+ .tx_queue_start = cnxk_nix_tx_queue_start,
+ .rx_queue_start = cnxk_nix_rx_queue_start,
+ .rx_queue_stop = cnxk_nix_rx_queue_stop,
+ .dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
+ .promiscuous_enable = cnxk_nix_promisc_enable,
+ .promiscuous_disable = cnxk_nix_promisc_disable,
+ .allmulticast_enable = cnxk_nix_allmulticast_enable,
+ .allmulticast_disable = cnxk_nix_allmulticast_disable,
+ .rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
+ .tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
};
static int
dev->eth_dev = eth_dev;
dev->configured = 0;
+ dev->ptype_disable = 0;
 /* For VFs, the returned max_entries will be 0, but one entry must
 * still be allocated to hold the default MAC address, so set it to 1.