+static void
+nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
+{
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ struct rte_eth_dev *eth_dev;
+ struct cnxk_eth_dev *dev;
+ uint32_t buffsz;
+
+ dev = rxq->dev;
+ eth_dev = dev->eth_dev;
+
+ /* Get rx buffer size */
+ mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
+ buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+
+ if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
+ dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+ dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ }
+}
+
+int
+nix_recalc_mtu(struct rte_eth_dev *eth_dev)
+{
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct cnxk_eth_rxq_sp *rxq;
+ uint16_t mtu;
+ int rc;
+
+ rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
+ /* Setup scatter mode if needed by jumbo */
+ nix_enable_mseg_on_jumbo(rxq);
+
+ /* Setup MTU based on max_rx_pkt_len */
+ mtu = data->dev_conf.rxmode.max_rx_pkt_len - CNXK_NIX_L2_OVERHEAD +
+ CNXK_NIX_MAX_VTAG_ACT_SIZE;
+
+ rc = cnxk_nix_mtu_set(eth_dev, mtu);
+ if (rc)
+ plt_err("Failed to set default MTU size, rc=%d", rc);
+
+ return rc;
+}
+
+static int
+nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct cnxk_fc_cfg *fc = &dev->fc_cfg;
+ struct rte_eth_fc_conf fc_conf = {0};
+ int rc;
+
+ /* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
+ * by AF driver, update those info in PMD structure.
+ */
+ rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
+ if (rc)
+ goto exit;
+
+ fc->mode = fc_conf.mode;
+ fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
+ (fc_conf.mode == RTE_FC_RX_PAUSE);
+ fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
+ (fc_conf.mode == RTE_FC_TX_PAUSE);
+
+exit:
+ return rc;
+}
+
/* Re-apply the stored flow-control mode to the hardware, applying an
 * errata workaround on CN96xx Ax silicon.  Returns 0 on success (or
 * on VF/SDP where FC is not applicable), negative errno otherwise.
 */
static int
nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
	struct rte_eth_fc_conf fc_cfg = {0};

	/* Flow control is not configurable on VF/SDP interfaces */
	if (roc_nix_is_vf_or_sdp(&dev->nix))
		return 0;

	fc_cfg.mode = fc->mode;

	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
	if (roc_model_is_cn96_ax() &&
	    dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
	    (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
		/* Effective mapping: FULL -> TX_PAUSE, RX_PAUSE -> NONE,
		 * i.e. the RX_PAUSE component is stripped from the mode.
		 * NOTE(review): the RTE_FC_TX_PAUSE test below is dead —
		 * the outer condition only admits FULL or RX_PAUSE — and
		 * the wording "disable Tx FC" above does not match what
		 * the mapping does; confirm intent against the errata.
		 */
		fc_cfg.mode =
			(fc_cfg.mode == RTE_FC_FULL ||
			fc_cfg.mode == RTE_FC_TX_PAUSE) ?
			RTE_FC_TX_PAUSE : RTE_FC_NONE;
	}

	return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
}
+
/* Build the 64-bit "rearm" word covering the data_off/refcnt/nb_segs/
 * port fields of struct rte_mbuf, presumably used by the Rx fast path
 * to initialize all four fields of a received mbuf with one store —
 * TODO confirm against the fast-path Rx code.
 *
 * Returns the packed 8 bytes read from mb_def.rearm_data.
 */
uint64_t
cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
{
	uint16_t port_id = dev->eth_dev->data->port_id;
	struct rte_mbuf mb_def;
	uint64_t *tmp;

	/* Pin the exact field layout this packing relies on: data_off
	 * 8-byte aligned, refcnt at +2, nb_segs at +4, port at +6 —
	 * i.e. all four fields share one aligned 64-bit word.
	 */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
			 offsetof(struct rte_mbuf, data_off) !=
			 2);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
			 offsetof(struct rte_mbuf, data_off) !=
			 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
			 offsetof(struct rte_mbuf, data_off) !=
			 6);
	mb_def.nb_segs = 1;
	/* When PTP is enabled, reserve extra headroom for the Rx
	 * timesync timestamp (ptp_en acts as a 0/1 multiplier).
	 */
	mb_def.data_off = RTE_PKTMBUF_HEADROOM +
			  (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
	mb_def.port = port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* Prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	tmp = (uint64_t *)&mb_def.rearm_data;

	return *tmp;
}
+
+static inline uint8_t
+nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
+{
+ /*
+ * Maximum three segments can be supported with W8, Choose
+ * NIX_MAXSQESZ_W16 for multi segment offload.
+ */
+ if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ return NIX_MAXSQESZ_W16;
+ else
+ return NIX_MAXSQESZ_W8;
+}
+
/* ethdev callback: set up Tx queue @qid with @nb_desc descriptors.
 * Initializes the backing ROC SQ, then allocates one combined
 * slow-path + fast-path area; only the fast-path part (fp_tx_q_sz
 * bytes, starting at txq_sp + 1) is published in tx_queues[].
 *
 * Returns 0 on success, negative errno on failure (SQ is unwound if
 * the memory allocation fails).
 */
int
cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			uint16_t nb_desc, uint16_t fp_tx_q_sz,
			const struct rte_eth_txconf *tx_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct cnxk_eth_txq_sp *txq_sp;
	struct roc_nix_sq *sq;
	size_t txq_sz;
	int rc;

	/* Free memory prior to re-allocation if needed. */
	if (eth_dev->data->tx_queues[qid] != NULL) {
		plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
		dev_ops->tx_queue_release(eth_dev->data->tx_queues[qid]);
		eth_dev->data->tx_queues[qid] = NULL;
	}

	/* Setup ROC SQ */
	sq = &dev->sqs[qid];
	sq->qid = qid;
	sq->nb_desc = nb_desc;
	sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);

	rc = roc_nix_sq_init(&dev->nix, sq);
	if (rc) {
		plt_err("Failed to init sq=%d, rc=%d", qid, rc);
		return rc;
	}

	rc = -ENOMEM;
	txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
	txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
	if (!txq_sp) {
		plt_err("Failed to alloc tx queue mem");
		/* OR any sq_fini error bits into -ENOMEM; rc stays
		 * non-zero either way.
		 */
		rc |= roc_nix_sq_fini(sq);
		return rc;
	}

	/* Slow-path context for later teardown/re-setup */
	txq_sp->dev = dev;
	txq_sp->qid = qid;
	txq_sp->qconf.conf.tx = *tx_conf;
	txq_sp->qconf.nb_desc = nb_desc;

	plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
		    " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
		    qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
		    sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);

	/* Store start of fast path area */
	eth_dev->data->tx_queues[qid] = txq_sp + 1;
	eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
+
+static void
+cnxk_nix_tx_queue_release(void *txq)
+{
+ struct cnxk_eth_txq_sp *txq_sp;
+ struct cnxk_eth_dev *dev;
+ struct roc_nix_sq *sq;
+ uint16_t qid;
+ int rc;
+
+ if (!txq)
+ return;
+
+ txq_sp = cnxk_eth_txq_to_sp(txq);
+ dev = txq_sp->dev;
+ qid = txq_sp->qid;
+
+ plt_nix_dbg("Releasing txq %u", qid);
+
+ /* Cleanup ROC SQ */
+ sq = &dev->sqs[qid];
+ rc = roc_nix_sq_fini(sq);
+ if (rc)
+ plt_err("Failed to cleanup sq, rc=%d", rc);
+
+ /* Finally free */
+ plt_free(txq_sp);
+}
+
+int
+cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+ uint16_t nb_desc, uint16_t fp_rx_q_sz,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct cnxk_eth_rxq_sp *rxq_sp;
+ struct rte_mempool_ops *ops;
+ const char *platform_ops;
+ struct roc_nix_rq *rq;
+ struct roc_nix_cq *cq;
+ uint16_t first_skip;
+ int rc = -EINVAL;
+ size_t rxq_sz;
+
+ /* Sanity checks */
+ if (rx_conf->rx_deferred_start == 1) {
+ plt_err("Deferred Rx start is not supported");
+ goto fail;
+ }
+
+ platform_ops = rte_mbuf_platform_mempool_ops();
+ /* This driver needs cnxk_npa mempool ops to work */
+ ops = rte_mempool_get_ops(mp->ops_index);
+ if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
+ plt_err("mempool ops should be of cnxk_npa type");
+ goto fail;
+ }
+
+ if (mp->pool_id == 0) {
+ plt_err("Invalid pool_id");
+ goto fail;
+ }
+
+ /* Free memory prior to re-allocation if needed */
+ if (eth_dev->data->rx_queues[qid] != NULL) {
+ const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+
+ plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
+ dev_ops->rx_queue_release(eth_dev->data->rx_queues[qid]);
+ eth_dev->data->rx_queues[qid] = NULL;
+ }
+
+ /* Setup ROC CQ */
+ cq = &dev->cqs[qid];
+ cq->qid = qid;
+ cq->nb_desc = nb_desc;
+ rc = roc_nix_cq_init(&dev->nix, cq);
+ if (rc) {
+ plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
+ goto fail;
+ }
+
+ /* Setup ROC RQ */
+ rq = &dev->rqs[qid];
+ rq->qid = qid;
+ rq->aura_handle = mp->pool_id;
+ rq->flow_tag_width = 32;
+ rq->sso_ena = false;
+
+ /* Calculate first mbuf skip */
+ first_skip = (sizeof(struct rte_mbuf));
+ first_skip += RTE_PKTMBUF_HEADROOM;
+ first_skip += rte_pktmbuf_priv_size(mp);
+ rq->first_skip = first_skip;
+ rq->later_skip = sizeof(struct rte_mbuf);
+ rq->lpb_size = mp->elt_size;
+
+ rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
+ if (rc) {
+ plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
+ goto cq_fini;
+ }
+
+ /* Allocate and setup fast path rx queue */
+ rc = -ENOMEM;
+ rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
+ rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
+ if (!rxq_sp) {
+ plt_err("Failed to alloc rx queue for rq=%d", qid);
+ goto rq_fini;
+ }
+
+ /* Setup slow path fields */
+ rxq_sp->dev = dev;
+ rxq_sp->qid = qid;
+ rxq_sp->qconf.conf.rx = *rx_conf;
+ rxq_sp->qconf.nb_desc = nb_desc;
+ rxq_sp->qconf.mp = mp;
+
+ plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
+ cq->nb_desc);
+
+ /* Store start of fast path area */
+ eth_dev->data->rx_queues[qid] = rxq_sp + 1;
+ eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ /* Calculating delta and freq mult between PTP HI clock and tsc.
+ * These are needed in deriving raw clock value from tsc counter.
+ * read_clock eth op returns raw clock value.
+ */
+ if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
+ rc = cnxk_nix_tsc_convert(dev);
+ if (rc) {
+ plt_err("Failed to calculate delta and freq mult");
+ goto rq_fini;
+ }
+ }
+
+ return 0;
+rq_fini:
+ rc |= roc_nix_rq_fini(rq);
+cq_fini:
+ rc |= roc_nix_cq_fini(cq);
+fail:
+ return rc;
+}
+
+static void
+cnxk_nix_rx_queue_release(void *rxq)
+{
+ struct cnxk_eth_rxq_sp *rxq_sp;
+ struct cnxk_eth_dev *dev;
+ struct roc_nix_rq *rq;
+ struct roc_nix_cq *cq;
+ uint16_t qid;
+ int rc;
+
+ if (!rxq)
+ return;
+
+ rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+ dev = rxq_sp->dev;
+ qid = rxq_sp->qid;
+
+ plt_nix_dbg("Releasing rxq %u", qid);
+
+ /* Cleanup ROC RQ */
+ rq = &dev->rqs[qid];
+ rc = roc_nix_rq_fini(rq);
+ if (rc)
+ plt_err("Failed to cleanup rq, rc=%d", rc);
+
+ /* Cleanup ROC CQ */
+ cq = &dev->cqs[qid];
+ rc = roc_nix_cq_fini(cq);
+ if (rc)
+ plt_err("Failed to cleanup cq, rc=%d", rc);
+
+ /* Finally free fast path area */
+ plt_free(rxq_sp);
+}
+