+int
+cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
+{
+ struct roc_nix *nix = &dev->nix;
+
+ if (dev->inb.inl_dev == use_inl_dev)
+ return 0;
+
+ plt_nix_dbg("Security sessions(%u) still active, inl=%u!!!",
+ dev->inb.nb_sess, !!dev->inb.inl_dev);
+
+ /* Change the mode */
+ dev->inb.inl_dev = use_inl_dev;
+
+ /* Update RoC for NPC rule insertion */
+ roc_nix_inb_mode_set(nix, use_inl_dev);
+
+ /* Setup lookup mem */
+ return cnxk_nix_lookup_mem_sa_base_set(dev);
+}
+
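+/* Set up inline inbound (Rx security) and inline outbound (Tx security)
+ * resources. When Tx security is enabled, an SA index bitmap of
+ * dev->outb.max_sa entries is also allocated for SA allocation/free.
+ */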
+static int
+nix_security_setup(struct cnxk_eth_dev *dev)
+{
+ struct roc_nix *nix = &dev->nix;
+ int i, rc = 0;
+
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ /* Setup Inline Inbound */
+ rc = roc_nix_inl_inb_init(nix);
+ if (rc) {
+ plt_err("Failed to initialize nix inline inb, rc=%d",
+ rc);
+ return rc;
+ }
+
+ /* By default, pick the inline device for poll mode. This is
+ * overridden when event mode RQs are set up.
+ */
+ cnxk_nix_inb_mode_set(dev, true);
+ }
+
+ if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
+ dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ struct plt_bitmap *bmap;
+ size_t bmap_sz;
+ void *mem;
+
+ /* Setup enough descriptors for all tx queues */
+ nix->outb_nb_desc = dev->outb.nb_desc;
+ nix->outb_nb_crypto_qs = dev->outb.nb_crypto_qs;
+
+ /* Setup Inline Outbound */
+ rc = roc_nix_inl_outb_init(nix);
+ if (rc) {
+ plt_err("Failed to initialize nix inline outb, rc=%d",
+ rc);
+ goto cleanup;
+ }
+
+ dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);
+
+ /* Skip the rest if DEV_TX_OFFLOAD_SECURITY is not enabled */
+ if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY))
+ goto done;
+
+ rc = -ENOMEM;
+ /* Allocate a bitmap used to allocate and free SA indexes */
+ bmap_sz = plt_bitmap_get_memory_footprint(dev->outb.max_sa);
+ mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
+ if (mem == NULL) {
+ plt_err("Outbound SA bmap alloc failed");
+
+ rc |= roc_nix_inl_outb_fini(nix);
+ goto cleanup;
+ }
+
+ rc = -EIO;
+ bmap = plt_bitmap_init(dev->outb.max_sa, mem, bmap_sz);
+ if (!bmap) {
+ plt_err("Outbound SA bmap init failed");
+
+ rc |= roc_nix_inl_outb_fini(nix);
+ plt_free(mem);
+ goto cleanup;
+ }
+
+ for (i = 0; i < dev->outb.max_sa; i++)
+ plt_bitmap_set(bmap, i);
+
+ dev->outb.sa_base = roc_nix_inl_outb_sa_base_get(nix);
+ dev->outb.sa_bmap_mem = mem;
+ dev->outb.sa_bmap = bmap;
+ }
+
+done:
+ return 0;
+cleanup:
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+ rc |= roc_nix_inl_inb_fini(nix);
+ return rc;
+}
+
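+/* Tear down the inline inbound/outbound state created by
+ * nix_security_setup(): destroy any remaining security sessions, clear
+ * the SA lookup mem and free the outbound SA index bitmap.
+ */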
+static int
+nix_security_release(struct cnxk_eth_dev *dev)
+{
+ struct rte_eth_dev *eth_dev = dev->eth_dev;
+ struct cnxk_eth_sec_sess *eth_sec, *tvar;
+ struct roc_nix *nix = &dev->nix;
+ int rc, ret = 0;
+
+ /* Cleanup Inline inbound */
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ /* Destroy inbound sessions */
+ tvar = NULL;
+ RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
+ cnxk_eth_sec_ops.session_destroy(eth_dev,
+ eth_sec->sess);
+
+ /* Clear lookup mem */
+ cnxk_nix_lookup_mem_sa_base_clear(dev);
+
+ rc = roc_nix_inl_inb_fini(nix);
+ if (rc)
+ plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
+ ret |= rc;
+ }
+
+ /* Cleanup Inline outbound */
+ if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
+ dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ /* Destroy outbound sessions */
+ tvar = NULL;
+ RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
+ cnxk_eth_sec_ops.session_destroy(eth_dev,
+ eth_sec->sess);
+
+ rc = roc_nix_inl_outb_fini(nix);
+ if (rc)
+ plt_err("Failed to cleanup nix inline outb, rc=%d", rc);
+ ret |= rc;
+
+ plt_bitmap_free(dev->outb.sa_bmap);
+ plt_free(dev->outb.sa_bmap_mem);
+ dev->outb.sa_bmap = NULL;
+ dev->outb.sa_bmap_mem = NULL;
+ }
+
+ dev->inb.inl_dev = false;
+ roc_nix_inb_mode_set(nix, false);
+ dev->nb_rxq_sso = 0;
+ dev->inb.nb_sess = 0;
+ dev->outb.nb_sess = 0;
+ return ret;
+}
+
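+/* Enable Rx scatter and Tx multi-seg offloads when the configured max Rx
+ * packet length does not fit in a single mbuf data buffer.
+ */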
+static void
+nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
+{
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ struct rte_eth_dev *eth_dev;
+ struct cnxk_eth_dev *dev;
+ uint32_t buffsz;
+
+ dev = rxq->dev;
+ eth_dev = dev->eth_dev;
+
+ /* Get rx buffer size */
+ mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
+ buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+
+ if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
+ dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
+ dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ }
+}
+
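+/* Recompute the MTU from max_rx_pkt_len (minus L2 overhead, plus max VTAG
+ * action size) and apply it, enabling scatter mode first if a jumbo frame
+ * does not fit in a single Rx buffer.
+ */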
+int
+nix_recalc_mtu(struct rte_eth_dev *eth_dev)
+{
+ struct rte_eth_dev_data *data = eth_dev->data;
+ struct cnxk_eth_rxq_sp *rxq;
+ uint16_t mtu;
+ int rc;
+
+ rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
+ /* Setup scatter mode if needed by jumbo */
+ nix_enable_mseg_on_jumbo(rxq);
+
+ /* Setup MTU based on max_rx_pkt_len */
+ mtu = data->dev_conf.rxmode.max_rx_pkt_len - CNXK_NIX_L2_OVERHEAD +
+ CNXK_NIX_MAX_VTAG_ACT_SIZE;
+
+ rc = cnxk_nix_mtu_set(eth_dev, mtu);
+ if (rc)
+ plt_err("Failed to set default MTU size, rc=%d", rc);
+
+ return rc;
+}
+
+static int
+nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct cnxk_fc_cfg *fc = &dev->fc_cfg;
+ struct rte_eth_fc_conf fc_conf = {0};
+ int rc;
+
+ /* Both Rx & Tx flow ctrl are enabled (RTE_FC_FULL) in HW by the
+ * AF driver; reflect that state in the PMD structure.
+ */
+ rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
+ if (rc)
+ goto exit;
+
+ fc->mode = fc_conf.mode;
+ fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
+ (fc_conf.mode == RTE_FC_RX_PAUSE);
+ fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
+ (fc_conf.mode == RTE_FC_TX_PAUSE);
+
+exit:
+ return rc;
+}
+
+static int
+nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct cnxk_fc_cfg *fc = &dev->fc_cfg;
+ struct rte_eth_fc_conf fc_cfg = {0};
+
+ if (roc_nix_is_vf_or_sdp(&dev->nix))
+ return 0;
+
+ fc_cfg.mode = fc->mode;
+
+ /* To avoid link credit deadlock on Ax, disable Tx FC if it is enabled */
+ if (roc_model_is_cn96_ax() &&
+ dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
+ (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
+ fc_cfg.mode =
+ (fc_cfg.mode == RTE_FC_FULL ||
+ fc_cfg.mode == RTE_FC_TX_PAUSE) ?
+ RTE_FC_TX_PAUSE : RTE_FC_NONE;
+ }
+
+ return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
+}
+
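+/* Build the 64-bit rearm_data template used by the Rx fast path. The
+ * BUILD_BUG_ONs below verify that data_off, refcnt, nb_segs and port
+ * stay packed within the same 8-byte word of struct rte_mbuf.
+ */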
+uint64_t
+cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
+{
+ uint16_t port_id = dev->eth_dev->data->port_id;
+ struct rte_mbuf mb_def;
+ uint64_t *tmp;
+
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
+ offsetof(struct rte_mbuf, data_off) !=
+ 2);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
+ offsetof(struct rte_mbuf, data_off) !=
+ 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
+ offsetof(struct rte_mbuf, data_off) !=
+ 6);
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM +
+ (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
+ mb_def.port = port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* Prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ tmp = (uint64_t *)&mb_def.rearm_data;
+
+ return *tmp;
+}
+
+static inline uint8_t
+nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
+{
+ /*
+ * A maximum of three segments is supported with W8; choose
+ * NIX_MAXSQESZ_W16 for multi-segment offload.
+ */
+ if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ return NIX_MAXSQESZ_W16;
+ else
+ return NIX_MAXSQESZ_W8;
+}
+
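+/* Configure a Tx queue: initialize the ROC SQ and allocate the slow path
+ * cnxk_eth_txq_sp area that precedes the fast path queue memory.
+ */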
+int
+cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+ uint16_t nb_desc, uint16_t fp_tx_q_sz,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+ struct cnxk_eth_txq_sp *txq_sp;
+ struct roc_nix_sq *sq;
+ size_t txq_sz;
+ int rc;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (eth_dev->data->tx_queues[qid] != NULL) {
+ plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
+ dev_ops->tx_queue_release(eth_dev, qid);
+ eth_dev->data->tx_queues[qid] = NULL;
+ }
+
+ /* When Tx Security offload is enabled, increase tx desc count by
+ * max possible outbound desc count.
+ */
+ if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
+ nb_desc += dev->outb.nb_desc;
+
+ /* Setup ROC SQ */
+ sq = &dev->sqs[qid];
+ sq->qid = qid;
+ sq->nb_desc = nb_desc;
+ sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
+
+ rc = roc_nix_sq_init(&dev->nix, sq);
+ if (rc) {
+ plt_err("Failed to init sq=%d, rc=%d", qid, rc);
+ return rc;
+ }
+
+ rc = -ENOMEM;
+ txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
+ txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
+ if (!txq_sp) {
+ plt_err("Failed to alloc tx queue mem");
+ rc |= roc_nix_sq_fini(sq);
+ return rc;
+ }
+
+ txq_sp->dev = dev;
+ txq_sp->qid = qid;
+ txq_sp->qconf.conf.tx = *tx_conf;
+ /* Queue config should reflect global offloads */
+ txq_sp->qconf.conf.tx.offloads = dev->tx_offloads;
+ txq_sp->qconf.nb_desc = nb_desc;
+
+ plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
+ " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
+ qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
+ sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);
+
+ /* Store start of fast path area */
+ eth_dev->data->tx_queues[qid] = txq_sp + 1;
+ eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
+static void
+cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+ void *txq = eth_dev->data->tx_queues[qid];
+ struct cnxk_eth_txq_sp *txq_sp;
+ struct cnxk_eth_dev *dev;
+ struct roc_nix_sq *sq;
+ int rc;
+
+ if (!txq)
+ return;
+
+ txq_sp = cnxk_eth_txq_to_sp(txq);
+
+ dev = txq_sp->dev;
+
+ plt_nix_dbg("Releasing txq %u", qid);
+
+ /* Cleanup ROC SQ */
+ sq = &dev->sqs[qid];
+ rc = roc_nix_sq_fini(sq);
+ if (rc)
+ plt_err("Failed to cleanup sq, rc=%d", rc);
+
+ /* Finally free */
+ plt_free(txq_sp);
+}
+
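+/* Configure an Rx queue: initialize the ROC CQ and RQ, allocate the slow
+ * path cnxk_eth_rxq_sp area preceding the fast path queue memory, and take
+ * an inline device RQ reference when Rx security is enabled.
+ */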
+int
+cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+ uint16_t nb_desc, uint16_t fp_rx_q_sz,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ struct cnxk_eth_rxq_sp *rxq_sp;
+ struct rte_mempool_ops *ops;
+ const char *platform_ops;
+ struct roc_nix_rq *rq;
+ struct roc_nix_cq *cq;
+ uint16_t first_skip;
+ int rc = -EINVAL;
+ size_t rxq_sz;
+
+ /* Sanity checks */
+ if (rx_conf->rx_deferred_start == 1) {
+ plt_err("Deferred Rx start is not supported");
+ goto fail;
+ }
+
+ platform_ops = rte_mbuf_platform_mempool_ops();
+ /* This driver needs cnxk_npa mempool ops to work */
+ ops = rte_mempool_get_ops(mp->ops_index);
+ if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
+ plt_err("mempool ops should be of cnxk_npa type");
+ goto fail;
+ }
+
+ if (mp->pool_id == 0) {
+ plt_err("Invalid pool_id");
+ goto fail;
+ }
+
+ /* Free memory prior to re-allocation if needed */
+ if (eth_dev->data->rx_queues[qid] != NULL) {
+ const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+
+ plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
+ dev_ops->rx_queue_release(eth_dev, qid);
+ eth_dev->data->rx_queues[qid] = NULL;
+ }
+
+ /* Clamp up the cq limit to the size of the packet pool aura for LBK
+ * to avoid meta packet drops, as LBK does not currently support
+ * backpressure.
+ */
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
+ uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
+
+ /* Use current RQ's aura limit if inl rq is not available */
+ if (!pkt_pool_limit)
+ pkt_pool_limit = roc_npa_aura_op_limit_get(mp->pool_id);
+ nb_desc = RTE_MAX(nb_desc, pkt_pool_limit);
+ }
+
+ /* Setup ROC CQ */
+ cq = &dev->cqs[qid];
+ cq->qid = qid;
+ cq->nb_desc = nb_desc;
+ rc = roc_nix_cq_init(&dev->nix, cq);
+ if (rc) {
+ plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
+ goto fail;
+ }
+
+ /* Setup ROC RQ */
+ rq = &dev->rqs[qid];
+ rq->qid = qid;
+ rq->aura_handle = mp->pool_id;
+ rq->flow_tag_width = 32;
+ rq->sso_ena = false;
+
+ /* Calculate first mbuf skip */
+ first_skip = (sizeof(struct rte_mbuf));
+ first_skip += RTE_PKTMBUF_HEADROOM;
+ first_skip += rte_pktmbuf_priv_size(mp);
+ rq->first_skip = first_skip;
+ rq->later_skip = sizeof(struct rte_mbuf);
+ rq->lpb_size = mp->elt_size;
+
+ /* Enable inline IPsec on the RQ; it will not be used in poll mode */
+ if (roc_nix_inl_inb_is_enabled(nix))
+ rq->ipsech_ena = true;
+
+ rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
+ if (rc) {
+ plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
+ goto cq_fini;
+ }
+
+ /* Allocate and setup fast path rx queue */
+ rc = -ENOMEM;
+ rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
+ rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
+ if (!rxq_sp) {
+ plt_err("Failed to alloc rx queue for rq=%d", qid);
+ goto rq_fini;
+ }
+
+ /* Setup slow path fields */
+ rxq_sp->dev = dev;
+ rxq_sp->qid = qid;
+ rxq_sp->qconf.conf.rx = *rx_conf;
+ /* Queue config should reflect global offloads */
+ rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads;
+ rxq_sp->qconf.nb_desc = nb_desc;
+ rxq_sp->qconf.mp = mp;
+
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+ /* Setup rq reference for inline dev if present */
+ rc = roc_nix_inl_dev_rq_get(rq);
+ if (rc)
+ goto free_mem;
+ }
+
+ plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
+ cq->nb_desc);
+
+ /* Store start of fast path area */
+ eth_dev->data->rx_queues[qid] = rxq_sp + 1;
+ eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ /* Calculate the delta and frequency multiplier between the PTP HI
+ * clock and TSC. These are needed to derive the raw clock value from
+ * the TSC counter, which the read_clock eth op returns.
+ */
+ if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
+ rc = cnxk_nix_tsc_convert(dev);
+ if (rc) {
+ plt_err("Failed to calculate delta and freq mult");
+ goto rq_fini;
+ }
+ }
+
+ return 0;
+free_mem:
+ plt_free(rxq_sp);
+rq_fini:
+ rc |= roc_nix_rq_fini(rq);
+cq_fini:
+ rc |= roc_nix_cq_fini(cq);
+fail:
+ return rc;
+}
+
+static void
+cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+ void *rxq = eth_dev->data->rx_queues[qid];
+ struct cnxk_eth_rxq_sp *rxq_sp;
+ struct cnxk_eth_dev *dev;
+ struct roc_nix_rq *rq;
+ struct roc_nix_cq *cq;
+ int rc;
+
+ if (!rxq)
+ return;
+
+ rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+ dev = rxq_sp->dev;
+ rq = &dev->rqs[qid];
+
+ plt_nix_dbg("Releasing rxq %u", qid);
+
+ /* Release rq reference for inline dev if present */
+ if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
+ roc_nix_inl_dev_rq_put(rq);
+
+ /* Cleanup ROC RQ */
+ rc = roc_nix_rq_fini(rq);
+ if (rc)
+ plt_err("Failed to cleanup rq, rc=%d", rc);
+
+ /* Cleanup ROC CQ */
+ cq = &dev->cqs[qid];
+ rc = roc_nix_cq_fini(cq);
+ if (rc)
+ plt_err("Failed to cleanup cq, rc=%d", rc);
+
+ /* Finally free fast path area */
+ plt_free(rxq_sp);
+}
+