+ return 0;
+
+ fc_cfg.mode = fc->mode;
+
+ /* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
+ if (roc_model_is_cn96_ax() &&
+ dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
+ (fc_cfg.mode == RTE_ETH_FC_FULL || fc_cfg.mode == RTE_ETH_FC_RX_PAUSE)) {
+ fc_cfg.mode =
+ (fc_cfg.mode == RTE_ETH_FC_FULL ||
+ fc_cfg.mode == RTE_ETH_FC_TX_PAUSE) ?
+ RTE_ETH_FC_TX_PAUSE : RTE_ETH_FC_NONE;
+ }
+
+ return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
+}
+
+uint64_t
+cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
+{
+ uint16_t port_id = dev->eth_dev->data->port_id;
+ struct rte_mbuf mb_def;
+ uint64_t *tmp;
+
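+ /* The Rx fast path reuses the 8-byte rearm_data word built below;
+ * these build-time checks assert that data_off, refcnt, nb_segs and
+ * port sit at the expected offsets within that word.
+ */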
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
+ offsetof(struct rte_mbuf, data_off) !=
+ 2);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
+ offsetof(struct rte_mbuf, data_off) !=
+ 4);
+ RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
+ offsetof(struct rte_mbuf, data_off) !=
+ 6);
+ mb_def.nb_segs = 1;
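+ /* Reserve extra headroom for the Rx timestamp when PTP is enabled */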
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM +
+ (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
+ mb_def.port = port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* Prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ tmp = (uint64_t *)&mb_def.rearm_data;
+
+ return *tmp;
+}
+
+static inline uint8_t
+nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
+{
+ /*
+ * A maximum of three segments can be supported with W8; choose
+ * NIX_MAXSQESZ_W16 for multi-segment offload.
+ */
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_MULTI_SEGS)
+ return NIX_MAXSQESZ_W16;
+ else
+ return NIX_MAXSQESZ_W8;
+}
+
+int
+cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+ uint16_t nb_desc, uint16_t fp_tx_q_sz,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+ struct cnxk_eth_txq_sp *txq_sp;
+ struct roc_nix_sq *sq;
+ size_t txq_sz;
+ int rc;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (eth_dev->data->tx_queues[qid] != NULL) {
+ plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
+ dev_ops->tx_queue_release(eth_dev, qid);
+ eth_dev->data->tx_queues[qid] = NULL;
+ }
+
+ /* When Tx Security offload is enabled, increase tx desc count by
+ * max possible outbound desc count.
+ */
+ if (dev->tx_offloads & RTE_ETH_TX_OFFLOAD_SECURITY)
+ nb_desc += dev->outb.nb_desc;
+
+ /* Setup ROC SQ */
+ sq = &dev->sqs[qid];
+ sq->qid = qid;
+ sq->nb_desc = nb_desc;
+ sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);
+
+ rc = roc_nix_sq_init(&dev->nix, sq);
+ if (rc) {
+ plt_err("Failed to init sq=%d, rc=%d", qid, rc);
+ return rc;
+ }
+
+ rc = -ENOMEM;
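+ /* Slow path struct and the fast path area (fp_tx_q_sz bytes) share one
+ * cache-line aligned allocation; the fast path area follows the struct.
+ */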
+ txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
+ txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
+ if (!txq_sp) {
+ plt_err("Failed to alloc tx queue mem");
+ rc |= roc_nix_sq_fini(sq);
+ return rc;
+ }
+
+ txq_sp->dev = dev;
+ txq_sp->qid = qid;
+ txq_sp->qconf.conf.tx = *tx_conf;
+ /* Queue config should reflect global offloads */
+ txq_sp->qconf.conf.tx.offloads = dev->tx_offloads;
+ txq_sp->qconf.nb_desc = nb_desc;
+
+ plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
+ " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
+ qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
+ sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);
+
+ /* Store start of fast path area */
+ eth_dev->data->tx_queues[qid] = txq_sp + 1;
+ eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+ return 0;
+}
+
+static void
+cnxk_nix_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+ void *txq = eth_dev->data->tx_queues[qid];
+ struct cnxk_eth_txq_sp *txq_sp;
+ struct cnxk_eth_dev *dev;
+ struct roc_nix_sq *sq;
+ int rc;
+
+ if (!txq)
+ return;
+
+ txq_sp = cnxk_eth_txq_to_sp(txq);
+
+ dev = txq_sp->dev;
+
+ plt_nix_dbg("Releasing txq %u", qid);
+
+ /* Cleanup ROC SQ */
+ sq = &dev->sqs[qid];
+ rc = roc_nix_sq_fini(sq);
+ if (rc)
+ plt_err("Failed to cleanup sq, rc=%d", rc);
+
+ /* Finally free */
+ plt_free(txq_sp);
+}
+
+int
+cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+ uint16_t nb_desc, uint16_t fp_rx_q_sz,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ struct roc_nix *nix = &dev->nix;
+ struct cnxk_eth_rxq_sp *rxq_sp;
+ struct rte_mempool_ops *ops;
+ const char *platform_ops;
+ struct roc_nix_rq *rq;
+ struct roc_nix_cq *cq;
+ uint16_t first_skip;
+ int rc = -EINVAL;
+ size_t rxq_sz;
+
+ /* Sanity checks */
+ if (rx_conf->rx_deferred_start == 1) {
+ plt_err("Deferred Rx start is not supported");
+ goto fail;
+ }
+
+ platform_ops = rte_mbuf_platform_mempool_ops();
+ /* This driver needs cnxk_npa mempool ops to work */
+ ops = rte_mempool_get_ops(mp->ops_index);
+ if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
+ plt_err("mempool ops should be of cnxk_npa type");
+ goto fail;
+ }
+
+ if (mp->pool_id == 0) {
+ plt_err("Invalid pool_id");
+ goto fail;
+ }
+
+ /* Free memory prior to re-allocation if needed */
+ if (eth_dev->data->rx_queues[qid] != NULL) {
+ const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+
+ plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
+ dev_ops->rx_queue_release(eth_dev, qid);
+ eth_dev->data->rx_queues[qid] = NULL;
+ }
+
+ /* Clamp the CQ limit up to the packet pool aura size for LBK
+ * to avoid meta packet drop as LBK does not currently support
+ * backpressure.
+ */
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
+ uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();
+
+ /* Use current RQ's aura limit if inl rq is not available */
+ if (!pkt_pool_limit)
+ pkt_pool_limit = roc_npa_aura_op_limit_get(mp->pool_id);
+ nb_desc = RTE_MAX(nb_desc, pkt_pool_limit);
+ }
+
+ /* Setup ROC CQ */
+ cq = &dev->cqs[qid];
+ cq->qid = qid;
+ cq->nb_desc = nb_desc;
+ rc = roc_nix_cq_init(&dev->nix, cq);
+ if (rc) {
+ plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
+ goto fail;
+ }
+
+ /* Setup ROC RQ */
+ rq = &dev->rqs[qid];
+ rq->qid = qid;
+ rq->aura_handle = mp->pool_id;
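+ /* Poll mode Rx: full 32-bit flow tag, SSO (event mode) disabled */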
+ rq->flow_tag_width = 32;
+ rq->sso_ena = false;
+
+ /* Calculate first mbuf skip */
+ first_skip = (sizeof(struct rte_mbuf));
+ first_skip += RTE_PKTMBUF_HEADROOM;
+ first_skip += rte_pktmbuf_priv_size(mp);
+ rq->first_skip = first_skip;
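+ /* Only the mbuf header is skipped for subsequent segment buffers */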
+ rq->later_skip = sizeof(struct rte_mbuf);
+ rq->lpb_size = mp->elt_size;
+
+ /* Enable Inline IPsec on the RQ; this will not be used in poll mode */
+ if (roc_nix_inl_inb_is_enabled(nix))
+ rq->ipsech_ena = true;
+
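+ /* Enable the RQ right away if the port is already started */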
+ rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
+ if (rc) {
+ plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
+ goto cq_fini;
+ }
+
+ /* Allocate and setup fast path rx queue */
+ rc = -ENOMEM;
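+ /* As for Tx, the fast path Rx area follows the slow path struct in
+ * a single cache-line aligned allocation.
+ */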
+ rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
+ rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
+ if (!rxq_sp) {
+ plt_err("Failed to alloc rx queue for rq=%d", qid);
+ goto rq_fini;
+ }
+
+ /* Setup slow path fields */
+ rxq_sp->dev = dev;
+ rxq_sp->qid = qid;
+ rxq_sp->qconf.conf.rx = *rx_conf;
+ /* Queue config should reflect global offloads */
+ rxq_sp->qconf.conf.rx.offloads = dev->rx_offloads;
+ rxq_sp->qconf.nb_desc = nb_desc;
+ rxq_sp->qconf.mp = mp;
+
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY) {
+ /* Setup rq reference for inline dev if present */
+ rc = roc_nix_inl_dev_rq_get(rq);
+ if (rc)
+ goto free_mem;
+ }
+
+ plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
+ cq->nb_desc);
+
+ /* Store start of fast path area */
+ eth_dev->data->rx_queues[qid] = rxq_sp + 1;
+ eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ /* Calculate the delta and frequency multiplier between the PTP HI
+ * clock and the TSC. These are needed to derive the raw clock value
+ * from the TSC counter; the read_clock eth op returns the raw clock value.
+ */
+ if ((dev->rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
+ rc = cnxk_nix_tsc_convert(dev);
+ if (rc) {
+ plt_err("Failed to calculate delta and freq mult");
+ goto rq_fini;
+ }
+ }
+
+ return 0;
+free_mem:
+ plt_free(rxq_sp);
+rq_fini:
+ rc |= roc_nix_rq_fini(rq);
+cq_fini:
+ rc |= roc_nix_cq_fini(cq);
+fail:
+ return rc;
+}
+
+static void
+cnxk_nix_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
+{
+ void *rxq = eth_dev->data->rx_queues[qid];
+ struct cnxk_eth_rxq_sp *rxq_sp;
+ struct cnxk_eth_dev *dev;
+ struct roc_nix_rq *rq;
+ struct roc_nix_cq *cq;
+ int rc;
+
+ if (!rxq)
+ return;
+
+ rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+ dev = rxq_sp->dev;
+ rq = &dev->rqs[qid];
+
+ plt_nix_dbg("Releasing rxq %u", qid);
+
+ /* Release rq reference for inline dev if present */
+ if (dev->rx_offloads & RTE_ETH_RX_OFFLOAD_SECURITY)
+ roc_nix_inl_dev_rq_put(rq);
+
+ /* Cleanup ROC RQ */
+ rc = roc_nix_rq_fini(rq);
+ if (rc)
+ plt_err("Failed to cleanup rq, rc=%d", rc);
+
+ /* Cleanup ROC CQ */
+ cq = &dev->cqs[qid];
+ rc = roc_nix_cq_fini(cq);
+ if (rc)
+ plt_err("Failed to cleanup cq, rc=%d", rc);
+
+ /* Finally free fast path area */
+ plt_free(rxq_sp);
+}
+
+uint32_t
+cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
+ uint8_t rss_level)
+{
+ uint32_t flow_key_type[RSS_MAX_LEVELS][6] = {
+ {FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, FLOW_KEY_TYPE_TCP,
+ FLOW_KEY_TYPE_UDP, FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC},
+ {FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6,
+ FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP,
+ FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC},
+ {FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4,
+ FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6,
+ FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP,
+ FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP,
+ FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP,
+ FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC}
+ };
+ uint32_t flowkey_cfg = 0;
+
+ dev->ethdev_rss_hf = ethdev_rss;
+
+ if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD &&
+ dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
+ flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
+ }
+
+ if (ethdev_rss & RTE_ETH_RSS_C_VLAN)
+ flowkey_cfg |= FLOW_KEY_TYPE_VLAN;
+
+ if (ethdev_rss & RTE_ETH_RSS_L3_SRC_ONLY)
+ flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;
+
+ if (ethdev_rss & RTE_ETH_RSS_L3_DST_ONLY)
+ flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;
+
+ if (ethdev_rss & RTE_ETH_RSS_L4_SRC_ONLY)
+ flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;
+
+ if (ethdev_rss & RTE_ETH_RSS_L4_DST_ONLY)
+ flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;
+
+ if (ethdev_rss & RSS_IPV4_ENABLE)
+ flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX];
+
+ if (ethdev_rss & RSS_IPV6_ENABLE)
+ flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];
+
+ if (ethdev_rss & RTE_ETH_RSS_TCP)
+ flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];
+
+ if (ethdev_rss & RTE_ETH_RSS_UDP)
+ flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];
+
+ if (ethdev_rss & RTE_ETH_RSS_SCTP)
+ flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];
+
+ if (ethdev_rss & RTE_ETH_RSS_L2_PAYLOAD)
+ flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];
+
+ if (ethdev_rss & RSS_IPV6_EX_ENABLE)
+ flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;
+
+ if (ethdev_rss & RTE_ETH_RSS_PORT)
+ flowkey_cfg |= FLOW_KEY_TYPE_PORT;
+
+ if (ethdev_rss & RTE_ETH_RSS_NVGRE)
+ flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;
+
+ if (ethdev_rss & RTE_ETH_RSS_VXLAN)
+ flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;
+
+ if (ethdev_rss & RTE_ETH_RSS_GENEVE)
+ flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;
+
+ if (ethdev_rss & RTE_ETH_RSS_GTPU)
+ flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
+
+ return flowkey_cfg;
+}
+
+static void
+nix_free_queue_mem(struct cnxk_eth_dev *dev)
+{
+ plt_free(dev->rqs);
+ plt_free(dev->cqs);
+ plt_free(dev->sqs);
+ dev->rqs = NULL;
+ dev->cqs = NULL;
+ dev->sqs = NULL;
+}
+
+static int
+nix_ingress_policer_setup(struct cnxk_eth_dev *dev)
+{
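+ /* Start with empty lists of meter profiles, policies and meter objects */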
+ TAILQ_INIT(&dev->mtr_profiles);
+ TAILQ_INIT(&dev->mtr_policy);
+ TAILQ_INIT(&dev->mtr);
+
+ return 0;
+}
+
+static int
+nix_rss_default_setup(struct cnxk_eth_dev *dev)
+{
+ struct rte_eth_dev *eth_dev = dev->eth_dev;
+ uint8_t rss_hash_level;
+ uint32_t flowkey_cfg;
+ uint64_t rss_hf;
+
+ rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
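+ /* RTE_ETH_RSS_LEVEL() is 0 for the PMD default; convert non-zero
+ * levels to a zero-based index into the flowkey table.
+ */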
+ rss_hash_level = RTE_ETH_RSS_LEVEL(rss_hf);
+ if (rss_hash_level)
+ rss_hash_level -= 1;
+
+ flowkey_cfg = cnxk_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
+ return roc_nix_rss_default_setup(&dev->nix, flowkey_cfg);
+}
+
+static int
+nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+ struct cnxk_eth_qconf *tx_qconf = NULL;
+ struct cnxk_eth_qconf *rx_qconf = NULL;
+ struct cnxk_eth_rxq_sp *rxq_sp;
+ struct cnxk_eth_txq_sp *txq_sp;
+ int i, nb_rxq, nb_txq;
+ void **txq, **rxq;
+
+ nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
+ nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
+
+ tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
+ if (tx_qconf == NULL) {
+ plt_err("Failed to allocate memory for tx_qconf");
+ goto fail;
+ }
+
+ rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
+ if (rx_qconf == NULL) {
+ plt_err("Failed to allocate memory for rx_qconf");
+ goto fail;
+ }
+
+ txq = eth_dev->data->tx_queues;
+ for (i = 0; i < nb_txq; i++) {
+ if (txq[i] == NULL) {
+ tx_qconf[i].valid = false;
+ plt_info("txq[%d] is already released", i);
+ continue;
+ }
+ txq_sp = cnxk_eth_txq_to_sp(txq[i]);
+ memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
+ tx_qconf[i].valid = true;
+ dev_ops->tx_queue_release(eth_dev, i);
+ eth_dev->data->tx_queues[i] = NULL;
+ }
+
+ rxq = eth_dev->data->rx_queues;
+ for (i = 0; i < nb_rxq; i++) {
+ if (rxq[i] == NULL) {
+ rx_qconf[i].valid = false;
+ plt_info("rxq[%d] is already released", i);
+ continue;
+ }
+ rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
+ memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
+ rx_qconf[i].valid = true;
+ dev_ops->rx_queue_release(eth_dev, i);
+ eth_dev->data->rx_queues[i] = NULL;
+ }
+
+ dev->tx_qconf = tx_qconf;
+ dev->rx_qconf = rx_qconf;
+ return 0;
+
+fail:
+ free(tx_qconf);
+ free(rx_qconf);
+ return -ENOMEM;
+}
+
+static int
+nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
+{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+ const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+ struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
+ struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
+ int rc, i, nb_rxq, nb_txq;
+
+ nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
+ nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);
+
+ rc = -ENOMEM;
+ /* Set up Tx & Rx queues with the previous configuration so that
+ * the queues remain functional when ports are started without
+ * reconfiguring the queues.
+ *
+ * The usual reconfiguration sequence looks like this:
+ * port_configure() {
+ * if(reconfigure) {
+ * queue_release()
+ * queue_setup()
+ * }
+ * queue_configure() {
+ * queue_release()
+ * queue_setup()
+ * }
+ * }
+ * port_start()
+ *
+ * In some applications' control path, queue_configure() is NOT
+ * invoked for TXQs/RXQs in port_configure().
+ * In such cases the queues can still be functional after start, as
+ * they are already set up in port_configure().
+ */
+ for (i = 0; i < nb_txq; i++) {
+ if (!tx_qconf[i].valid)
+ continue;
+ rc = dev_ops->tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, 0,
+ &tx_qconf[i].conf.tx);
+ if (rc) {
+ plt_err("Failed to setup tx queue rc=%d", rc);
+ for (i -= 1; i >= 0; i--)
+ dev_ops->tx_queue_release(eth_dev, i);
+ goto fail;
+ }
+ }
+
+ free(tx_qconf);
+ tx_qconf = NULL;
+
+ for (i = 0; i < nb_rxq; i++) {
+ if (!rx_qconf[i].valid)
+ continue;
+ rc = dev_ops->rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, 0,
+ &rx_qconf[i].conf.rx,
+ rx_qconf[i].mp);
+ if (rc) {
+ plt_err("Failed to setup rx queue rc=%d", rc);
+ for (i -= 1; i >= 0; i--)
+ dev_ops->rx_queue_release(eth_dev, i);
+ goto tx_queue_release;
+ }
+ }
+
+ free(rx_qconf);
+ rx_qconf = NULL;
+
+ return 0;
+
+tx_queue_release:
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
+ dev_ops->tx_queue_release(eth_dev, i);
+fail:
+ if (tx_qconf)
+ free(tx_qconf);
+ if (rx_qconf)
+ free(rx_qconf);
+
+ return rc;
+}
+
+static uint16_t
+nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
+{
+ RTE_SET_USED(queue);
+ RTE_SET_USED(mbufs);
+ RTE_SET_USED(pkts);
+
+ return 0;
+}
+
+static void
+nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
+{
+ /* These dummy functions are required to support applications
+ * that reconfigure queues without stopping the Tx and Rx burst
+ * threads (e.g. the KNI application).
+ * When the queue context is saved, the Tx/Rx queues are released,
+ * which would crash the application since Rx/Tx burst may still
+ * be running on other lcores.
+ */
+ eth_dev->tx_pkt_burst = nix_eth_nop_burst;
+ eth_dev->rx_pkt_burst = nix_eth_nop_burst;
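+ /* Ensure the nop burst functions are visible to other lcores before
+ * the queues are released.
+ */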
+ rte_mb();
+}
+
+static int
+nix_lso_tun_fmt_update(struct cnxk_eth_dev *dev)
+{
+ uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX];
+ uint8_t tun[ROC_NIX_LSO_TUN_MAX];
+ struct roc_nix *nix = &dev->nix;
+ int rc;
+
+ rc = roc_nix_lso_fmt_get(nix, udp_tun, tun);
+ if (rc)
+ return rc;
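+ /* Pack the regular tunnel format indices into bytes 0-3 and the UDP
+ * tunnel format indices into bytes 4-7 of lso_tun_fmt.
+ */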
+
+ dev->lso_tun_fmt = ((uint64_t)tun[ROC_NIX_LSO_TUN_V4V4] |
+ (uint64_t)tun[ROC_NIX_LSO_TUN_V4V6] << 8 |
+ (uint64_t)tun[ROC_NIX_LSO_TUN_V6V4] << 16 |
+ (uint64_t)tun[ROC_NIX_LSO_TUN_V6V6] << 24);
+
+ dev->lso_tun_fmt |= ((uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V4] << 32 |
+ (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V6] << 40 |
+ (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V4] << 48 |
+ (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V6] << 56);
+ return 0;
+}
+
+static int
+nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
+{
+ struct roc_nix *nix = &dev->nix;
+ int rc;