/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <cnxk_ethdev.h>
nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
        uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;

        if (roc_nix_is_vf_or_sdp(&dev->nix))
                capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;

static inline uint64_t
nix_get_tx_offload_capa(struct cnxk_eth_dev *dev)
        return CNXK_NIX_TX_OFFLOAD_CAPA;
static inline uint32_t
nix_get_speed_capa(struct cnxk_eth_dev *dev)
        /* Auto negotiation disabled */
        speed_capa = ETH_LINK_SPEED_FIXED;
        if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
                speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
                              ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
                              ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
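                /* Only physical ports advertise these speeds; VF, SDP and LBK
                 * interfaces have no physical MAC link and keep just the
                 * FIXED flag set above.
                 */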
cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
        uint16_t port_id = dev->eth_dev->data->port_id;
        struct rte_mbuf mb_def;

        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
                                 offsetof(struct rte_mbuf, data_off) !=
                         2);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
                                 offsetof(struct rte_mbuf, data_off) !=
                         4);
        RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
                                 offsetof(struct rte_mbuf, data_off) !=
                         6);
        mb_def.data_off = RTE_PKTMBUF_HEADROOM;
        mb_def.port = port_id;
        rte_mbuf_refcnt_set(&mb_def, 1);

        /* Prevent compiler reordering: rearm_data covers previous fields */
        rte_compiler_barrier();
        tmp = (uint64_t *)&mb_def.rearm_data;
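        /* The 64-bit value at rearm_data packs data_off, refcnt, nb_segs and
         * port (hence the offset checks above), so the fast-path Rx routines
         * can re-arm each mbuf with a single 8-byte store of this template.
         */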
nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
        /* A maximum of three segments is supported with W8; choose
         * NIX_MAXSQESZ_W16 for multi-segment offload.
         */
        if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
                return NIX_MAXSQESZ_W16;

        return NIX_MAXSQESZ_W8;
cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                        uint16_t nb_desc, uint16_t fp_tx_q_sz,
                        const struct rte_eth_txconf *tx_conf)
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
        struct cnxk_eth_txq_sp *txq_sp;
        struct roc_nix_sq *sq;

        /* Free memory prior to re-allocation if needed. */
        if (eth_dev->data->tx_queues[qid] != NULL) {
                plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
                dev_ops->tx_queue_release(eth_dev->data->tx_queues[qid]);
                eth_dev->data->tx_queues[qid] = NULL;

        sq->nb_desc = nb_desc;
        sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);

        rc = roc_nix_sq_init(&dev->nix, sq);
                plt_err("Failed to init sq=%d, rc=%d", qid, rc);

        txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
        txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
                plt_err("Failed to alloc tx queue mem");
                rc |= roc_nix_sq_fini(sq);

        txq_sp->qconf.conf.tx = *tx_conf;
        txq_sp->qconf.nb_desc = nb_desc;

        plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
                    " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
                    qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
                    sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);

        /* Store start of fast path area */
        eth_dev->data->tx_queues[qid] = txq_sp + 1;
        eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
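        /* Note: ethdev is handed the fast-path area that follows the
         * slow-path header (txq_sp + 1); cnxk_eth_txq_to_sp() in the release
         * path walks back from it to recover the header.
         */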
cnxk_nix_tx_queue_release(void *txq)
        struct cnxk_eth_txq_sp *txq_sp;
        struct cnxk_eth_dev *dev;
        struct roc_nix_sq *sq;

        txq_sp = cnxk_eth_txq_to_sp(txq);

        plt_nix_dbg("Releasing txq %u", qid);

        rc = roc_nix_sq_fini(sq);
                plt_err("Failed to cleanup sq, rc=%d", rc);
cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
                        uint16_t nb_desc, uint16_t fp_rx_q_sz,
                        const struct rte_eth_rxconf *rx_conf,
                        struct rte_mempool *mp)
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct cnxk_eth_rxq_sp *rxq_sp;
        struct rte_mempool_ops *ops;
        const char *platform_ops;
        struct roc_nix_rq *rq;
        struct roc_nix_cq *cq;

        if (rx_conf->rx_deferred_start == 1) {
                plt_err("Deferred Rx start is not supported");

        platform_ops = rte_mbuf_platform_mempool_ops();
        /* This driver needs cnxk_npa mempool ops to work */
        ops = rte_mempool_get_ops(mp->ops_index);
        if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
                plt_err("mempool ops should be of cnxk_npa type");

        if (mp->pool_id == 0) {
                plt_err("Invalid pool_id");

        /* Free memory prior to re-allocation if needed */
        if (eth_dev->data->rx_queues[qid] != NULL) {
                const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;

                plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
                dev_ops->rx_queue_release(eth_dev->data->rx_queues[qid]);
                eth_dev->data->rx_queues[qid] = NULL;

        cq->nb_desc = nb_desc;
        rc = roc_nix_cq_init(&dev->nix, cq);
                plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);

        rq->aura_handle = mp->pool_id;
        rq->flow_tag_width = 32;

        /* Calculate first mbuf skip */
        first_skip = sizeof(struct rte_mbuf);
        first_skip += RTE_PKTMBUF_HEADROOM;
        first_skip += rte_pktmbuf_priv_size(mp);
        rq->first_skip = first_skip;
        rq->later_skip = sizeof(struct rte_mbuf);
        rq->lpb_size = mp->elt_size;
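        /* first_skip covers the mbuf header, headroom and private area of the
         * packet's first buffer; later_skip only skips the mbuf header of any
         * subsequent segment buffers.
         */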
        rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
                plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);

        /* Allocate and setup fast path rx queue */
        rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
        rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
                plt_err("Failed to alloc rx queue for rq=%d", qid);

        /* Setup slow path fields */
        rxq_sp->qconf.conf.rx = *rx_conf;
        rxq_sp->qconf.nb_desc = nb_desc;
        rxq_sp->qconf.mp = mp;

        plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,

        /* Store start of fast path area */
        eth_dev->data->rx_queues[qid] = rxq_sp + 1;
        eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;

        rc |= roc_nix_rq_fini(rq);
        rc |= roc_nix_cq_fini(cq);
cnxk_nix_rx_queue_release(void *rxq)
        struct cnxk_eth_rxq_sp *rxq_sp;
        struct cnxk_eth_dev *dev;
        struct roc_nix_rq *rq;
        struct roc_nix_cq *cq;

        rxq_sp = cnxk_eth_rxq_to_sp(rxq);

        plt_nix_dbg("Releasing rxq %u", qid);

        rc = roc_nix_rq_fini(rq);
                plt_err("Failed to cleanup rq, rc=%d", rc);

        rc = roc_nix_cq_fini(cq);
                plt_err("Failed to cleanup cq, rc=%d", rc);

        /* Finally free fast path area */
cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
                       uint8_t rss_level)
        uint32_t flow_key_type[RSS_MAX_LEVELS][6] = {
                {FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, FLOW_KEY_TYPE_TCP,
                 FLOW_KEY_TYPE_UDP, FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC},
                {FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6,
                 FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP,
                 FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC},
                {FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4,
                 FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6,
                 FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP,
                 FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP,
                 FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP,
                 FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC}
        };
        uint32_t flowkey_cfg = 0;

        dev->ethdev_rss_hf = ethdev_rss;

        if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
                flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;

        if (ethdev_rss & ETH_RSS_C_VLAN)
                flowkey_cfg |= FLOW_KEY_TYPE_VLAN;

        if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
                flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;

        if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
                flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;

        if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
                flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;

        if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
                flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;

        if (ethdev_rss & RSS_IPV4_ENABLE)
                flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX];

        if (ethdev_rss & RSS_IPV6_ENABLE)
                flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];

        if (ethdev_rss & ETH_RSS_TCP)
                flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];

        if (ethdev_rss & ETH_RSS_UDP)
                flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];

        if (ethdev_rss & ETH_RSS_SCTP)
                flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];

        if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
                flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];

        if (ethdev_rss & RSS_IPV6_EX_ENABLE)
                flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;

        if (ethdev_rss & ETH_RSS_PORT)
                flowkey_cfg |= FLOW_KEY_TYPE_PORT;

        if (ethdev_rss & ETH_RSS_NVGRE)
                flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;

        if (ethdev_rss & ETH_RSS_VXLAN)
                flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;

        if (ethdev_rss & ETH_RSS_GENEVE)
                flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;

        if (ethdev_rss & ETH_RSS_GTPU)
                flowkey_cfg |= FLOW_KEY_TYPE_GTPU;
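        /* Rows of flow_key_type select outer-only, inner-only or outer+inner
         * hashing based on rss_level; the accumulated FLOW_KEY_TYPE_* mask is
         * what nix_rss_default_setup() below hands to
         * roc_nix_rss_default_setup().
         */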
nix_free_queue_mem(struct cnxk_eth_dev *dev)
nix_rss_default_setup(struct cnxk_eth_dev *dev)
        struct rte_eth_dev *eth_dev = dev->eth_dev;
        uint8_t rss_hash_level;
        uint32_t flowkey_cfg;

        rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
        rss_hash_level = ETH_RSS_LEVEL(rss_hf);

        flowkey_cfg = cnxk_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
        return roc_nix_rss_default_setup(&dev->nix, flowkey_cfg);
nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
        struct cnxk_eth_qconf *tx_qconf = NULL;
        struct cnxk_eth_qconf *rx_qconf = NULL;
        struct cnxk_eth_rxq_sp *rxq_sp;
        struct cnxk_eth_txq_sp *txq_sp;
        int i, nb_rxq, nb_txq;

        nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
        nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);

        tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
        if (tx_qconf == NULL) {
                plt_err("Failed to allocate memory for tx_qconf");

        rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
        if (rx_qconf == NULL) {
                plt_err("Failed to allocate memory for rx_qconf");

        txq = eth_dev->data->tx_queues;
        for (i = 0; i < nb_txq; i++) {
                if (txq[i] == NULL) {
                        tx_qconf[i].valid = false;
                        plt_info("txq[%d] is already released", i);

                txq_sp = cnxk_eth_txq_to_sp(txq[i]);
                memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
                tx_qconf[i].valid = true;
                dev_ops->tx_queue_release(txq[i]);
                eth_dev->data->tx_queues[i] = NULL;

        rxq = eth_dev->data->rx_queues;
        for (i = 0; i < nb_rxq; i++) {
                if (rxq[i] == NULL) {
                        rx_qconf[i].valid = false;
                        plt_info("rxq[%d] is already released", i);

                rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
                memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
                rx_qconf[i].valid = true;
                dev_ops->rx_queue_release(rxq[i]);
                eth_dev->data->rx_queues[i] = NULL;

        dev->tx_qconf = tx_qconf;
        dev->rx_qconf = rx_qconf;
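        /* The qconf snapshots saved here are consumed by
         * nix_restore_queue_cfg() once the NIX LF has been re-created during
         * a reconfigure.
         */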
nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
        struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
        struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
        int rc, i, nb_rxq, nb_txq;

        nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
        nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);

        /* Set up Tx & Rx queues with the previously saved configuration so
         * that the queues remain functional when ports are started without
         * the application reconfiguring them.
         *
         * The usual reconfiguration sequence is port_configure() followed by
         * queue_configure() (release + setup) for each queue.
         * In some applications' control paths, queue_configure() is NOT
         * invoked for the TXQs/RXQs after port_configure(); in such cases the
         * queues can still be functional after start since they are already
         * set up in port_configure().
         */
        for (i = 0; i < nb_txq; i++) {
                if (!tx_qconf[i].valid)

                rc = dev_ops->tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, 0,
                                             &tx_qconf[i].conf.tx);
                        plt_err("Failed to setup tx queue rc=%d", rc);
                        txq = eth_dev->data->tx_queues;
                        for (i -= 1; i >= 0; i--)
                                dev_ops->tx_queue_release(txq[i]);

        for (i = 0; i < nb_rxq; i++) {
                if (!rx_qconf[i].valid)

                rc = dev_ops->rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, 0,
                                             &rx_qconf[i].conf.rx,
                        plt_err("Failed to setup rx queue rc=%d", rc);
                        rxq = eth_dev->data->rx_queues;
                        for (i -= 1; i >= 0; i--)
                                dev_ops->rx_queue_release(rxq[i]);
                        goto tx_queue_release;

        txq = eth_dev->data->tx_queues;
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
                dev_ops->tx_queue_release(txq[i]);
nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)

nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
        /* These dummy functions are needed to support applications that
         * reconfigure queues without stopping the Tx/Rx burst threads (e.g.
         * the KNI app). When the queue context is saved, the txq/rxq pointers
         * are released, which would crash the application if Rx/Tx bursts
         * were still running on other lcores.
         */
        eth_dev->tx_pkt_burst = nix_eth_nop_burst;
        eth_dev->rx_pkt_burst = nix_eth_nop_burst;
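        /* nix_eth_nop_burst() simply reports zero packets handled, so lcores
         * still polling the old queue pointers spin harmlessly while the
         * queues are being reconfigured.
         */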
nix_lso_tun_fmt_update(struct cnxk_eth_dev *dev)
        uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX];
        uint8_t tun[ROC_NIX_LSO_TUN_MAX];
        struct roc_nix *nix = &dev->nix;

        rc = roc_nix_lso_fmt_get(nix, udp_tun, tun);

        dev->lso_tun_fmt = ((uint64_t)tun[ROC_NIX_LSO_TUN_V4V4] |
                            (uint64_t)tun[ROC_NIX_LSO_TUN_V4V6] << 8 |
                            (uint64_t)tun[ROC_NIX_LSO_TUN_V6V4] << 16 |
                            (uint64_t)tun[ROC_NIX_LSO_TUN_V6V6] << 24);

        dev->lso_tun_fmt |= ((uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V4] << 32 |
                             (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V6] << 40 |
                             (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V4] << 48 |
                             (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V6] << 56);
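        /* One LSO format index per byte: bytes 0-3 hold the plain tunnel
         * formats (V4V4, V4V6, V6V4, V6V6) and bytes 4-7 the UDP tunnel ones,
         * so the Tx path can extract the right index with something like
         * (lso_tun_fmt >> shift) & 0xff (illustrative).
         */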
nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
        struct roc_nix *nix = &dev->nix;

        /* Nothing much to do if the offload is not enabled */
        if (!(dev->tx_offloads &
              (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
               DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))

        /* Set up LSO formats in AF. It's a no-op if another ethdev has
         * already set them up.
         */
        rc = roc_nix_lso_fmt_setup(nix);

        return nix_lso_tun_fmt_update(dev);
cnxk_nix_configure(struct rte_eth_dev *eth_dev)
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct rte_eth_dev_data *data = eth_dev->data;
        struct rte_eth_conf *conf = &data->dev_conf;
        struct rte_eth_rxmode *rxmode = &conf->rxmode;
        struct rte_eth_txmode *txmode = &conf->txmode;
        char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
        struct roc_nix *nix = &dev->nix;
        struct rte_ether_addr *ea;
        uint8_t nb_rxq, nb_txq;

        if (rte_eal_has_hugepages() == 0) {
                plt_err("Huge page is not configured");

        if (conf->dcb_capability_en == 1) {
                plt_err("dcb enable is not supported");

        if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
                plt_err("Flow director is not supported");

        if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
            rxmode->mq_mode != ETH_MQ_RX_RSS) {
                plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);

        if (txmode->mq_mode != ETH_MQ_TX_NONE) {
                plt_err("Unsupported mq tx mode %d", txmode->mq_mode);

        /* Free the resources allocated by the previous configure */
        if (dev->configured == 1) {
                /* Unregister queue IRQs */
                roc_nix_unregister_queue_irqs(nix);

                /* Unregister CQ IRQs if present */
                if (eth_dev->data->dev_conf.intr_conf.rxq)
                        roc_nix_unregister_cq_irqs(nix);

                /* Set no-op functions */
                nix_set_nop_rxtx_function(eth_dev);
                /* Store queue config for later */
                rc = nix_store_queue_cfg_and_then_release(eth_dev);

                roc_nix_tm_fini(nix);
                roc_nix_lf_free(nix);

        dev->rx_offloads = rxmode->offloads;
        dev->tx_offloads = txmode->offloads;

        rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
        if (dev->rx_offloads &
            (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
                rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
                rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;

        rx_cfg |= (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |
                   ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
                   ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);
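        /* Besides the requested checksum offloads, outer/inner L3/L4 length
         * checks and L2 error drops are always enabled in the LF Rx config.
         */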
        nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
        nb_txq = RTE_MAX(data->nb_tx_queues, 1);

        rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
                plt_err("Failed to init nix_lf rc=%d", rc);

        nb_rxq = data->nb_rx_queues;
        nb_txq = data->nb_tx_queues;

        /* Allocate memory for roc RQs and CQs */
        qs = plt_zmalloc(sizeof(struct roc_nix_rq) * nb_rxq, 0);
                plt_err("Failed to alloc rqs");

        qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_rxq, 0);
                plt_err("Failed to alloc cqs");

        /* Allocate memory for roc SQs */
        qs = plt_zmalloc(sizeof(struct roc_nix_sq) * nb_txq, 0);
                plt_err("Failed to alloc sqs");
        /* Re-enable NIX LF error interrupts */
        roc_nix_err_intr_ena_dis(nix, true);
        roc_nix_ras_intr_ena_dis(nix, true);

        if (nix->rx_ptp_ena) {
                plt_err("Both PTP and switch header enabled");

        /* Setup LSO if needed */
        rc = nix_lso_fmt_setup(dev);
                plt_err("Failed to setup nix lso format fields, rc=%d", rc);

        rc = nix_rss_default_setup(dev);
                plt_err("Failed to configure rss rc=%d", rc);

        /* Init the default TM scheduler hierarchy */
        rc = roc_nix_tm_init(nix);
                plt_err("Failed to init traffic manager, rc=%d", rc);

        rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
                plt_err("Failed to enable default tm hierarchy, rc=%d", rc);

        /* Register queue IRQs */
        rc = roc_nix_register_queue_irqs(nix);
                plt_err("Failed to register queue interrupts rc=%d", rc);

        /* Register CQ IRQs */
        if (eth_dev->data->dev_conf.intr_conf.rxq) {
                if (eth_dev->data->nb_rx_queues > dev->nix.cints) {
                        plt_err("Rx interrupt cannot be enabled, rxq > %d",

                /* The Rx interrupt feature cannot work with vector mode
                 * because vector mode does not process packets unless at
                 * least four are received, while CQ interrupts are generated
                 * even for a single packet.
                 */
                dev->scalar_ena = true;
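                /* The platform-specific Rx/Tx burst selection consults
                 * scalar_ena, so enabling CQ interrupts effectively forces
                 * the scalar datapath for this port.
                 */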
                rc = roc_nix_register_cq_irqs(nix);
                        plt_err("Failed to register CQ interrupts rc=%d", rc);

        /* Configure loopback mode */
        rc = roc_nix_mac_loopback_enable(nix,
                                         eth_dev->data->dev_conf.lpbk_mode);
                plt_err("Failed to configure cgx loop back mode rc=%d", rc);

        /* Restore the queue configuration when a reconfigure follows a
         * reconfigure and the application has not invoked queue configure in
         * between.
         */
        if (dev->configured == 1) {
                rc = nix_restore_queue_cfg(eth_dev);

        /* Update the MAC address */
        ea = eth_dev->data->mac_addrs;
        memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
        if (rte_is_zero_ether_addr(ea))
                rte_eth_random_addr((uint8_t *)ea);

        rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);

        plt_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
                    " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 "",
                    eth_dev->data->port_id, ea_fmt, nb_rxq, nb_txq,
                    dev->rx_offloads, dev->tx_offloads);

        dev->nb_rxq = data->nb_rx_queues;
        dev->nb_txq = data->nb_tx_queues;
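        /* Cache the committed queue counts; the store/restore helpers above
         * use them (via RTE_MIN) to know how many queues were actually
         * created.
         */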
        roc_nix_unregister_cq_irqs(nix);
        roc_nix_unregister_queue_irqs(nix);
        roc_nix_tm_fini(nix);
        nix_free_queue_mem(dev);
        rc |= roc_nix_lf_free(nix);
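        /* The error unwind above mirrors the setup order: CQ IRQs, queue
         * IRQs, the TM hierarchy, queue memory and finally the NIX LF itself.
         */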
/* CNXK platform independent eth dev ops */
struct eth_dev_ops cnxk_eth_dev_ops = {
        .dev_infos_get = cnxk_nix_info_get,
        .link_update = cnxk_nix_link_update,
        .tx_queue_release = cnxk_nix_tx_queue_release,
        .rx_queue_release = cnxk_nix_rx_queue_release,
cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        struct roc_nix *nix = &dev->nix;
        struct rte_pci_device *pci_dev;

        eth_dev->dev_ops = &cnxk_eth_dev_ops;

        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)

        pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pci_dev);

        /* Parse devargs string */
        rc = cnxk_ethdev_parse_devargs(eth_dev->device->devargs, dev);
                plt_err("Failed to parse devargs rc=%d", rc);

        /* Initialize base roc nix */
        nix->pci_dev = pci_dev;
        rc = roc_nix_dev_init(nix);
                plt_err("Failed to initialize roc nix rc=%d", rc);

        /* Register up msg callbacks */
        roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);

        dev->eth_dev = eth_dev;

        /* For VFs, the returned max_entries will be 0, but one entry must
         * still be allocated to hold the default MAC address, so set it to 1.
         */
        if (roc_nix_is_vf_or_sdp(nix))
                max_entries = 1;
        else
                max_entries = roc_nix_mac_max_entries_get(nix);

        if (max_entries <= 0) {
                plt_err("Failed to get max entries for mac addr");

        eth_dev->data->mac_addrs =
                rte_zmalloc("mac_addr", max_entries * RTE_ETHER_ADDR_LEN, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                plt_err("Failed to allocate memory for mac addr");

        dev->max_mac_entries = max_entries;

        /* Get mac address */
        rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
                plt_err("Failed to get mac addr, rc=%d", rc);

        /* Update the mac address */
        memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);

        if (!roc_nix_is_vf_or_sdp(nix)) {
                /* Sync same MAC address to CGX/RPM table */
                rc = roc_nix_mac_addr_set(nix, dev->mac_addr);
                        plt_err("Failed to set mac addr, rc=%d", rc);

        /* Union of all capabilities supported by CNXK.
         * Platform specific capabilities will be updated later.
         */
        dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
        dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
        dev->speed_capa = nix_get_speed_capa(dev);

        /* Initialize roc npc */
        plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
                    " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
                    eth_dev->data->port_id, roc_nix_get_pf(nix),
                    roc_nix_get_vf(nix), CNXK_ETH_DEV_PMD_VERSION, dev->hwcap,
                    dev->rx_offload_capa, dev->tx_offload_capa);

        rte_free(eth_dev->data->mac_addrs);
        roc_nix_dev_fini(nix);
        plt_err("Failed to init nix eth_dev rc=%d", rc);
cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
        struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
        const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
        struct roc_nix *nix = &dev->nix;

        /* Nothing to be done for secondary processes */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)

        /* Clear the flag since we are closing down */
        roc_nix_npc_rx_ena_dis(nix, false);

        /* Disable link status events */
        roc_nix_mac_link_event_start_stop(nix, false);

        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                dev_ops->tx_queue_release(eth_dev->data->tx_queues[i]);
                eth_dev->data->tx_queues[i] = NULL;
        eth_dev->data->nb_tx_queues = 0;

        /* Free up RQs and CQs */
        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                dev_ops->rx_queue_release(eth_dev->data->rx_queues[i]);
                eth_dev->data->rx_queues[i] = NULL;
        eth_dev->data->nb_rx_queues = 0;

        /* Free TM resources */
        roc_nix_tm_fini(nix);

        /* Unregister queue IRQs */
        roc_nix_unregister_queue_irqs(nix);

        /* Unregister CQ IRQs */
        if (eth_dev->data->dev_conf.intr_conf.rxq)
                roc_nix_unregister_cq_irqs(nix);

        /* Free ROC RQ, SQ and CQ memory */
        nix_free_queue_mem(dev);

        /* Free nix lf resources */
        rc = roc_nix_lf_free(nix);
                plt_err("Failed to free nix lf, rc=%d", rc);

        rte_free(eth_dev->data->mac_addrs);
        eth_dev->data->mac_addrs = NULL;

        /* Check if mbox close is needed */

        rc = roc_nix_dev_fini(nix);
        /* Can be freed later by PMD if NPA LF is in use */
        if (rc == -EAGAIN) {
                eth_dev->data->dev_private = NULL;
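                /* Clearing dev_private prevents rte_eth_dev_release_port()
                 * from freeing it; the PMD instance still holding the NPA LF
                 * frees it later, as noted above.
                 */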
        plt_err("Failed in nix dev fini, rc=%d", rc);
cnxk_nix_remove(struct rte_pci_device *pci_dev)
        struct rte_eth_dev *eth_dev;
        struct roc_nix *nix;

        eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
        /* Cleanup eth dev */
        rc = cnxk_eth_dev_uninit(eth_dev, true);

        rte_eth_dev_release_port(eth_dev);

        /* Nothing to be done for secondary processes */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)

        /* Check if this device is hosting common resource */
        nix = roc_idev_npa_nix_get();
        if (nix->pci_dev != pci_dev)

        /* Try nix fini now */
        rc = roc_nix_dev_fini(nix);
        if (rc == -EAGAIN) {
                plt_info("%s: common resource in use by other devices",
        plt_err("Failed in nix dev fini, rc=%d", rc);

        /* Free device pointer as rte_ethdev does not have it anymore */
cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
        RTE_SET_USED(pci_drv);

        rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),

        /* On an error in a secondary process, recheck whether the port still
         * exists in the primary or is in the middle of being detached.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
                if (!rte_eth_dev_allocated(pci_dev->device.name))