/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include <cnxk_ethdev.h>

static inline uint64_t
nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
{
	uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;

	if (roc_nix_is_vf_or_sdp(&dev->nix))
		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;

	return capa;
}

static inline uint64_t
nix_get_tx_offload_capa(struct cnxk_eth_dev *dev)
{
	RTE_SET_USED(dev);
	return CNXK_NIX_TX_OFFLOAD_CAPA;
}

static inline uint32_t
nix_get_speed_capa(struct cnxk_eth_dev *dev)
{
	uint32_t speed_capa;

	/* Auto negotiation disabled */
	speed_capa = ETH_LINK_SPEED_FIXED;
	if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
		speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
			      ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
			      ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
	}

	return speed_capa;
}

static void
nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
{
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct rte_eth_dev *eth_dev;
	struct cnxk_eth_dev *dev;
	uint32_t buffsz;

	dev = rxq->dev;
	eth_dev = dev->eth_dev;

	/* Get rx buffer size */
	mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
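
	/* If the largest Rx frame cannot fit into a single buffer, it must
	 * be received into a chain of mbufs, so Rx scatter and Tx multi-seg
	 * support are enabled together.
	 */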
	if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
		dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
		dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
	}
}

static int
nix_recalc_mtu(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_dev_data *data = eth_dev->data;
	struct cnxk_eth_rxq_sp *rxq;
	uint16_t mtu;
	int rc;

	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
	/* Setup scatter mode if needed by jumbo */
	nix_enable_mseg_on_jumbo(rxq);

	/* Setup MTU based on max_rx_pkt_len */
	mtu = data->dev_conf.rxmode.max_rx_pkt_len - CNXK_NIX_L2_OVERHEAD +
	      CNXK_NIX_MAX_VTAG_ACT_SIZE;

	rc = cnxk_nix_mtu_set(eth_dev, mtu);
	if (rc)
		plt_err("Failed to set default MTU size, rc=%d", rc);

	return rc;
}

uint64_t
cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
{
	uint16_t port_id = dev->eth_dev->data->port_id;
	struct rte_mbuf mb_def;
	uint64_t *tmp;

	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
				 offsetof(struct rte_mbuf, data_off) !=
			 2);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
				 offsetof(struct rte_mbuf, data_off) !=
			 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
				 offsetof(struct rte_mbuf, data_off) !=
			 6);
	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);
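
	/* The build-time asserts above guarantee that data_off, refcnt,
	 * nb_segs and port occupy one contiguous 8-byte region, so the
	 * 64-bit word read through rearm_data below yields a value the
	 * fast path can later use to re-initialize all four fields of a
	 * received mbuf with a single store.
	 */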
	/* Prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	tmp = (uint64_t *)&mb_def.rearm_data;

	return *tmp;
}

static inline uint8_t
nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
{
	/*
	 * A W8 SQE can hold at most three segments, so choose
	 * NIX_MAXSQESZ_W16 when multi-segment offload is enabled.
	 */
	if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
		return NIX_MAXSQESZ_W16;
	else
		return NIX_MAXSQESZ_W8;
}

int
cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			uint16_t nb_desc, uint16_t fp_tx_q_sz,
			const struct rte_eth_txconf *tx_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct cnxk_eth_txq_sp *txq_sp;
	struct roc_nix_sq *sq;
	size_t txq_sz;
	int rc;

	/* Free memory prior to re-allocation if needed. */
	if (eth_dev->data->tx_queues[qid] != NULL) {
		plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
		dev_ops->tx_queue_release(eth_dev->data->tx_queues[qid]);
		eth_dev->data->tx_queues[qid] = NULL;
	}

	/* Setup ROC SQ */
	sq = &dev->sqs[qid];
	sq->qid = qid;
	sq->nb_desc = nb_desc;
	sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);

	rc = roc_nix_sq_init(&dev->nix, sq);
	if (rc) {
		plt_err("Failed to init sq=%d, rc=%d", qid, rc);
		return rc;
	}

	rc = -ENOMEM;
	txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
	txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
	if (!txq_sp) {
		plt_err("Failed to alloc tx queue mem");
		rc |= roc_nix_sq_fini(sq);
		return rc;
	}

	txq_sp->dev = dev;
	txq_sp->qid = qid;
	txq_sp->qconf.conf.tx = *tx_conf;
	txq_sp->qconf.nb_desc = nb_desc;

	plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
		    " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
		    qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
		    sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);

	/* Store start of fast path area */
	eth_dev->data->tx_queues[qid] = txq_sp + 1;
	eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
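	/* The handle stored in tx_queues[] points one struct past txq_sp,
	 * i.e. at the fp_tx_q_sz bytes of fast path area; the slow path
	 * context is recovered later by stepping back one struct via
	 * cnxk_eth_txq_to_sp().
	 */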
	return 0;
}

static void
cnxk_nix_tx_queue_release(void *txq)
{
	struct cnxk_eth_txq_sp *txq_sp;
	struct cnxk_eth_dev *dev;
	struct roc_nix_sq *sq;
	uint16_t qid;
	int rc;

	if (!txq)
		return;

	txq_sp = cnxk_eth_txq_to_sp(txq);
	dev = txq_sp->dev;
	qid = txq_sp->qid;

	plt_nix_dbg("Releasing txq %u", qid);

	/* Cleanup ROC SQ */
	sq = &dev->sqs[qid];
	rc = roc_nix_sq_fini(sq);
	if (rc)
		plt_err("Failed to cleanup sq, rc=%d", rc);

	/* Finally free */
	plt_free(txq_sp);
}

int
cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			uint16_t nb_desc, uint16_t fp_rx_q_sz,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct rte_mempool_ops *ops;
	const char *platform_ops;
	struct roc_nix_rq *rq;
	struct roc_nix_cq *cq;
	uint16_t first_skip;
	int rc = -EINVAL;
	size_t rxq_sz;

	/* Sanity checks */
	if (rx_conf->rx_deferred_start == 1) {
		plt_err("Deferred Rx start is not supported");
		goto fail;
	}

	platform_ops = rte_mbuf_platform_mempool_ops();
	/* This driver needs cnxk_npa mempool ops to work */
	ops = rte_mempool_get_ops(mp->ops_index);
	if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
		plt_err("mempool ops should be of cnxk_npa type");
		goto fail;
	}

	if (mp->pool_id == 0) {
		plt_err("Invalid pool_id");
		goto fail;
	}

	/* Free memory prior to re-allocation if needed */
	if (eth_dev->data->rx_queues[qid] != NULL) {
		const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;

		plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
		dev_ops->rx_queue_release(eth_dev->data->rx_queues[qid]);
		eth_dev->data->rx_queues[qid] = NULL;
	}

	/* Setup ROC CQ */
	cq = &dev->cqs[qid];
	cq->qid = qid;
	cq->nb_desc = nb_desc;
	rc = roc_nix_cq_init(&dev->nix, cq);
	if (rc) {
		plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
		goto fail;
	}

	/* Setup ROC RQ */
	rq = &dev->rqs[qid];
	rq->qid = qid;
	rq->aura_handle = mp->pool_id;
	rq->flow_tag_width = 32;
	rq->sso_ena = false;

	/* Calculate first mbuf skip */
	first_skip = (sizeof(struct rte_mbuf));
	first_skip += RTE_PKTMBUF_HEADROOM;
	first_skip += rte_pktmbuf_priv_size(mp);
	rq->first_skip = first_skip;
	rq->later_skip = sizeof(struct rte_mbuf);
	rq->lpb_size = mp->elt_size;
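	/* The first segment of a packet skips the mbuf header, headroom and
	 * application private area; subsequent segments skip only the mbuf
	 * header, as they carry no headroom or private data.
	 */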

	rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
	if (rc) {
		plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
		goto cq_fini;
	}

	/* Allocate and setup fast path rx queue */
	rc = -ENOMEM;
	rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
	rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
	if (!rxq_sp) {
		plt_err("Failed to alloc rx queue for rq=%d", qid);
		goto rq_fini;
	}

	/* Setup slow path fields */
	rxq_sp->dev = dev;
	rxq_sp->qid = qid;
	rxq_sp->qconf.conf.rx = *rx_conf;
	rxq_sp->qconf.nb_desc = nb_desc;
	rxq_sp->qconf.mp = mp;

	plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
		    cq->nb_desc);

	/* Store start of fast path area */
	eth_dev->data->rx_queues[qid] = rxq_sp + 1;
	eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;

rq_fini:
	rc |= roc_nix_rq_fini(rq);
cq_fini:
	rc |= roc_nix_cq_fini(cq);
fail:
	return rc;
}

static void
cnxk_nix_rx_queue_release(void *rxq)
{
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct cnxk_eth_dev *dev;
	struct roc_nix_rq *rq;
	struct roc_nix_cq *cq;
	uint16_t qid;
	int rc;

	if (!rxq)
		return;

	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
	dev = rxq_sp->dev;
	qid = rxq_sp->qid;

	plt_nix_dbg("Releasing rxq %u", qid);

	/* Cleanup ROC RQ */
	rq = &dev->rqs[qid];
	rc = roc_nix_rq_fini(rq);
	if (rc)
		plt_err("Failed to cleanup rq, rc=%d", rc);

	/* Cleanup ROC CQ */
	cq = &dev->cqs[qid];
	rc = roc_nix_cq_fini(cq);
	if (rc)
		plt_err("Failed to cleanup cq, rc=%d", rc);

	/* Finally free fast path area */
	plt_free(rxq_sp);
}

uint32_t
cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
		       uint8_t rss_level)
{
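	/* flow_key_type[level][protocol]: level 0 hashes on outer headers
	 * only, level 1 on inner (tunnel payload) headers only, and level 2
	 * on both outer and inner headers.
	 */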
	uint32_t flow_key_type[RSS_MAX_LEVELS][6] = {
		{FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, FLOW_KEY_TYPE_TCP,
		 FLOW_KEY_TYPE_UDP, FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC},
		{FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6,
		 FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP,
		 FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC},
		{FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4,
		 FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6,
		 FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP,
		 FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP,
		 FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP,
		 FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC}
	};
	uint32_t flowkey_cfg = 0;

	dev->ethdev_rss_hf = ethdev_rss;

	if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
		flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;

	if (ethdev_rss & ETH_RSS_C_VLAN)
		flowkey_cfg |= FLOW_KEY_TYPE_VLAN;

	if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
		flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;

	if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
		flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;

	if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
		flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;

	if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
		flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;

	if (ethdev_rss & RSS_IPV4_ENABLE)
		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX];

	if (ethdev_rss & RSS_IPV6_ENABLE)
		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];

	if (ethdev_rss & ETH_RSS_TCP)
		flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];

	if (ethdev_rss & ETH_RSS_UDP)
		flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];

	if (ethdev_rss & ETH_RSS_SCTP)
		flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];

	if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
		flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];

	if (ethdev_rss & RSS_IPV6_EX_ENABLE)
		flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;

	if (ethdev_rss & ETH_RSS_PORT)
		flowkey_cfg |= FLOW_KEY_TYPE_PORT;

	if (ethdev_rss & ETH_RSS_NVGRE)
		flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;

	if (ethdev_rss & ETH_RSS_VXLAN)
		flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;

	if (ethdev_rss & ETH_RSS_GENEVE)
		flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;

	if (ethdev_rss & ETH_RSS_GTPU)
		flowkey_cfg |= FLOW_KEY_TYPE_GTPU;

	return flowkey_cfg;
}

static void
nix_free_queue_mem(struct cnxk_eth_dev *dev)
{
	plt_free(dev->rqs);
	plt_free(dev->cqs);
	plt_free(dev->sqs);
	dev->rqs = NULL;
	dev->cqs = NULL;
	dev->sqs = NULL;
}

static int
nix_rss_default_setup(struct cnxk_eth_dev *dev)
{
	struct rte_eth_dev *eth_dev = dev->eth_dev;
	uint8_t rss_hash_level;
	uint32_t flowkey_cfg;
	uint64_t rss_hf;

	rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	rss_hash_level = ETH_RSS_LEVEL(rss_hf);
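	/* ETH_RSS_LEVEL() is 0 when the application does not care (treated
	 * as outermost) and 1-based otherwise, so convert it to the 0-based
	 * level index used by cnxk_rss_ethdev_to_nix().
	 */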
	if (rss_hash_level)
		rss_hash_level -= 1;

	flowkey_cfg = cnxk_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
	return roc_nix_rss_default_setup(&dev->nix, flowkey_cfg);
}

static int
nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct cnxk_eth_qconf *tx_qconf = NULL;
	struct cnxk_eth_qconf *rx_qconf = NULL;
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct cnxk_eth_txq_sp *txq_sp;
	int i, nb_rxq, nb_txq;
	void **txq, **rxq;

	nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
	nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);

	tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
	if (tx_qconf == NULL) {
		plt_err("Failed to allocate memory for tx_qconf");
		goto fail;
	}

	rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
	if (rx_qconf == NULL) {
		plt_err("Failed to allocate memory for rx_qconf");
		goto free_txq;
	}

	txq = eth_dev->data->tx_queues;
	for (i = 0; i < nb_txq; i++) {
		if (txq[i] == NULL) {
			tx_qconf[i].valid = false;
			plt_info("txq[%d] is already released", i);
			continue;
		}
		txq_sp = cnxk_eth_txq_to_sp(txq[i]);
		memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
		tx_qconf[i].valid = true;
		dev_ops->tx_queue_release(txq[i]);
		eth_dev->data->tx_queues[i] = NULL;
	}

	rxq = eth_dev->data->rx_queues;
	for (i = 0; i < nb_rxq; i++) {
		if (rxq[i] == NULL) {
			rx_qconf[i].valid = false;
			plt_info("rxq[%d] is already released", i);
			continue;
		}
		rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
		memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
		rx_qconf[i].valid = true;
		dev_ops->rx_queue_release(rxq[i]);
		eth_dev->data->rx_queues[i] = NULL;
	}

	dev->tx_qconf = tx_qconf;
	dev->rx_qconf = rx_qconf;
	return 0;

free_txq:
	free(tx_qconf);
fail:
	return -ENOMEM;
}

static int
nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
	struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
	int rc, i, nb_rxq, nb_txq;
	void **txq, **rxq;

	nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
	nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);

	rc = -ENOMEM;
	/* Setup tx & rx queues with previous configuration so
	 * that the queues can be functional in cases like ports
	 * are started without reconfiguring queues.
	 *
	 * Usual reconfiguration sequence is like below:
	 * port_configure() {
	 *      if (reconfigure) {
	 *              queue_release()
	 *              queue_setup()
	 *      }
	 *      queue_configure() {
	 *              queue_release()
	 *              queue_setup()
	 *      }
	 * }
	 * port_start()
	 *
	 * In some application's control path, queue_configure() would
	 * NOT be invoked for TXQs/RXQs in port_configure().
	 * In such cases, queues can be functional after start as the
	 * queues are already setup in port_configure().
	 */
	for (i = 0; i < nb_txq; i++) {
		if (!tx_qconf[i].valid)
			continue;
		rc = dev_ops->tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, 0,
					     &tx_qconf[i].conf.tx);
		if (rc) {
			plt_err("Failed to setup tx queue rc=%d", rc);
			txq = eth_dev->data->tx_queues;
			for (i -= 1; i >= 0; i--)
				dev_ops->tx_queue_release(txq[i]);
			goto fail;
		}
	}

	free(tx_qconf);
	tx_qconf = NULL;

	for (i = 0; i < nb_rxq; i++) {
		if (!rx_qconf[i].valid)
			continue;
		rc = dev_ops->rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, 0,
					     &rx_qconf[i].conf.rx,
					     rx_qconf[i].mp);
		if (rc) {
			plt_err("Failed to setup rx queue rc=%d", rc);
			rxq = eth_dev->data->rx_queues;
			for (i -= 1; i >= 0; i--)
				dev_ops->rx_queue_release(rxq[i]);
			goto tx_queue_release;
		}
	}

	free(rx_qconf);
	rx_qconf = NULL;

	return 0;

tx_queue_release:
	txq = eth_dev->data->tx_queues;
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		dev_ops->tx_queue_release(txq[i]);
fail:
	if (tx_qconf)
		free(tx_qconf);
	if (rx_qconf)
		free(rx_qconf);

	return rc;
}

static uint16_t
nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
{
	RTE_SET_USED(queue);
	RTE_SET_USED(mbufs);
	RTE_SET_USED(pkts);

	return 0;
}

static void
nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
{
	/* These dummy functions are required to support applications
	 * that reconfigure queues without stopping the Tx burst and
	 * Rx burst threads (e.g. the kni app). When the queue context
	 * is saved, the txqs/rxqs are released, which would crash the
	 * application since rx/tx burst could still be running on
	 * other lcores.
	 */
	eth_dev->tx_pkt_burst = nix_eth_nop_burst;
	eth_dev->rx_pkt_burst = nix_eth_nop_burst;
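	/* Full barrier: make the no-op burst functions visible to other
	 * lcores before the queues they may still be polling are torn
	 * down (presumably why rte_mb() is used here rather than a
	 * plain compiler barrier).
	 */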
	rte_mb();
}

static int
nix_lso_tun_fmt_update(struct cnxk_eth_dev *dev)
{
	uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX];
	uint8_t tun[ROC_NIX_LSO_TUN_MAX];
	struct roc_nix *nix = &dev->nix;
	int rc;

	rc = roc_nix_lso_fmt_get(nix, udp_tun, tun);
	if (rc)
		return rc;

	dev->lso_tun_fmt = ((uint64_t)tun[ROC_NIX_LSO_TUN_V4V4] |
			    (uint64_t)tun[ROC_NIX_LSO_TUN_V4V6] << 8 |
			    (uint64_t)tun[ROC_NIX_LSO_TUN_V6V4] << 16 |
			    (uint64_t)tun[ROC_NIX_LSO_TUN_V6V6] << 24);

	dev->lso_tun_fmt |= ((uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V4] << 32 |
			     (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V6] << 40 |
			     (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V4] << 48 |
			     (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V6] << 56);
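	/* lso_tun_fmt now packs all eight tunnel LSO format indices one
	 * byte each: IP-in-IP variants in bytes 0-3 and UDP tunnel
	 * variants in bytes 4-7, letting the Tx path extract the right
	 * index with a byte shift instead of a table lookup.
	 */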
	return 0;
}

static int
nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
{
	struct roc_nix *nix = &dev->nix;
	int rc;

	/* Nothing much to do if offload is not enabled */
	if (!(dev->tx_offloads &
	      (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
	       DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))
		return 0;

	/* Setup LSO formats in AF. It's a no-op if another ethdev has
	 * already set it up.
	 */
	rc = roc_nix_lso_fmt_setup(nix);
	if (rc)
		return rc;

	return nix_lso_tun_fmt_update(dev);
}

int
cnxk_nix_configure(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	struct rte_eth_txmode *txmode = &conf->txmode;
	char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
	struct roc_nix *nix = &dev->nix;
	struct rte_ether_addr *ea;
	uint8_t nb_rxq, nb_txq;
	uint64_t rx_cfg;
	void *qs;
	int rc;

	rc = -EINVAL;

	/* Sanity checks */
	if (rte_eal_has_hugepages() == 0) {
		plt_err("Huge page is not configured");
		goto fail_configure;
	}

	if (conf->dcb_capability_en == 1) {
		plt_err("dcb enable is not supported");
		goto fail_configure;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		plt_err("Flow director is not supported");
		goto fail_configure;
	}

	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
		plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
		goto fail_configure;
	}

	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
		plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
		goto fail_configure;
	}

	/* Free the resources allocated from the previous configure */
	if (dev->configured == 1) {
		/* Unregister queue irq's */
		roc_nix_unregister_queue_irqs(nix);

		/* Unregister CQ irqs if present */
		if (eth_dev->data->dev_conf.intr_conf.rxq)
			roc_nix_unregister_cq_irqs(nix);

		/* Set no-op functions */
		nix_set_nop_rxtx_function(eth_dev);
		/* Store queue config for later */
		rc = nix_store_queue_cfg_and_then_release(eth_dev);
		if (rc)
			goto fail_configure;
		roc_nix_tm_fini(nix);
		roc_nix_lf_free(nix);
	}

	dev->rx_offloads = rxmode->offloads;
	dev->tx_offloads = txmode->offloads;

	/* Prepare rx cfg */
	rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
	if (dev->rx_offloads &
	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
	}
	rx_cfg |= (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |
		   ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
		   ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);

	nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
	nb_txq = RTE_MAX(data->nb_tx_queues, 1);
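	/* Requesting at least one RQ and SQ here means the LF can be
	 * brought up even when the application configures zero queues
	 * of one type.
	 */
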
	/* Alloc a nix lf */
	rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
	if (rc) {
		plt_err("Failed to init nix_lf rc=%d", rc);
		goto fail_configure;
	}

	nb_rxq = data->nb_rx_queues;
	nb_txq = data->nb_tx_queues;
	rc = -ENOMEM;
	if (nb_rxq) {
		/* Allocate memory for roc rq's and cq's */
		qs = plt_zmalloc(sizeof(struct roc_nix_rq) * nb_rxq, 0);
		if (!qs) {
			plt_err("Failed to alloc rqs");
			goto free_nix_lf;
		}
		dev->rqs = qs;

		qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_rxq, 0);
		if (!qs) {
			plt_err("Failed to alloc cqs");
			goto free_nix_lf;
		}
		dev->cqs = qs;
	}

	if (nb_txq) {
		/* Allocate memory for roc sq's */
		qs = plt_zmalloc(sizeof(struct roc_nix_sq) * nb_txq, 0);
		if (!qs) {
			plt_err("Failed to alloc sqs");
			goto free_nix_lf;
		}
		dev->sqs = qs;
	}

	/* Re-enable NIX LF error interrupts */
	roc_nix_err_intr_ena_dis(nix, true);
	roc_nix_ras_intr_ena_dis(nix, true);

	if (nix->rx_ptp_ena) {
		plt_err("Both PTP and switch header enabled");
		goto free_nix_lf;
	}

	/* Setup LSO if needed */
	rc = nix_lso_fmt_setup(dev);
	if (rc) {
		plt_err("Failed to setup nix lso format fields, rc=%d", rc);
		goto free_nix_lf;
	}

	/* Configure RSS */
	rc = nix_rss_default_setup(dev);
	if (rc) {
		plt_err("Failed to configure rss rc=%d", rc);
		goto free_nix_lf;
	}

	/* Init the default TM scheduler hierarchy */
	rc = roc_nix_tm_init(nix);
	if (rc) {
		plt_err("Failed to init traffic manager, rc=%d", rc);
		goto free_nix_lf;
	}

	rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
	if (rc) {
		plt_err("Failed to enable default tm hierarchy, rc=%d", rc);
		goto tm_fini;
	}

	/* Register queue IRQs */
	rc = roc_nix_register_queue_irqs(nix);
	if (rc) {
		plt_err("Failed to register queue interrupts rc=%d", rc);
		goto tm_fini;
	}

	/* Register cq IRQs */
	if (eth_dev->data->dev_conf.intr_conf.rxq) {
		if (eth_dev->data->nb_rx_queues > dev->nix.cints) {
			plt_err("Rx interrupt cannot be enabled, rxq > %d",
				dev->nix.cints);
			goto q_irq_fini;
		}
		/* Rx interrupt feature cannot work with vector mode because
		 * vector mode does not process packets unless min 4 pkts are
		 * received, while cq interrupts are generated even for 1 pkt
		 * in the CQ.
		 */
		dev->scalar_ena = true;

		rc = roc_nix_register_cq_irqs(nix);
		if (rc) {
			plt_err("Failed to register CQ interrupts rc=%d", rc);
			goto q_irq_fini;
		}
	}

	/* Configure loop back mode */
	rc = roc_nix_mac_loopback_enable(nix,
					 eth_dev->data->dev_conf.lpbk_mode);
	if (rc) {
		plt_err("Failed to configure cgx loop back mode rc=%d", rc);
		goto cq_fini;
	}

	/*
	 * Restore queue config when reconfigure is followed by start
	 * without the application reconfiguring the individual queues.
	 */
	if (dev->configured == 1) {
		rc = nix_restore_queue_cfg(eth_dev);
		if (rc)
			goto cq_fini;
	}

	/* Update the mac address */
	ea = eth_dev->data->mac_addrs;
	memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
	if (rte_is_zero_ether_addr(ea))
		rte_eth_random_addr((uint8_t *)ea);

	rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);

	plt_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
		    " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64,
		    eth_dev->data->port_id, ea_fmt, nb_rxq, nb_txq,
		    dev->rx_offloads, dev->tx_offloads);

	/* All good */
	dev->configured = 1;
	dev->nb_rxq = data->nb_rx_queues;
	dev->nb_txq = data->nb_tx_queues;
	return 0;

cq_fini:
	roc_nix_unregister_cq_irqs(nix);
q_irq_fini:
	roc_nix_unregister_queue_irqs(nix);
tm_fini:
	roc_nix_tm_fini(nix);
free_nix_lf:
	nix_free_queue_mem(dev);
	rc |= roc_nix_lf_free(nix);
fail_configure:
	dev->configured = 0;
	return rc;
}

int
cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct roc_nix_sq *sq = &dev->sqs[qid];
	int rc = -EINVAL;

	if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	rc = roc_nix_tm_sq_aura_fc(sq, true);
	if (rc) {
		plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
		return rc;
	}

	data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

int
cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct roc_nix_sq *sq = &dev->sqs[qid];
	int rc;

	if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	rc = roc_nix_tm_sq_aura_fc(sq, false);
	if (rc) {
		plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
			rc);
		return rc;
	}

	data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static int
cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct roc_nix_rq *rq = &dev->rqs[qid];
	int rc;

	if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	rc = roc_nix_rq_ena_dis(rq, true);
	if (rc) {
		plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
		return rc;
	}

	data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static int
cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct roc_nix_rq *rq = &dev->rqs[qid];
	int rc;

	if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	rc = roc_nix_rq_ena_dis(rq, false);
	if (rc) {
		plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
		return rc;
	}

	data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

static int
cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct rte_mbuf *rx_pkts[32];
	int count, i, j, rc;
	void *rxq;

	/* Disable switch hdr pkind */
	roc_nix_switch_hdr_set(&dev->nix, 0);

	/* Stop link change events */
	if (!roc_nix_is_vf_or_sdp(&dev->nix))
		roc_nix_mac_link_event_start_stop(&dev->nix, false);

	/* Disable Rx via NPC */
	roc_nix_npc_rx_ena_dis(&dev->nix, false);

	/* Stop rx queues and free up pkts pending */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rc = dev_ops->rx_queue_stop(eth_dev, i);
		if (rc)
			continue;

		rxq = eth_dev->data->rx_queues[i];
		count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
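		/* Drain the CQ with the no-offload burst function until it
		 * reads empty, so no pending mbufs are leaked on stop.
		 */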
		while (count) {
			for (j = 0; j < count; j++)
				rte_pktmbuf_free(rx_pkts[j]);
			count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
		}
	}

	/* Stop tx queues */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		dev_ops->tx_queue_stop(eth_dev, i);

	return 0;
}

int
cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc, i;

	if (eth_dev->data->nb_rx_queues != 0) {
		rc = nix_recalc_mtu(eth_dev);
		if (rc)
			return rc;
	}

	/* Start rx queues */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rc = cnxk_nix_rx_queue_start(eth_dev, i);
		if (rc)
			return rc;
	}

	/* Start tx queues */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		rc = cnxk_nix_tx_queue_start(eth_dev, i);
		if (rc)
			return rc;
	}

	/* Enable Rx in NPC */
	rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
	if (rc) {
		plt_err("Failed to enable NPC rx %d", rc);
		return rc;
	}

	cnxk_nix_toggle_flag_link_cfg(dev, true);

	/* Start link change events */
	if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
		rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
		if (rc) {
			plt_err("Failed to start cgx link event %d", rc);
			goto rx_disable;
		}
	}

	cnxk_nix_toggle_flag_link_cfg(dev, false);

	return 0;

rx_disable:
	roc_nix_npc_rx_ena_dis(&dev->nix, false);
	cnxk_nix_toggle_flag_link_cfg(dev, false);
	return rc;
}

/* CNXK platform independent eth dev ops */
struct eth_dev_ops cnxk_eth_dev_ops = {
	.mtu_set = cnxk_nix_mtu_set,
	.mac_addr_add = cnxk_nix_mac_addr_add,
	.mac_addr_remove = cnxk_nix_mac_addr_del,
	.mac_addr_set = cnxk_nix_mac_addr_set,
	.dev_infos_get = cnxk_nix_info_get,
	.link_update = cnxk_nix_link_update,
	.tx_queue_release = cnxk_nix_tx_queue_release,
	.rx_queue_release = cnxk_nix_rx_queue_release,
	.dev_stop = cnxk_nix_dev_stop,
	.tx_queue_start = cnxk_nix_tx_queue_start,
	.rx_queue_start = cnxk_nix_rx_queue_start,
	.rx_queue_stop = cnxk_nix_rx_queue_stop,
	.dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
	.promiscuous_enable = cnxk_nix_promisc_enable,
	.promiscuous_disable = cnxk_nix_promisc_disable,
	.allmulticast_enable = cnxk_nix_allmulticast_enable,
	.allmulticast_disable = cnxk_nix_allmulticast_disable,
	.rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
	.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
};

static int
cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	struct rte_pci_device *pci_dev;
	int rc, max_entries;

	eth_dev->dev_ops = &cnxk_eth_dev_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Parse devargs string */
	rc = cnxk_ethdev_parse_devargs(eth_dev->device->devargs, dev);
	if (rc) {
		plt_err("Failed to parse devargs rc=%d", rc);
		goto error;
	}

	/* Initialize base roc nix */
	nix->pci_dev = pci_dev;
	rc = roc_nix_dev_init(nix);
	if (rc) {
		plt_err("Failed to initialize roc nix rc=%d", rc);
		goto error;
	}

	/* Register up msg callbacks */
	roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);

	dev->eth_dev = eth_dev;
	dev->configured = 0;
	dev->ptype_disable = 0;

	/* For VFs, returned max_entries will be 0, but to keep the default
	 * MAC address one entry must be allocated, so set it to 1.
	 */
	if (roc_nix_is_vf_or_sdp(nix))
		max_entries = 1;
	else
		max_entries = roc_nix_mac_max_entries_get(nix);

	if (max_entries <= 0) {
		plt_err("Failed to get max entries for mac addr");
		rc = -ENOTSUP;
		goto dev_fini;
	}

	eth_dev->data->mac_addrs =
		rte_zmalloc("mac_addr", max_entries * RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		plt_err("Failed to allocate memory for mac addr");
		rc = -ENOMEM;
		goto dev_fini;
	}

	dev->max_mac_entries = max_entries;

	/* Get mac address */
	rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
	if (rc) {
		plt_err("Failed to get mac addr, rc=%d", rc);
		goto free_mac_addrs;
	}

	/* Update the mac address */
	memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);

	if (!roc_nix_is_vf_or_sdp(nix)) {
		/* Sync same MAC address to CGX/RPM table */
		rc = roc_nix_mac_addr_set(nix, dev->mac_addr);
		if (rc) {
			plt_err("Failed to set mac addr, rc=%d", rc);
			goto free_mac_addrs;
		}
	}

	/* Union of all capabilities supported by CNXK.
	 * Platform specific capabilities will be
	 * updated later.
	 */
	dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
	dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
	dev->speed_capa = nix_get_speed_capa(dev);

	/* Initialize roc npc */
	plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
		    " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
		    eth_dev->data->port_id, roc_nix_get_pf(nix),
		    roc_nix_get_vf(nix), CNXK_ETH_DEV_PMD_VERSION, dev->hwcap,
		    dev->rx_offload_capa, dev->tx_offload_capa);
	return 0;

free_mac_addrs:
	rte_free(eth_dev->data->mac_addrs);
dev_fini:
	roc_nix_dev_fini(nix);
error:
	plt_err("Failed to init nix eth_dev rc=%d", rc);
	return rc;
}

static int
cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct roc_nix *nix = &dev->nix;
	int rc, i;

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Clear the flag since we are closing down */
	dev->configured = 0;

	roc_nix_npc_rx_ena_dis(nix, false);

	/* Disable link status events */
	roc_nix_mac_link_event_start_stop(nix, false);

	/* Free up SQs */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		dev_ops->tx_queue_release(eth_dev->data->tx_queues[i]);
		eth_dev->data->tx_queues[i] = NULL;
	}
	eth_dev->data->nb_tx_queues = 0;

	/* Free up RQ's and CQ's */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		dev_ops->rx_queue_release(eth_dev->data->rx_queues[i]);
		eth_dev->data->rx_queues[i] = NULL;
	}
	eth_dev->data->nb_rx_queues = 0;

	/* Free tm resources */
	roc_nix_tm_fini(nix);

	/* Unregister queue irqs */
	roc_nix_unregister_queue_irqs(nix);

	/* Unregister cq irqs */
	if (eth_dev->data->dev_conf.intr_conf.rxq)
		roc_nix_unregister_cq_irqs(nix);

	/* Free ROC RQ's, SQ's and CQ's memory */
	nix_free_queue_mem(dev);

	/* Free nix lf resources */
	rc = roc_nix_lf_free(nix);
	if (rc)
		plt_err("Failed to free nix lf, rc=%d", rc);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	/* Check if mbox close is needed */
	if (!mbox_close)
		return 0;

	rc = roc_nix_dev_fini(nix);
	/* Can be freed later by PMD if NPA LF is in use */
	if (rc == -EAGAIN) {
		eth_dev->data->dev_private = NULL;
		return 0;
	} else if (rc) {
		plt_err("Failed in nix dev fini, rc=%d", rc);
	}

	return rc;
}

static int
cnxk_nix_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	struct roc_nix *nix;
	int rc = -EINVAL;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (eth_dev) {
		/* Cleanup eth dev */
		rc = cnxk_eth_dev_uninit(eth_dev, true);
		if (rc)
			return rc;

		rte_eth_dev_release_port(eth_dev);
	}

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Check if this device is hosting common resource */
	nix = roc_idev_npa_nix_get();
	if (!nix || nix->pci_dev != pci_dev)
		return 0;

	/* Try nix fini now */
	rc = roc_nix_dev_fini(nix);
	if (rc == -EAGAIN) {
		plt_info("%s: common resource in use by other devices",
			 pci_dev->name);
		goto exit;
	} else if (rc) {
		plt_err("Failed in nix dev fini, rc=%d", rc);
		goto exit;
	}

	/* Free device pointer as rte_ethdev does not have it anymore */
	rte_free(nix);
exit:
	return rc;
}

static int
cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	int rc;

	RTE_SET_USED(pci_drv);

	rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),
					   cnxk_eth_dev_init);

	/* On error on secondary, recheck if port exists in primary or
	 * in mid of detach state.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
		if (!rte_eth_dev_allocated(pci_dev->device.name))
			return 0;

	return rc;
}