/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
#include <cnxk_ethdev.h>
static inline uint64_t
nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
{
	uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;

	if (roc_nix_is_vf_or_sdp(&dev->nix) ||
	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG)
		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;

	return capa;
}
static inline uint64_t
nix_get_tx_offload_capa(struct cnxk_eth_dev *dev)
{
	RTE_SET_USED(dev);
	return CNXK_NIX_TX_OFFLOAD_CAPA;
}
static inline uint32_t
nix_get_speed_capa(struct cnxk_eth_dev *dev)
{
	uint32_t speed_capa;

	/* Auto negotiation disabled */
	speed_capa = ETH_LINK_SPEED_FIXED;
	if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
		speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
			      ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
			      ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
	}

	return speed_capa;
}
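
/* Select whether inline inbound IPsec traffic is handled by the inline
 * device or by this port's own NIX LF. Updates the RoC layer (used for
 * subsequent NPC rule insertion) and the SA base stored in lookup memory.
 */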
int
cnxk_nix_inb_mode_set(struct cnxk_eth_dev *dev, bool use_inl_dev)
{
	struct roc_nix *nix = &dev->nix;

	if (dev->inb.inl_dev == use_inl_dev)
		return 0;

	plt_nix_dbg("Security sessions(%u) still active, inl=%u!!!",
		    dev->inb.nb_sess, !!dev->inb.inl_dev);

	/* Change the mode */
	dev->inb.inl_dev = use_inl_dev;

	/* Update RoC for NPC rule insertion */
	roc_nix_inb_mode_set(nix, use_inl_dev);

	/* Setup lookup mem */
	return cnxk_nix_lookup_mem_sa_base_set(dev);
}
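
/* One-time inline IPsec setup done at configure time: initialize inline
 * inbound when DEV_RX_OFFLOAD_SECURITY is requested, inline outbound when
 * DEV_TX_OFFLOAD_SECURITY (or Rx security) is requested, and allocate a
 * bitmap used to hand out outbound SA indexes.
 */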
static int
nix_security_setup(struct cnxk_eth_dev *dev)
{
	struct roc_nix *nix = &dev->nix;
	int i, rc = 0;

	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
		/* Setup Inline Inbound */
		rc = roc_nix_inl_inb_init(nix);
		if (rc) {
			plt_err("Failed to initialize nix inline inb, rc=%d",
				rc);
			return rc;
		}

		/* By default pick using inline device for poll mode.
		 * Will be overridden when event mode rq's are setup.
		 */
		cnxk_nix_inb_mode_set(dev, true);
	}

	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
	    dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
		struct plt_bitmap *bmap;
		size_t bmap_sz;
		void *mem;

		/* Setup enough descriptors for all tx queues */
		nix->outb_nb_desc = dev->outb.nb_desc;
		nix->outb_nb_crypto_qs = dev->outb.nb_crypto_qs;

		/* Setup Inline Outbound */
		rc = roc_nix_inl_outb_init(nix);
		if (rc) {
			plt_err("Failed to initialize nix inline outb, rc=%d",
				rc);
			goto cleanup;
		}

		dev->outb.lf_base = roc_nix_inl_outb_lf_base_get(nix);

		/* Skip the rest if DEV_TX_OFFLOAD_SECURITY is not enabled */
		if (!(dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY))
			goto done;

		rc = -ENOMEM;
		/* Allocate a bitmap to alloc and free sa indexes */
		bmap_sz = plt_bitmap_get_memory_footprint(dev->outb.max_sa);
		mem = plt_zmalloc(bmap_sz, PLT_CACHE_LINE_SIZE);
		if (mem == NULL) {
			plt_err("Outbound SA bmap alloc failed");

			rc |= roc_nix_inl_outb_fini(nix);
			goto cleanup;
		}

		bmap = plt_bitmap_init(dev->outb.max_sa, mem, bmap_sz);
		if (!bmap) {
			plt_err("Outbound SA bmap init failed");

			rc |= roc_nix_inl_outb_fini(nix);
			plt_free(mem);
			goto cleanup;
		}

		for (i = 0; i < dev->outb.max_sa; i++)
			plt_bitmap_set(bmap, i);

		dev->outb.sa_base = roc_nix_inl_outb_sa_base_get(nix);
		dev->outb.sa_bmap_mem = mem;
		dev->outb.sa_bmap = bmap;
	}

done:
	return 0;
cleanup:
	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
		rc |= roc_nix_inl_inb_fini(nix);
	return rc;
}
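
/* Undo nix_security_setup(): destroy any remaining inbound/outbound
 * sessions, tear down inline inbound/outbound state in RoC and free the
 * outbound SA index bitmap.
 */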
static int
nix_security_release(struct cnxk_eth_dev *dev)
{
	struct rte_eth_dev *eth_dev = dev->eth_dev;
	struct cnxk_eth_sec_sess *eth_sec, *tvar;
	struct roc_nix *nix = &dev->nix;
	int rc, ret = 0;

	/* Cleanup Inline inbound */
	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
		/* Destroy inbound sessions */
		tvar = NULL;
		RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->inb.list, entry, tvar)
			cnxk_eth_sec_ops.session_destroy(eth_dev,
							 eth_sec->sess);

		/* Clear lookup mem */
		cnxk_nix_lookup_mem_sa_base_clear(dev);

		rc = roc_nix_inl_inb_fini(nix);
		if (rc)
			plt_err("Failed to cleanup nix inline inb, rc=%d", rc);
		ret |= rc;
	}

	/* Cleanup Inline outbound */
	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY ||
	    dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
		/* Destroy outbound sessions */
		tvar = NULL;
		RTE_TAILQ_FOREACH_SAFE(eth_sec, &dev->outb.list, entry, tvar)
			cnxk_eth_sec_ops.session_destroy(eth_dev,
							 eth_sec->sess);

		rc = roc_nix_inl_outb_fini(nix);
		if (rc)
			plt_err("Failed to cleanup nix inline outb, rc=%d", rc);
		ret |= rc;

		plt_bitmap_free(dev->outb.sa_bmap);
		plt_free(dev->outb.sa_bmap_mem);
		dev->outb.sa_bmap = NULL;
		dev->outb.sa_bmap_mem = NULL;
	}

	dev->inb.inl_dev = false;
	roc_nix_inb_mode_set(nix, false);
	dev->inb.nb_sess = 0;
	dev->outb.nb_sess = 0;

	return ret;
}
static void
nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
{
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct rte_eth_dev *eth_dev;
	struct cnxk_eth_dev *dev;
	uint32_t buffsz;

	dev = rxq->dev;
	eth_dev = dev->eth_dev;

	/* Get rx buffer size */
	mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

	if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
		dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
		dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
	}
}
static int
nix_recalc_mtu(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_dev_data *data = eth_dev->data;
	struct cnxk_eth_rxq_sp *rxq;
	uint16_t mtu;
	int rc;

	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
	/* Setup scatter mode if needed by jumbo */
	nix_enable_mseg_on_jumbo(rxq);

	/* Setup MTU based on max_rx_pkt_len */
	mtu = data->dev_conf.rxmode.max_rx_pkt_len - CNXK_NIX_L2_OVERHEAD +
	      CNXK_NIX_MAX_VTAG_ACT_SIZE;

	rc = cnxk_nix_mtu_set(eth_dev, mtu);
	if (rc)
		plt_err("Failed to set default MTU size, rc=%d", rc);

	return rc;
}
static int
nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
	struct rte_eth_fc_conf fc_conf = {0};
	int rc;

	/* Both Rx & Tx flow ctrl get enabled(RTE_FC_FULL) in HW
	 * by AF driver, update those info in PMD structure.
	 */
	rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
	if (rc)
		return rc;

	fc->mode = fc_conf.mode;
	fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
			(fc_conf.mode == RTE_FC_RX_PAUSE);
	fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
			(fc_conf.mode == RTE_FC_TX_PAUSE);

	return rc;
}
static int
nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
	struct rte_eth_fc_conf fc_cfg = {0};

	if (roc_nix_is_vf_or_sdp(&dev->nix))
		return 0;

	fc_cfg.mode = fc->mode;

	/* To avoid Link credit deadlock on Ax, disable Tx FC if it's enabled */
	if (roc_model_is_cn96_ax() &&
	    dev->npc.switch_header_type != ROC_PRIV_FLAGS_HIGIG &&
	    (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
		fc_cfg.mode =
				(fc_cfg.mode == RTE_FC_FULL ||
				fc_cfg.mode == RTE_FC_TX_PAUSE) ?
				RTE_FC_TX_PAUSE : RTE_FC_NONE;
	}

	return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
}
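
/* Precompute the 64-bit rearm_data word used by the Rx fast path to
 * initialize data_off, refcnt, nb_segs and port of every received mbuf
 * with a single store. The RTE_BUILD_BUG_ON() checks pin the mbuf field
 * layout this relies on.
 */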
uint64_t
cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
{
	uint16_t port_id = dev->eth_dev->data->port_id;
	struct rte_mbuf mb_def;
	uint64_t *tmp;

	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
				 offsetof(struct rte_mbuf, data_off) !=
			 2);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
				 offsetof(struct rte_mbuf, data_off) !=
			 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
				 offsetof(struct rte_mbuf, data_off) !=
			 6);
	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM +
			  (dev->ptp_en * CNXK_NIX_TIMESYNC_RX_OFFSET);
	mb_def.port = port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* Prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	tmp = (uint64_t *)&mb_def.rearm_data;

	return *tmp;
}
static inline uint8_t
nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
{
	/*
	 * Maximum three segments can be supported with W8, Choose
	 * NIX_MAXSQESZ_W16 for multi segment offload.
	 */
	if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
		return NIX_MAXSQESZ_W16;
	else
		return NIX_MAXSQESZ_W8;
}
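
/* Common (platform independent) Tx queue setup: initializes the ROC SQ and
 * allocates the slow path area followed by fp_tx_q_sz bytes of fast path
 * area, whose start is what gets stored in data->tx_queues[].
 */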
int
cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			uint16_t nb_desc, uint16_t fp_tx_q_sz,
			const struct rte_eth_txconf *tx_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct cnxk_eth_txq_sp *txq_sp;
	struct roc_nix_sq *sq;
	size_t txq_sz;
	int rc;

	/* Free memory prior to re-allocation if needed. */
	if (eth_dev->data->tx_queues[qid] != NULL) {
		plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
		dev_ops->tx_queue_release(eth_dev->data->tx_queues[qid]);
		eth_dev->data->tx_queues[qid] = NULL;
	}

	/* When Tx Security offload is enabled, increase tx desc count by
	 * max possible outbound desc count.
	 */
	if (dev->tx_offloads & DEV_TX_OFFLOAD_SECURITY)
		nb_desc += dev->outb.nb_desc;

	/* Setup ROC SQ */
	sq = &dev->sqs[qid];
	sq->qid = qid;
	sq->nb_desc = nb_desc;
	sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);

	rc = roc_nix_sq_init(&dev->nix, sq);
	if (rc) {
		plt_err("Failed to init sq=%d, rc=%d", qid, rc);
		return rc;
	}

	rc = -ENOMEM;
	txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
	txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
	if (!txq_sp) {
		plt_err("Failed to alloc tx queue mem");
		rc |= roc_nix_sq_fini(sq);
		return rc;
	}

	txq_sp->dev = dev;
	txq_sp->qid = qid;
	txq_sp->qconf.conf.tx = *tx_conf;
	txq_sp->qconf.nb_desc = nb_desc;

	plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
		    " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
		    qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
		    sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);

	/* Store start of fast path area */
	eth_dev->data->tx_queues[qid] = txq_sp + 1;
	eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
static void
cnxk_nix_tx_queue_release(void *txq)
{
	struct cnxk_eth_txq_sp *txq_sp;
	struct cnxk_eth_dev *dev;
	struct roc_nix_sq *sq;
	uint16_t qid;
	int rc;

	if (!txq)
		return;

	txq_sp = cnxk_eth_txq_to_sp(txq);
	dev = txq_sp->dev;
	qid = txq_sp->qid;

	plt_nix_dbg("Releasing txq %u", qid);

	/* Cleanup ROC SQ */
	sq = &dev->sqs[qid];
	rc = roc_nix_sq_fini(sq);
	if (rc)
		plt_err("Failed to cleanup sq, rc=%d", rc);

	/* Finally free */
	plt_free(txq_sp);
}
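
/* Common Rx queue setup: validates the mempool, initializes the ROC CQ and
 * RQ backing this queue, allocates the slow path + fast path area and, when
 * Rx security or PTP timestamping is enabled, performs the related
 * per-queue setup.
 */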
int
cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			uint16_t nb_desc, uint16_t fp_rx_q_sz,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct rte_mempool_ops *ops;
	const char *platform_ops;
	struct roc_nix_rq *rq;
	struct roc_nix_cq *cq;
	uint16_t first_skip;
	int rc = -EINVAL;
	size_t rxq_sz;

	/* Sanity checks */
	if (rx_conf->rx_deferred_start == 1) {
		plt_err("Deferred Rx start is not supported");
		goto fail;
	}

	platform_ops = rte_mbuf_platform_mempool_ops();
	/* This driver needs cnxk_npa mempool ops to work */
	ops = rte_mempool_get_ops(mp->ops_index);
	if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
		plt_err("mempool ops should be of cnxk_npa type");
		goto fail;
	}

	if (mp->pool_id == 0) {
		plt_err("Invalid pool_id");
		goto fail;
	}

	/* Free memory prior to re-allocation if needed */
	if (eth_dev->data->rx_queues[qid] != NULL) {
		const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;

		plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
		dev_ops->rx_queue_release(eth_dev->data->rx_queues[qid]);
		eth_dev->data->rx_queues[qid] = NULL;
	}

	/* Clamp cq limit to size of packet pool aura for LBK
	 * to avoid meta packet drop as LBK does not currently support
	 * backpressure.
	 */
	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY && roc_nix_is_lbk(nix)) {
		uint64_t pkt_pool_limit = roc_nix_inl_dev_rq_limit_get();

		/* Use current RQ's aura limit if inl rq is not available */
		if (!pkt_pool_limit)
			pkt_pool_limit = roc_npa_aura_op_limit_get(mp->pool_id);
		nb_desc = RTE_MAX(nb_desc, pkt_pool_limit);
	}

	/* Setup ROC CQ */
	cq = &dev->cqs[qid];
	cq->qid = qid;
	cq->nb_desc = nb_desc;
	rc = roc_nix_cq_init(&dev->nix, cq);
	if (rc) {
		plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
		goto fail;
	}

	/* Setup ROC RQ */
	rq = &dev->rqs[qid];
	rq->qid = qid;
	rq->aura_handle = mp->pool_id;
	rq->flow_tag_width = 32;
	rq->sso_ena = false;

	/* Calculate first mbuf skip */
	first_skip = (sizeof(struct rte_mbuf));
	first_skip += RTE_PKTMBUF_HEADROOM;
	first_skip += rte_pktmbuf_priv_size(mp);
	rq->first_skip = first_skip;
	rq->later_skip = sizeof(struct rte_mbuf);
	rq->lpb_size = mp->elt_size;

	/* Enable Inline IPSec on RQ, will not be used for Poll mode */
	if (roc_nix_inl_inb_is_enabled(nix))
		rq->ipsech_ena = true;

	rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
	if (rc) {
		plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
		goto cq_fini;
	}

	/* Allocate and setup fast path rx queue */
	rc = -ENOMEM;
	rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
	rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
	if (!rxq_sp) {
		plt_err("Failed to alloc rx queue for rq=%d", qid);
		goto rq_fini;
	}

	/* Setup slow path fields */
	rxq_sp->dev = dev;
	rxq_sp->qid = qid;
	rxq_sp->qconf.conf.rx = *rx_conf;
	rxq_sp->qconf.nb_desc = nb_desc;
	rxq_sp->qconf.mp = mp;

	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
		/* Setup rq reference for inline dev if present */
		rc = roc_nix_inl_dev_rq_get(rq);
		if (rc)
			goto free_mem;
	}

	plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
		    cq->nb_desc);

	/* Store start of fast path area */
	eth_dev->data->rx_queues[qid] = rxq_sp + 1;
	eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;

	/* Calculating delta and freq mult between PTP HI clock and tsc.
	 * These are needed in deriving raw clock value from tsc counter.
	 * read_clock eth op returns raw clock value.
	 */
	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en) {
		rc = cnxk_nix_tsc_convert(dev);
		if (rc) {
			plt_err("Failed to calculate delta and freq mult");
			goto rq_fini;
		}
	}

	return 0;

free_mem:
	plt_free(rxq_sp);
rq_fini:
	rc |= roc_nix_rq_fini(rq);
cq_fini:
	rc |= roc_nix_cq_fini(cq);
fail:
	return rc;
}
static void
cnxk_nix_rx_queue_release(void *rxq)
{
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct cnxk_eth_dev *dev;
	struct roc_nix_rq *rq;
	struct roc_nix_cq *cq;
	uint16_t qid;
	int rc;

	if (!rxq)
		return;

	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
	dev = rxq_sp->dev;
	qid = rxq_sp->qid;
	rq = &dev->rqs[qid];

	plt_nix_dbg("Releasing rxq %u", qid);

	/* Release rq reference for inline dev if present */
	if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
		roc_nix_inl_dev_rq_put(rq);

	/* Cleanup ROC RQ */
	rc = roc_nix_rq_fini(rq);
	if (rc)
		plt_err("Failed to cleanup rq, rc=%d", rc);

	/* Cleanup ROC CQ */
	cq = &dev->cqs[qid];
	rc = roc_nix_cq_fini(cq);
	if (rc)
		plt_err("Failed to cleanup cq, rc=%d", rc);

	/* Finally free fast path area */
	plt_free(rxq_sp);
}
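
/* Translate ethdev ETH_RSS_* hash flags into the NIX flow key configuration.
 * flow_key_type[] is indexed by RSS level (outer, inner, outer+inner) and
 * selects the corresponding FLOW_KEY_TYPE_* bits per protocol.
 */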
uint32_t
cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
		       uint8_t rss_level)
{
	uint32_t flow_key_type[RSS_MAX_LEVELS][6] = {
		{FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, FLOW_KEY_TYPE_TCP,
		 FLOW_KEY_TYPE_UDP, FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC},
		{FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6,
		 FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP,
		 FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC},
		{FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4,
		 FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6,
		 FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP,
		 FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP,
		 FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP,
		 FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC}
	};
	uint32_t flowkey_cfg = 0;

	dev->ethdev_rss_hf = ethdev_rss;

	if (ethdev_rss & ETH_RSS_L2_PAYLOAD &&
	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_LEN_90B) {
		flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;
	}

	if (ethdev_rss & ETH_RSS_C_VLAN)
		flowkey_cfg |= FLOW_KEY_TYPE_VLAN;

	if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
		flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;

	if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
		flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;

	if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
		flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;

	if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
		flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;

	if (ethdev_rss & RSS_IPV4_ENABLE)
		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX];

	if (ethdev_rss & RSS_IPV6_ENABLE)
		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];

	if (ethdev_rss & ETH_RSS_TCP)
		flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];

	if (ethdev_rss & ETH_RSS_UDP)
		flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];

	if (ethdev_rss & ETH_RSS_SCTP)
		flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];

	if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
		flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];

	if (ethdev_rss & RSS_IPV6_EX_ENABLE)
		flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;

	if (ethdev_rss & ETH_RSS_PORT)
		flowkey_cfg |= FLOW_KEY_TYPE_PORT;

	if (ethdev_rss & ETH_RSS_NVGRE)
		flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;

	if (ethdev_rss & ETH_RSS_VXLAN)
		flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;

	if (ethdev_rss & ETH_RSS_GENEVE)
		flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;

	if (ethdev_rss & ETH_RSS_GTPU)
		flowkey_cfg |= FLOW_KEY_TYPE_GTPU;

	return flowkey_cfg;
}
static void
nix_free_queue_mem(struct cnxk_eth_dev *dev)
{
	plt_free(dev->rqs);
	plt_free(dev->cqs);
	plt_free(dev->sqs);
	dev->rqs = NULL;
	dev->cqs = NULL;
	dev->sqs = NULL;
}
static int
nix_rss_default_setup(struct cnxk_eth_dev *dev)
{
	struct rte_eth_dev *eth_dev = dev->eth_dev;
	uint8_t rss_hash_level;
	uint32_t flowkey_cfg;
	uint64_t rss_hf;

	rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	rss_hash_level = ETH_RSS_LEVEL(rss_hf);
	if (rss_hash_level)
		rss_hash_level -= 1;

	flowkey_cfg = cnxk_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
	return roc_nix_rss_default_setup(&dev->nix, flowkey_cfg);
}
static int
nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct cnxk_eth_qconf *tx_qconf = NULL;
	struct cnxk_eth_qconf *rx_qconf = NULL;
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct cnxk_eth_txq_sp *txq_sp;
	int i, nb_rxq, nb_txq;
	void **txq, **rxq;

	nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
	nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);

	tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
	if (tx_qconf == NULL) {
		plt_err("Failed to allocate memory for tx_qconf");
		goto fail;
	}

	rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
	if (rx_qconf == NULL) {
		plt_err("Failed to allocate memory for rx_qconf");
		goto fail;
	}

	txq = eth_dev->data->tx_queues;
	for (i = 0; i < nb_txq; i++) {
		if (txq[i] == NULL) {
			tx_qconf[i].valid = false;
			plt_info("txq[%d] is already released", i);
			continue;
		}
		txq_sp = cnxk_eth_txq_to_sp(txq[i]);
		memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
		tx_qconf[i].valid = true;
		dev_ops->tx_queue_release(txq[i]);
		eth_dev->data->tx_queues[i] = NULL;
	}

	rxq = eth_dev->data->rx_queues;
	for (i = 0; i < nb_rxq; i++) {
		if (rxq[i] == NULL) {
			rx_qconf[i].valid = false;
			plt_info("rxq[%d] is already released", i);
			continue;
		}
		rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
		memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
		rx_qconf[i].valid = true;
		dev_ops->rx_queue_release(rxq[i]);
		eth_dev->data->rx_queues[i] = NULL;
	}

	dev->tx_qconf = tx_qconf;
	dev->rx_qconf = rx_qconf;
	return 0;

fail:
	free(tx_qconf);
	free(rx_qconf);
	return -ENOMEM;
}
static int
nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
	struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
	int rc, i, nb_rxq, nb_txq;
	void **txq, **rxq;

	nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
	nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);

	rc = -ENOMEM;
	/* Setup tx & rx queues with previous configuration so
	 * that the queues can be functional in cases like ports
	 * are started without re configuring queues.
	 *
	 * Usual re config sequence is like below:
	 * port_configure() {
	 *      if(reconfigure) {
	 *              queue_release()
	 *              queue_setup()
	 *      }
	 *      queue_configure() {
	 *              queue_release()
	 *              queue_setup()
	 *      }
	 * }
	 * port_start()
	 *
	 * In some application's control path, queue_configure() would
	 * NOT be invoked for TXQs/RXQs in port_configure().
	 * In such cases, queues can be functional after start as the
	 * queues are already setup in port_configure().
	 */
	for (i = 0; i < nb_txq; i++) {
		if (!tx_qconf[i].valid)
			continue;
		rc = dev_ops->tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, 0,
					     &tx_qconf[i].conf.tx);
		if (rc) {
			plt_err("Failed to setup tx queue rc=%d", rc);
			txq = eth_dev->data->tx_queues;
			for (i -= 1; i >= 0; i--)
				dev_ops->tx_queue_release(txq[i]);
			goto fail;
		}
	}

	free(tx_qconf);
	tx_qconf = NULL;

	for (i = 0; i < nb_rxq; i++) {
		if (!rx_qconf[i].valid)
			continue;
		rc = dev_ops->rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, 0,
					     &rx_qconf[i].conf.rx,
					     rx_qconf[i].mp);
		if (rc) {
			plt_err("Failed to setup rx queue rc=%d", rc);
			rxq = eth_dev->data->rx_queues;
			for (i -= 1; i >= 0; i--)
				dev_ops->rx_queue_release(rxq[i]);
			goto tx_queue_release;
		}
	}

	free(rx_qconf);
	rx_qconf = NULL;

	return 0;

tx_queue_release:
	txq = eth_dev->data->tx_queues;
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		dev_ops->tx_queue_release(txq[i]);
fail:
	if (tx_qconf)
		free(tx_qconf);
	if (rx_qconf)
		free(rx_qconf);

	return rc;
}
static uint16_t
nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
{
	RTE_SET_USED(queue);
	RTE_SET_USED(mbufs);
	RTE_SET_USED(pkts);

	return 0;
}

static void
nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
{
	/* These dummy functions are required for supporting
	 * some applications which reconfigure queues without
	 * stopping tx burst and rx burst threads(eg kni app).
	 * When the queue context is saved, txq/rxqs are released,
	 * which can cause an app crash since rx/tx burst is still
	 * running on different lcores.
	 */
	eth_dev->tx_pkt_burst = nix_eth_nop_burst;
	eth_dev->rx_pkt_burst = nix_eth_nop_burst;
	rte_mb();
}
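
/* Cache the LSO format indexes returned by the AF for tunnel and UDP tunnel
 * profiles. Each index is packed into one byte of dev->lso_tun_fmt (tunnel
 * formats in the low 32 bits, UDP tunnel formats in the high 32 bits) so the
 * Tx path can pick the right format without further lookups.
 */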
static int
nix_lso_tun_fmt_update(struct cnxk_eth_dev *dev)
{
	uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX];
	uint8_t tun[ROC_NIX_LSO_TUN_MAX];
	struct roc_nix *nix = &dev->nix;
	int rc;

	rc = roc_nix_lso_fmt_get(nix, udp_tun, tun);
	if (rc)
		return rc;

	dev->lso_tun_fmt = ((uint64_t)tun[ROC_NIX_LSO_TUN_V4V4] |
			    (uint64_t)tun[ROC_NIX_LSO_TUN_V4V6] << 8 |
			    (uint64_t)tun[ROC_NIX_LSO_TUN_V6V4] << 16 |
			    (uint64_t)tun[ROC_NIX_LSO_TUN_V6V6] << 24);

	dev->lso_tun_fmt |= ((uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V4] << 32 |
			     (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V6] << 40 |
			     (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V4] << 48 |
			     (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V6] << 56);
	return 0;
}
static int
nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
{
	struct roc_nix *nix = &dev->nix;
	int rc;

	/* Nothing much to do if offload is not enabled */
	if (!(dev->tx_offloads &
	      (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
	       DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))
		return 0;

	/* Setup LSO formats in AF. Its a no-op if other ethdev has
	 * already set it up.
	 */
	rc = roc_nix_lso_fmt_setup(nix);
	if (rc)
		return rc;

	return nix_lso_tun_fmt_update(dev);
}
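
/* dev_configure callback. Ordering matters here: any previous configuration
 * is torn down first (with the queue config saved so it can be restored
 * later), then the NIX LF is allocated, followed by LSO, RSS, TM, IRQ,
 * loopback, flow control and inline security setup. On failure the error
 * path unwinds in reverse order.
 */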
int
cnxk_nix_configure(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	struct rte_eth_txmode *txmode = &conf->txmode;
	char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
	struct roc_nix_fc_cfg fc_cfg = {0};
	struct roc_nix *nix = &dev->nix;
	struct rte_ether_addr *ea;
	uint8_t nb_rxq, nb_txq;
	uint64_t rx_cfg;
	void *qs;
	int rc;

	rc = -EINVAL;

	/* Sanity checks */
	if (rte_eal_has_hugepages() == 0) {
		plt_err("Huge page is not configured");
		goto fail_configure;
	}

	if (conf->dcb_capability_en == 1) {
		plt_err("dcb enable is not supported");
		goto fail_configure;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		plt_err("Flow director is not supported");
		goto fail_configure;
	}

	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
		plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
		goto fail_configure;
	}

	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
		plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
		goto fail_configure;
	}

	/* Free the resources allocated from the previous configure */
	if (dev->configured == 1) {
		/* Unregister queue irq's */
		roc_nix_unregister_queue_irqs(nix);

		/* Unregister CQ irqs if present */
		if (eth_dev->data->dev_conf.intr_conf.rxq)
			roc_nix_unregister_cq_irqs(nix);

		/* Set no-op functions */
		nix_set_nop_rxtx_function(eth_dev);
		/* Store queue config for later */
		rc = nix_store_queue_cfg_and_then_release(eth_dev);
		if (rc)
			goto fail_configure;

		/* Cleanup security support */
		rc = nix_security_release(dev);
		if (rc)
			goto fail_configure;

		roc_nix_tm_fini(nix);
		roc_nix_lf_free(nix);
	}

	dev->rx_offloads = rxmode->offloads;
	dev->tx_offloads = txmode->offloads;

	/* Prepare rx cfg */
	rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
	if (dev->rx_offloads &
	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
	}
	rx_cfg |= (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |
		   ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
		   ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);

	nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
	nb_txq = RTE_MAX(data->nb_tx_queues, 1);

	/* Alloc a nix lf */
	rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
	if (rc) {
		plt_err("Failed to init nix_lf rc=%d", rc);
		goto fail_configure;
	}

	dev->npc.channel = roc_nix_get_base_chan(nix);

	nb_rxq = data->nb_rx_queues;
	nb_txq = data->nb_tx_queues;
	rc = -ENOMEM;
	if (nb_rxq) {
		/* Allocate memory for roc rq's and cq's */
		qs = plt_zmalloc(sizeof(struct roc_nix_rq) * nb_rxq, 0);
		if (!qs) {
			plt_err("Failed to alloc rqs");
			goto free_nix_lf;
		}
		dev->rqs = qs;

		qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_rxq, 0);
		if (!qs) {
			plt_err("Failed to alloc cqs");
			goto free_nix_lf;
		}
		dev->cqs = qs;
	}

	if (nb_txq) {
		/* Allocate memory for roc sq's */
		qs = plt_zmalloc(sizeof(struct roc_nix_sq) * nb_txq, 0);
		if (!qs) {
			plt_err("Failed to alloc sqs");
			goto free_nix_lf;
		}
		dev->sqs = qs;
	}

	/* Re-enable NIX LF error interrupts */
	roc_nix_err_intr_ena_dis(nix, true);
	roc_nix_ras_intr_ena_dis(nix, true);

	if (nix->rx_ptp_ena &&
	    dev->npc.switch_header_type == ROC_PRIV_FLAGS_HIGIG) {
		plt_err("Both PTP and switch header enabled");
		goto free_nix_lf;
	}

	rc = roc_nix_switch_hdr_set(nix, dev->npc.switch_header_type);
	if (rc) {
		plt_err("Failed to enable switch type nix_lf rc=%d", rc);
		goto free_nix_lf;
	}

	/* Setup LSO if needed */
	rc = nix_lso_fmt_setup(dev);
	if (rc) {
		plt_err("Failed to setup nix lso format fields, rc=%d", rc);
		goto free_nix_lf;
	}

	/* Configure RSS */
	rc = nix_rss_default_setup(dev);
	if (rc) {
		plt_err("Failed to configure rss rc=%d", rc);
		goto free_nix_lf;
	}

	/* Init the default TM scheduler hierarchy */
	rc = roc_nix_tm_init(nix);
	if (rc) {
		plt_err("Failed to init traffic manager, rc=%d", rc);
		goto free_nix_lf;
	}

	rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
	if (rc) {
		plt_err("Failed to enable default tm hierarchy, rc=%d", rc);
		goto tm_fini;
	}

	/* Register queue IRQs */
	rc = roc_nix_register_queue_irqs(nix);
	if (rc) {
		plt_err("Failed to register queue interrupts rc=%d", rc);
		goto tm_fini;
	}

	/* Register cq IRQs */
	if (eth_dev->data->dev_conf.intr_conf.rxq) {
		if (eth_dev->data->nb_rx_queues > dev->nix.cints) {
			plt_err("Rx interrupt cannot be enabled, rxq > %d",
				dev->nix.cints);
			goto q_irq_fini;
		}
		/* Rx interrupt feature cannot work with vector mode because,
		 * vector mode does not process packets unless min 4 pkts are
		 * received, while cq interrupts are generated even for 1 pkt
		 * in the CQ.
		 */
		dev->scalar_ena = true;

		rc = roc_nix_register_cq_irqs(nix);
		if (rc) {
			plt_err("Failed to register CQ interrupts rc=%d", rc);
			goto q_irq_fini;
		}
	}

	/* Configure loop back mode */
	rc = roc_nix_mac_loopback_enable(nix,
					 eth_dev->data->dev_conf.lpbk_mode);
	if (rc) {
		plt_err("Failed to configure cgx loop back mode rc=%d", rc);
		goto cq_fini;
	}

	/* Init flow control configuration */
	fc_cfg.cq_cfg_valid = false;
	fc_cfg.rxchan_cfg.enable = true;
	rc = roc_nix_fc_config_set(nix, &fc_cfg);
	if (rc) {
		plt_err("Failed to initialize flow control rc=%d", rc);
		goto cq_fini;
	}

	/* Update flow control configuration to PMD */
	rc = nix_init_flow_ctrl_config(eth_dev);
	if (rc) {
		plt_err("Failed to initialize flow control rc=%d", rc);
		goto cq_fini;
	}

	/* Setup Inline security support */
	rc = nix_security_setup(dev);
	if (rc)
		goto cq_fini;

	/* Restore queue config when a reconfigure is followed by another
	 * reconfigure without the application invoking queue configure in
	 * between.
	 */
	if (dev->configured == 1) {
		rc = nix_restore_queue_cfg(eth_dev);
		if (rc)
			goto sec_release;
	}

	/* Update the mac address */
	ea = eth_dev->data->mac_addrs;
	memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
	if (rte_is_zero_ether_addr(ea))
		rte_eth_random_addr((uint8_t *)ea);

	rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);

	plt_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
		    " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 "",
		    eth_dev->data->port_id, ea_fmt, nb_rxq, nb_txq,
		    dev->rx_offloads, dev->tx_offloads);

	/* All good */
	dev->configured = 1;
	dev->nb_rxq = data->nb_rx_queues;
	dev->nb_txq = data->nb_tx_queues;
	return 0;

sec_release:
	rc |= nix_security_release(dev);
cq_fini:
	roc_nix_unregister_cq_irqs(nix);
q_irq_fini:
	roc_nix_unregister_queue_irqs(nix);
tm_fini:
	roc_nix_tm_fini(nix);
free_nix_lf:
	nix_free_queue_mem(dev);
	rc |= roc_nix_lf_free(nix);
fail_configure:
	dev->configured = 0;
	return rc;
}
int
cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct roc_nix_sq *sq = &dev->sqs[qid];
	int rc;

	if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	rc = roc_nix_tm_sq_aura_fc(sq, true);
	if (rc) {
		plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
		return rc;
	}

	data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}
int
cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct roc_nix_sq *sq = &dev->sqs[qid];
	int rc;

	if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	rc = roc_nix_tm_sq_aura_fc(sq, false);
	if (rc) {
		plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
			rc);
		return rc;
	}

	data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
static int
cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct roc_nix_rq *rq = &dev->rqs[qid];
	int rc;

	if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	rc = roc_nix_rq_ena_dis(rq, true);
	if (rc) {
		plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
		return rc;
	}

	data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}
static int
cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct roc_nix_rq *rq = &dev->rqs[qid];
	int rc;

	if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	rc = roc_nix_rq_ena_dis(rq, false);
	if (rc) {
		plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
		return rc;
	}

	data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
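
/* dev_stop callback: stop link events and NPC Rx, drain and stop all Rx
 * queues, stop Tx queues and report the link as down.
 */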
static int
cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct rte_mbuf *rx_pkts[32];
	struct rte_eth_link link;
	int count, i, j, rc;
	void *rxq;

	/* Disable switch hdr pkind */
	roc_nix_switch_hdr_set(&dev->nix, 0);

	/* Stop link change events */
	if (!roc_nix_is_vf_or_sdp(&dev->nix))
		roc_nix_mac_link_event_start_stop(&dev->nix, false);

	/* Disable Rx via NPC */
	roc_nix_npc_rx_ena_dis(&dev->nix, false);

	/* Stop rx queues and free up pkts pending */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rc = dev_ops->rx_queue_stop(eth_dev, i);
		if (rc)
			continue;

		rxq = eth_dev->data->rx_queues[i];
		count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
		while (count) {
			for (j = 0; j < count; j++)
				rte_pktmbuf_free(rx_pkts[j]);
			count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
		}
	}

	/* Stop tx queues */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		dev_ops->tx_queue_stop(eth_dev, i);

	/* Bring down link status internally */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(eth_dev, &link);

	return 0;
}
int
cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc, i;

	if (eth_dev->data->nb_rx_queues != 0 && !dev->ptp_en) {
		rc = nix_recalc_mtu(eth_dev);
		if (rc)
			return rc;
	}

	/* Start rx queues */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rc = cnxk_nix_rx_queue_start(eth_dev, i);
		if (rc)
			return rc;
	}

	/* Start tx queues */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		rc = cnxk_nix_tx_queue_start(eth_dev, i);
		if (rc)
			return rc;
	}

	/* Update Flow control configuration */
	rc = nix_update_flow_ctrl_config(eth_dev);
	if (rc) {
		plt_err("Failed to enable flow control. error code(%d)", rc);
		return rc;
	}

	/* Enable Rx in NPC */
	rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
	if (rc) {
		plt_err("Failed to enable NPC rx %d", rc);
		return rc;
	}

	cnxk_nix_toggle_flag_link_cfg(dev, true);

	/* Start link change events */
	if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
		rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
		if (rc) {
			plt_err("Failed to start cgx link event %d", rc);
			goto rx_disable;
		}
	}

	/* Enable PTP if it is requested by the user or already
	 * enabled on PF owning this VF
	 */
	memset(&dev->tstamp, 0, sizeof(struct cnxk_timesync_info));
	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) || dev->ptp_en)
		cnxk_eth_dev_ops.timesync_enable(eth_dev);
	else
		cnxk_eth_dev_ops.timesync_disable(eth_dev);

	if (dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) {
		rc = rte_mbuf_dyn_rx_timestamp_register
			(&dev->tstamp.tstamp_dynfield_offset,
			 &dev->tstamp.rx_tstamp_dynflag);
		if (rc != 0) {
			plt_err("Failed to register Rx timestamp field/flag");
			goto rx_disable;
		}
	}

	cnxk_nix_toggle_flag_link_cfg(dev, false);

	return 0;

rx_disable:
	roc_nix_npc_rx_ena_dis(&dev->nix, false);
	cnxk_nix_toggle_flag_link_cfg(dev, false);
	return rc;
}
static int cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev);
static int cnxk_nix_dev_close(struct rte_eth_dev *eth_dev);

/* CNXK platform independent eth dev ops */
struct eth_dev_ops cnxk_eth_dev_ops = {
	.mtu_set = cnxk_nix_mtu_set,
	.mac_addr_add = cnxk_nix_mac_addr_add,
	.mac_addr_remove = cnxk_nix_mac_addr_del,
	.mac_addr_set = cnxk_nix_mac_addr_set,
	.dev_infos_get = cnxk_nix_info_get,
	.link_update = cnxk_nix_link_update,
	.tx_queue_release = cnxk_nix_tx_queue_release,
	.rx_queue_release = cnxk_nix_rx_queue_release,
	.dev_stop = cnxk_nix_dev_stop,
	.dev_close = cnxk_nix_dev_close,
	.dev_reset = cnxk_nix_dev_reset,
	.tx_queue_start = cnxk_nix_tx_queue_start,
	.rx_queue_start = cnxk_nix_rx_queue_start,
	.rx_queue_stop = cnxk_nix_rx_queue_stop,
	.dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
	.promiscuous_enable = cnxk_nix_promisc_enable,
	.promiscuous_disable = cnxk_nix_promisc_disable,
	.allmulticast_enable = cnxk_nix_allmulticast_enable,
	.allmulticast_disable = cnxk_nix_allmulticast_disable,
	.rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
	.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
	.flow_ctrl_get = cnxk_nix_flow_ctrl_get,
	.flow_ctrl_set = cnxk_nix_flow_ctrl_set,
	.dev_set_link_up = cnxk_nix_set_link_up,
	.dev_set_link_down = cnxk_nix_set_link_down,
	.get_module_info = cnxk_nix_get_module_info,
	.get_module_eeprom = cnxk_nix_get_module_eeprom,
	.rx_queue_intr_enable = cnxk_nix_rx_queue_intr_enable,
	.rx_queue_intr_disable = cnxk_nix_rx_queue_intr_disable,
	.pool_ops_supported = cnxk_nix_pool_ops_supported,
	.queue_stats_mapping_set = cnxk_nix_queue_stats_mapping,
	.stats_get = cnxk_nix_stats_get,
	.stats_reset = cnxk_nix_stats_reset,
	.xstats_get = cnxk_nix_xstats_get,
	.xstats_get_names = cnxk_nix_xstats_get_names,
	.xstats_reset = cnxk_nix_xstats_reset,
	.xstats_get_by_id = cnxk_nix_xstats_get_by_id,
	.xstats_get_names_by_id = cnxk_nix_xstats_get_names_by_id,
	.fw_version_get = cnxk_nix_fw_version_get,
	.rxq_info_get = cnxk_nix_rxq_info_get,
	.txq_info_get = cnxk_nix_txq_info_get,
	.tx_done_cleanup = cnxk_nix_tx_done_cleanup,
	.flow_ops_get = cnxk_nix_flow_ops_get,
	.get_reg = cnxk_nix_dev_get_reg,
	.timesync_read_rx_timestamp = cnxk_nix_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = cnxk_nix_timesync_read_tx_timestamp,
	.timesync_read_time = cnxk_nix_timesync_read_time,
	.timesync_write_time = cnxk_nix_timesync_write_time,
	.timesync_adjust_time = cnxk_nix_timesync_adjust_time,
	.read_clock = cnxk_nix_read_clock,
	.reta_update = cnxk_nix_reta_update,
	.reta_query = cnxk_nix_reta_query,
	.rss_hash_update = cnxk_nix_rss_hash_update,
	.rss_hash_conf_get = cnxk_nix_rss_hash_conf_get,
	.set_mc_addr_list = cnxk_nix_mc_addr_list_configure,
	.set_queue_rate_limit = cnxk_nix_tm_set_queue_rate_limit,
	.tm_ops_get = cnxk_nix_tm_ops_get,
};
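
/* Per-port init: allocates the security context, and on the primary process
 * initializes the base ROC NIX, MAC address table, offload/speed capabilities
 * and the NPC (rte_flow) context.
 */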
static int
cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_security_ctx *sec_ctx;
	struct roc_nix *nix = &dev->nix;
	struct rte_pci_device *pci_dev;
	int rc, max_entries;

	eth_dev->dev_ops = &cnxk_eth_dev_ops;

	/* Alloc security context */
	sec_ctx = plt_zmalloc(sizeof(struct rte_security_ctx), 0);
	if (!sec_ctx)
		return -ENOMEM;
	sec_ctx->device = eth_dev;
	sec_ctx->ops = &cnxk_eth_sec_ops;
	sec_ctx->flags =
		(RTE_SEC_CTX_F_FAST_SET_MDATA | RTE_SEC_CTX_F_FAST_GET_UDATA);
	eth_dev->security_ctx = sec_ctx;
	TAILQ_INIT(&dev->inb.list);
	TAILQ_INIT(&dev->outb.list);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Parse devargs string */
	rc = cnxk_ethdev_parse_devargs(eth_dev->device->devargs, dev);
	if (rc) {
		plt_err("Failed to parse devargs rc=%d", rc);
		goto error;
	}

	/* Initialize base roc nix */
	nix->pci_dev = pci_dev;
	nix->hw_vlan_ins = true;
	rc = roc_nix_dev_init(nix);
	if (rc) {
		plt_err("Failed to initialize roc nix rc=%d", rc);
		goto error;
	}

	/* Register up msg callbacks */
	roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);

	/* Register link info get callback */
	roc_nix_mac_link_info_get_cb_register(nix,
					       cnxk_eth_dev_link_status_get_cb);

	dev->eth_dev = eth_dev;
	dev->configured = 0;
	dev->ptype_disable = 0;

	/* For VFs, returned max_entries will be 0. But to keep the default
	 * MAC address, one entry must be allocated, so set it up to 1.
	 */
	if (roc_nix_is_vf_or_sdp(nix))
		max_entries = 1;
	else
		max_entries = roc_nix_mac_max_entries_get(nix);

	if (max_entries <= 0) {
		plt_err("Failed to get max entries for mac addr");
		rc = -ENOTSUP;
		goto dev_fini;
	}

	eth_dev->data->mac_addrs =
		rte_zmalloc("mac_addr", max_entries * RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		plt_err("Failed to allocate memory for mac addr");
		rc = -ENOMEM;
		goto dev_fini;
	}

	dev->max_mac_entries = max_entries;
	dev->dmac_filter_count = 1;

	/* Get mac address */
	rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
	if (rc) {
		plt_err("Failed to get mac addr, rc=%d", rc);
		goto free_mac_addrs;
	}

	/* Update the mac address */
	memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);

	if (!roc_nix_is_vf_or_sdp(nix)) {
		/* Sync same MAC address to CGX/RPM table */
		rc = roc_nix_mac_addr_set(nix, dev->mac_addr);
		if (rc) {
			plt_err("Failed to set mac addr, rc=%d", rc);
			goto free_mac_addrs;
		}
	}

	/* Union of all capabilities supported by CNXK.
	 * Platform specific capabilities will be
	 * updated later.
	 */
	dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
	dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
	dev->speed_capa = nix_get_speed_capa(dev);

	/* Initialize roc npc */
	dev->npc.roc_nix = nix;
	rc = roc_npc_init(&dev->npc);
	if (rc)
		goto free_mac_addrs;

	plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
		    " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
		    eth_dev->data->port_id, roc_nix_get_pf(nix),
		    roc_nix_get_vf(nix), CNXK_ETH_DEV_PMD_VERSION, dev->hwcap,
		    dev->rx_offload_capa, dev->tx_offload_capa);

	return 0;

free_mac_addrs:
	rte_free(eth_dev->data->mac_addrs);
dev_fini:
	roc_nix_dev_fini(nix);
error:
	plt_err("Failed to init nix eth_dev rc=%d", rc);
	return rc;
}
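
/* Per-port teardown used by close, reset and remove. Releases queues,
 * security, TM and IRQ resources before freeing the NIX LF;
 * roc_nix_dev_fini() may return -EAGAIN when the NPA LF is still shared
 * with other devices.
 */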
static int
cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool reset)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct roc_nix *nix = &dev->nix;
	int rc, i;

	plt_free(eth_dev->security_ctx);
	eth_dev->security_ctx = NULL;

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Clear the flag since we are closing down */
	dev->configured = 0;

	roc_nix_npc_rx_ena_dis(nix, false);

	/* Disable and free rte_flow entries */
	roc_npc_fini(&dev->npc);

	/* Disable link status events */
	roc_nix_mac_link_event_start_stop(nix, false);

	/* Unregister the link update op, this is required to stop VFs from
	 * receiving link status updates on exit path.
	 */
	roc_nix_mac_link_cb_unregister(nix);

	/* Free up SQs */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		dev_ops->tx_queue_release(eth_dev->data->tx_queues[i]);
		eth_dev->data->tx_queues[i] = NULL;
	}
	eth_dev->data->nb_tx_queues = 0;

	/* Free up RQ's and CQ's */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		dev_ops->rx_queue_release(eth_dev->data->rx_queues[i]);
		eth_dev->data->rx_queues[i] = NULL;
	}
	eth_dev->data->nb_rx_queues = 0;

	/* Free security resources */
	nix_security_release(dev);

	/* Free tm resources */
	roc_nix_tm_fini(nix);

	/* Unregister queue irqs */
	roc_nix_unregister_queue_irqs(nix);

	/* Unregister cq irqs */
	if (eth_dev->data->dev_conf.intr_conf.rxq)
		roc_nix_unregister_cq_irqs(nix);

	/* Free ROC RQ's, SQ's and CQ's memory */
	nix_free_queue_mem(dev);

	/* Free nix lf resources */
	rc = roc_nix_lf_free(nix);
	if (rc)
		plt_err("Failed to free nix lf, rc=%d", rc);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	rc = roc_nix_dev_fini(nix);
	/* Can be freed later by PMD if NPA LF is in use */
	if (rc == -EAGAIN) {
		if (!reset)
			eth_dev->data->dev_private = NULL;
		return 0;
	} else if (rc) {
		plt_err("Failed in nix dev fini, rc=%d", rc);
	}

	return rc;
}
static int
cnxk_nix_dev_close(struct rte_eth_dev *eth_dev)
{
	cnxk_eth_dev_uninit(eth_dev, false);
	return 0;
}
static int
cnxk_nix_dev_reset(struct rte_eth_dev *eth_dev)
{
	int rc;

	rc = cnxk_eth_dev_uninit(eth_dev, true);
	if (rc)
		return rc;

	return cnxk_eth_dev_init(eth_dev);
}
int
cnxk_nix_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	struct roc_nix *nix;
	int rc = -EINVAL;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (eth_dev) {
		/* Cleanup eth dev */
		rc = cnxk_eth_dev_uninit(eth_dev, false);
		if (rc)
			return rc;

		rte_eth_dev_release_port(eth_dev);
	}

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Check if this device is hosting common resource */
	nix = roc_idev_npa_nix_get();
	if (nix->pci_dev != pci_dev)
		return 0;

	/* Try nix fini now */
	rc = roc_nix_dev_fini(nix);
	if (rc == -EAGAIN) {
		plt_info("%s: common resource in use by other devices",
			 pci_dev->name);
		goto exit;
	} else if (rc) {
		plt_err("Failed in nix dev fini, rc=%d", rc);
		goto exit;
	}

	/* Free device pointer as rte_ethdev does not have it anymore */
	rte_free(nix);
exit:
	return rc;
}
int
cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	int rc;

	RTE_SET_USED(pci_drv);

	rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),
					   cnxk_eth_dev_init);

	/* On error on secondary, recheck if port exists in primary or
	 * in mid of detach state.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
		if (!rte_eth_dev_allocated(pci_dev->device.name))
			return 0;
	return rc;
}