/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <cnxk_ethdev.h>

static inline uint64_t
nix_get_rx_offload_capa(struct cnxk_eth_dev *dev)
{
	uint64_t capa = CNXK_NIX_RX_OFFLOAD_CAPA;

	/* Timestamping is not supported on VF/SDP interfaces */
	if (roc_nix_is_vf_or_sdp(&dev->nix))
		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;

	return capa;
}

static inline uint64_t
nix_get_tx_offload_capa(struct cnxk_eth_dev *dev)
{
	RTE_SET_USED(dev);
	return CNXK_NIX_TX_OFFLOAD_CAPA;
}

static inline uint32_t
nix_get_speed_capa(struct cnxk_eth_dev *dev)
{
	uint32_t speed_capa;

	/* Auto negotiation disabled */
	speed_capa = ETH_LINK_SPEED_FIXED;
	if (!roc_nix_is_vf_or_sdp(&dev->nix) && !roc_nix_is_lbk(&dev->nix)) {
		speed_capa |= ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
			      ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
			      ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
	}

	return speed_capa;
}

static void
nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
{
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct rte_eth_dev *eth_dev;
	struct cnxk_eth_dev *dev;
	uint32_t buffsz;

	dev = rxq->dev;
	eth_dev = dev->eth_dev;

	/* Get rx buffer size */
	mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
	buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

	if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
		dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
		dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
	}
}
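
/*
 * Illustrative sizing for the check above, assuming the default mempool
 * parameters rather than anything queried from hardware: with a data room
 * of RTE_MBUF_DEFAULT_BUF_SIZE (2176 B) and RTE_PKTMBUF_HEADROOM (128 B),
 * one mbuf carries 2048 B of packet data, so a max_rx_pkt_len of 9000 B
 * cannot fit in a single buffer and forces Rx scatter plus Tx multi-seg on.
 */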

static int
nix_recalc_mtu(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_dev_data *data = eth_dev->data;
	struct cnxk_eth_rxq_sp *rxq;
	uint16_t mtu;
	int rc;

	rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
	/* Setup scatter mode if needed by jumbo */
	nix_enable_mseg_on_jumbo(rxq);

	/* Setup MTU based on max_rx_pkt_len */
	mtu = data->dev_conf.rxmode.max_rx_pkt_len - CNXK_NIX_L2_OVERHEAD +
	      CNXK_NIX_MAX_VTAG_ACT_SIZE;

	rc = cnxk_nix_mtu_set(eth_dev, mtu);
	if (rc)
		plt_err("Failed to set default MTU size, rc=%d", rc);

	return rc;
}
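
/*
 * Worked example for the MTU math above (values assumed from the cnxk
 * header defaults, not derived here): with max_rx_pkt_len = 1518,
 * CNXK_NIX_L2_OVERHEAD = 26 (14 B Ethernet header + 4 B FCS + 8 B VLAN
 * insertion room) and CNXK_NIX_MAX_VTAG_ACT_SIZE = 8, the resulting MTU
 * is 1518 - 26 + 8 = 1500. The VTAG action size is added back because the
 * hardware may strip up to that many bytes of VLAN tags on receive.
 */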

static int
nix_init_flow_ctrl_config(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
	struct rte_eth_fc_conf fc_conf = {0};
	int rc;

	/* Both Rx & Tx flow ctrl are enabled (RTE_FC_FULL) in HW
	 * by the AF driver; reflect that state in the PMD structure.
	 */
	rc = cnxk_nix_flow_ctrl_get(eth_dev, &fc_conf);
	if (rc)
		return rc;

	fc->mode = fc_conf.mode;
	fc->rx_pause = (fc_conf.mode == RTE_FC_FULL) ||
		       (fc_conf.mode == RTE_FC_RX_PAUSE);
	fc->tx_pause = (fc_conf.mode == RTE_FC_FULL) ||
		       (fc_conf.mode == RTE_FC_TX_PAUSE);

	return rc;
}

static int
nix_update_flow_ctrl_config(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_fc_cfg *fc = &dev->fc_cfg;
	struct rte_eth_fc_conf fc_cfg = {0};

	if (roc_nix_is_vf_or_sdp(&dev->nix))
		return 0;

	fc_cfg.mode = fc->mode;

	/* To avoid a link credit deadlock on Ax, disable Tx FC if it's enabled */
	if (roc_model_is_cn96_ax() &&
	    (fc_cfg.mode == RTE_FC_FULL || fc_cfg.mode == RTE_FC_RX_PAUSE)) {
		fc_cfg.mode =
			(fc_cfg.mode == RTE_FC_FULL ||
			 fc_cfg.mode == RTE_FC_TX_PAUSE) ?
			RTE_FC_TX_PAUSE : RTE_FC_NONE;
	}

	return cnxk_nix_flow_ctrl_set(eth_dev, &fc_cfg);
}
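
/*
 * Net effect of the cn96 Ax workaround above: RTE_FC_FULL degrades to
 * RTE_FC_TX_PAUSE and RTE_FC_RX_PAUSE degrades to RTE_FC_NONE, i.e. Rx
 * pause handling is never left enabled on the affected silicon.
 */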

uint64_t
cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
{
	uint16_t port_id = dev->eth_dev->data->port_id;
	struct rte_mbuf mb_def;
	uint64_t *tmp;

	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
				 offsetof(struct rte_mbuf, data_off) !=
			 2);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
				 offsetof(struct rte_mbuf, data_off) !=
			 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
				 offsetof(struct rte_mbuf, data_off) !=
			 6);
	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* Prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
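
	/*
	 * Layout sketch of the 64-bit rearm word returned below (implied by
	 * the RTE_BUILD_BUG_ON checks above; offsets relative to data_off):
	 *
	 *   bits  0..15  data_off = RTE_PKTMBUF_HEADROOM
	 *   bits 16..31  refcnt   = 1
	 *   bits 32..47  nb_segs  = 1
	 *   bits 48..63  port     = port_id
	 *
	 * The Rx fast path can then re-arm an mbuf with a single 64-bit
	 * store instead of four 16-bit field writes.
	 */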
	tmp = (uint64_t *)&mb_def.rearm_data;

	return *tmp;
}

static inline uint8_t
nix_sq_max_sqe_sz(struct cnxk_eth_dev *dev)
{
	/*
	 * Maximum three segments can be supported with W8; choose
	 * NIX_MAXSQESZ_W16 for multi segment offload.
	 */
	if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
		return NIX_MAXSQESZ_W16;
	else
		return NIX_MAXSQESZ_W8;
}

static int
cnxk_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			uint16_t nb_desc, uint16_t fp_tx_q_sz,
			const struct rte_eth_txconf *tx_conf)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct cnxk_eth_txq_sp *txq_sp;
	struct roc_nix_sq *sq;
	size_t txq_sz;
	int rc;

	/* Free memory prior to re-allocation if needed. */
	if (eth_dev->data->tx_queues[qid] != NULL) {
		plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
		dev_ops->tx_queue_release(eth_dev->data->tx_queues[qid]);
		eth_dev->data->tx_queues[qid] = NULL;
	}

	/* Setup ROC SQ */
	sq = &dev->sqs[qid];
	sq->qid = qid;
	sq->nb_desc = nb_desc;
	sq->max_sqe_sz = nix_sq_max_sqe_sz(dev);

	rc = roc_nix_sq_init(&dev->nix, sq);
	if (rc) {
		plt_err("Failed to init sq=%d, rc=%d", qid, rc);
		return rc;
	}

	rc = -ENOMEM;
	txq_sz = sizeof(struct cnxk_eth_txq_sp) + fp_tx_q_sz;
	txq_sp = plt_zmalloc(txq_sz, PLT_CACHE_LINE_SIZE);
	if (!txq_sp) {
		plt_err("Failed to alloc tx queue mem");
		rc |= roc_nix_sq_fini(sq);
		return rc;
	}

	txq_sp->dev = dev;
	txq_sp->qid = qid;
	txq_sp->qconf.conf.tx = *tx_conf;
	txq_sp->qconf.nb_desc = nb_desc;

	plt_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " lmt_addr=%p"
		    " nb_sqb_bufs=%d sqes_per_sqb_log2=%d",
		    qid, sq->fc, dev->tx_offloads, sq->lmt_addr,
		    sq->nb_sqb_bufs, sq->sqes_per_sqb_log2);

	/* Store start of fast path area */
	eth_dev->data->tx_queues[qid] = txq_sp + 1;
	eth_dev->data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
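
/*
 * Note on the "txq_sp + 1" stored above: the slow-path context
 * (struct cnxk_eth_txq_sp) and the platform fast-path queue area are carved
 * from a single allocation laid out as [txq_sp][fast path area]. ethdev only
 * ever sees the fast-path pointer, and cnxk_eth_txq_to_sp() steps back one
 * struct to recover the slow-path context. The Rx queues below use the same
 * scheme via cnxk_eth_rxq_to_sp().
 */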

static void
cnxk_nix_tx_queue_release(void *txq)
{
	struct cnxk_eth_txq_sp *txq_sp;
	struct cnxk_eth_dev *dev;
	struct roc_nix_sq *sq;
	uint16_t qid;
	int rc;

	if (!txq)
		return;

	txq_sp = cnxk_eth_txq_to_sp(txq);
	dev = txq_sp->dev;
	qid = txq_sp->qid;

	plt_nix_dbg("Releasing txq %u", qid);

	/* Cleanup ROC SQ */
	sq = &dev->sqs[qid];
	rc = roc_nix_sq_fini(sq);
	if (rc)
		plt_err("Failed to cleanup sq, rc=%d", rc);

	/* Finally free */
	plt_free(txq_sp);
}

static int
cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
			uint16_t nb_desc, uint16_t fp_rx_q_sz,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct rte_mempool_ops *ops;
	const char *platform_ops;
	struct roc_nix_rq *rq;
	struct roc_nix_cq *cq;
	uint16_t first_skip;
	int rc = -EINVAL;
	size_t rxq_sz;

	/* Sanity checks */
	if (rx_conf->rx_deferred_start == 1) {
		plt_err("Deferred Rx start is not supported");
		goto fail;
	}

	platform_ops = rte_mbuf_platform_mempool_ops();
	/* This driver needs cnxk_npa mempool ops to work */
	ops = rte_mempool_get_ops(mp->ops_index);
	if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
		plt_err("mempool ops should be of cnxk_npa type");
		goto fail;
	}

	if (mp->pool_id == 0) {
		plt_err("Invalid pool_id");
		goto fail;
	}

	/* Free memory prior to re-allocation if needed */
	if (eth_dev->data->rx_queues[qid] != NULL) {
		const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;

		plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
		dev_ops->rx_queue_release(eth_dev->data->rx_queues[qid]);
		eth_dev->data->rx_queues[qid] = NULL;
	}

	/* Setup ROC CQ */
	cq = &dev->cqs[qid];
	cq->qid = qid;
	cq->nb_desc = nb_desc;
	rc = roc_nix_cq_init(&dev->nix, cq);
	if (rc) {
		plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
		goto fail;
	}

	/* Setup ROC RQ */
	rq = &dev->rqs[qid];
	rq->qid = qid;
	rq->aura_handle = mp->pool_id;
	rq->flow_tag_width = 32;

	/* Calculate first mbuf skip */
	first_skip = (sizeof(struct rte_mbuf));
	first_skip += RTE_PKTMBUF_HEADROOM;
	first_skip += rte_pktmbuf_priv_size(mp);
	rq->first_skip = first_skip;
	rq->later_skip = sizeof(struct rte_mbuf);
	rq->lpb_size = mp->elt_size;
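
	/*
	 * The skip values above tell the NIX DMA engine where packet data
	 * may be written inside each buffer: the first segment leaves room
	 * for the mbuf header, the application private area and the
	 * headroom, while later segments (scatter mode) leave room for the
	 * mbuf header only, as configured by later_skip.
	 */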

	rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
	if (rc) {
		plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
		goto cq_fini;
	}

	/* Allocate and setup fast path rx queue */
	rc = -ENOMEM;
	rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
	rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
	if (!rxq_sp) {
		plt_err("Failed to alloc rx queue for rq=%d", qid);
		goto rq_fini;
	}

	/* Setup slow path fields */
	rxq_sp->dev = dev;
	rxq_sp->qid = qid;
	rxq_sp->qconf.conf.rx = *rx_conf;
	rxq_sp->qconf.nb_desc = nb_desc;
	rxq_sp->qconf.mp = mp;

	plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
		    cq->nb_desc);

	/* Store start of fast path area */
	eth_dev->data->rx_queues[qid] = rxq_sp + 1;
	eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
rq_fini:
	rc |= roc_nix_rq_fini(rq);
cq_fini:
	rc |= roc_nix_cq_fini(cq);
fail:
	return rc;
}

static void
cnxk_nix_rx_queue_release(void *rxq)
{
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct cnxk_eth_dev *dev;
	struct roc_nix_rq *rq;
	struct roc_nix_cq *cq;
	uint16_t qid;
	int rc;

	if (!rxq)
		return;

	rxq_sp = cnxk_eth_rxq_to_sp(rxq);
	dev = rxq_sp->dev;
	qid = rxq_sp->qid;

	plt_nix_dbg("Releasing rxq %u", qid);

	/* Cleanup ROC RQ */
	rq = &dev->rqs[qid];
	rc = roc_nix_rq_fini(rq);
	if (rc)
		plt_err("Failed to cleanup rq, rc=%d", rc);

	/* Cleanup ROC CQ */
	cq = &dev->cqs[qid];
	rc = roc_nix_cq_fini(cq);
	if (rc)
		plt_err("Failed to cleanup cq, rc=%d", rc);

	/* Finally free fast path area */
	plt_free(rxq_sp);
}

uint32_t
cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
		       uint8_t rss_level)
{
	uint32_t flow_key_type[RSS_MAX_LEVELS][6] = {
		{FLOW_KEY_TYPE_IPV4, FLOW_KEY_TYPE_IPV6, FLOW_KEY_TYPE_TCP,
		 FLOW_KEY_TYPE_UDP, FLOW_KEY_TYPE_SCTP, FLOW_KEY_TYPE_ETH_DMAC},
		{FLOW_KEY_TYPE_INNR_IPV4, FLOW_KEY_TYPE_INNR_IPV6,
		 FLOW_KEY_TYPE_INNR_TCP, FLOW_KEY_TYPE_INNR_UDP,
		 FLOW_KEY_TYPE_INNR_SCTP, FLOW_KEY_TYPE_INNR_ETH_DMAC},
		{FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_INNR_IPV4,
		 FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_INNR_IPV6,
		 FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_INNR_TCP,
		 FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_INNR_UDP,
		 FLOW_KEY_TYPE_SCTP | FLOW_KEY_TYPE_INNR_SCTP,
		 FLOW_KEY_TYPE_ETH_DMAC | FLOW_KEY_TYPE_INNR_ETH_DMAC}
	};
	uint32_t flowkey_cfg = 0;

	dev->ethdev_rss_hf = ethdev_rss;

	if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
		flowkey_cfg |= FLOW_KEY_TYPE_CH_LEN_90B;

	if (ethdev_rss & ETH_RSS_C_VLAN)
		flowkey_cfg |= FLOW_KEY_TYPE_VLAN;

	if (ethdev_rss & ETH_RSS_L3_SRC_ONLY)
		flowkey_cfg |= FLOW_KEY_TYPE_L3_SRC;

	if (ethdev_rss & ETH_RSS_L3_DST_ONLY)
		flowkey_cfg |= FLOW_KEY_TYPE_L3_DST;

	if (ethdev_rss & ETH_RSS_L4_SRC_ONLY)
		flowkey_cfg |= FLOW_KEY_TYPE_L4_SRC;

	if (ethdev_rss & ETH_RSS_L4_DST_ONLY)
		flowkey_cfg |= FLOW_KEY_TYPE_L4_DST;

	if (ethdev_rss & RSS_IPV4_ENABLE)
		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV4_INDEX];

	if (ethdev_rss & RSS_IPV6_ENABLE)
		flowkey_cfg |= flow_key_type[rss_level][RSS_IPV6_INDEX];

	if (ethdev_rss & ETH_RSS_TCP)
		flowkey_cfg |= flow_key_type[rss_level][RSS_TCP_INDEX];

	if (ethdev_rss & ETH_RSS_UDP)
		flowkey_cfg |= flow_key_type[rss_level][RSS_UDP_INDEX];

	if (ethdev_rss & ETH_RSS_SCTP)
		flowkey_cfg |= flow_key_type[rss_level][RSS_SCTP_INDEX];

	if (ethdev_rss & ETH_RSS_L2_PAYLOAD)
		flowkey_cfg |= flow_key_type[rss_level][RSS_DMAC_INDEX];

	if (ethdev_rss & RSS_IPV6_EX_ENABLE)
		flowkey_cfg |= FLOW_KEY_TYPE_IPV6_EXT;

	if (ethdev_rss & ETH_RSS_PORT)
		flowkey_cfg |= FLOW_KEY_TYPE_PORT;

	if (ethdev_rss & ETH_RSS_NVGRE)
		flowkey_cfg |= FLOW_KEY_TYPE_NVGRE;

	if (ethdev_rss & ETH_RSS_VXLAN)
		flowkey_cfg |= FLOW_KEY_TYPE_VXLAN;

	if (ethdev_rss & ETH_RSS_GENEVE)
		flowkey_cfg |= FLOW_KEY_TYPE_GENEVE;

	if (ethdev_rss & ETH_RSS_GTPU)
		flowkey_cfg |= FLOW_KEY_TYPE_GTPU;

	return flowkey_cfg;
}
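
/*
 * Example mapping (illustrative): rss_hf = ETH_RSS_IP | ETH_RSS_TCP with
 * rss_level 0 selects the outer FLOW_KEY_TYPE_IPV4/IPV6 and
 * FLOW_KEY_TYPE_TCP words; level 1 picks the INNR_* variants instead, and
 * level 2 hashes on both outer and inner headers.
 */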

static void
nix_free_queue_mem(struct cnxk_eth_dev *dev)
{
	plt_free(dev->rqs);
	plt_free(dev->cqs);
	plt_free(dev->sqs);
	dev->rqs = NULL;
	dev->cqs = NULL;
	dev->sqs = NULL;
}

static int
nix_rss_default_setup(struct cnxk_eth_dev *dev)
{
	struct rte_eth_dev *eth_dev = dev->eth_dev;
	uint8_t rss_hash_level;
	uint32_t flowkey_cfg;
	uint64_t rss_hf;

	rss_hf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	rss_hash_level = ETH_RSS_LEVEL(rss_hf);
	if (rss_hash_level)
		rss_hash_level -= 1;

	flowkey_cfg = cnxk_rss_ethdev_to_nix(dev, rss_hf, rss_hash_level);
	return roc_nix_rss_default_setup(&dev->nix, flowkey_cfg);
}

static int
nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct cnxk_eth_qconf *tx_qconf = NULL;
	struct cnxk_eth_qconf *rx_qconf = NULL;
	struct cnxk_eth_rxq_sp *rxq_sp;
	struct cnxk_eth_txq_sp *txq_sp;
	int i, nb_rxq, nb_txq;
	void **txq, **rxq;

	nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
	nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);

	tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
	if (tx_qconf == NULL) {
		plt_err("Failed to allocate memory for tx_qconf");
		goto fail;
	}

	rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
	if (rx_qconf == NULL) {
		plt_err("Failed to allocate memory for rx_qconf");
		goto fail;
	}

	txq = eth_dev->data->tx_queues;
	for (i = 0; i < nb_txq; i++) {
		if (txq[i] == NULL) {
			tx_qconf[i].valid = false;
			plt_info("txq[%d] is already released", i);
			continue;
		}
		txq_sp = cnxk_eth_txq_to_sp(txq[i]);
		memcpy(&tx_qconf[i], &txq_sp->qconf, sizeof(*tx_qconf));
		tx_qconf[i].valid = true;
		dev_ops->tx_queue_release(txq[i]);
		eth_dev->data->tx_queues[i] = NULL;
	}

	rxq = eth_dev->data->rx_queues;
	for (i = 0; i < nb_rxq; i++) {
		if (rxq[i] == NULL) {
			rx_qconf[i].valid = false;
			plt_info("rxq[%d] is already released", i);
			continue;
		}
		rxq_sp = cnxk_eth_rxq_to_sp(rxq[i]);
		memcpy(&rx_qconf[i], &rxq_sp->qconf, sizeof(*rx_qconf));
		rx_qconf[i].valid = true;
		dev_ops->rx_queue_release(rxq[i]);
		eth_dev->data->rx_queues[i] = NULL;
	}

	dev->tx_qconf = tx_qconf;
	dev->rx_qconf = rx_qconf;
	return 0;

fail:
	free(tx_qconf);
	free(rx_qconf);
	return -ENOMEM;
}

static int
nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct cnxk_eth_qconf *tx_qconf = dev->tx_qconf;
	struct cnxk_eth_qconf *rx_qconf = dev->rx_qconf;
	int rc, i, nb_rxq, nb_txq;
	void **txq, **rxq;

	nb_rxq = RTE_MIN(dev->nb_rxq, eth_dev->data->nb_rx_queues);
	nb_txq = RTE_MIN(dev->nb_txq, eth_dev->data->nb_tx_queues);

	/* Setup tx & rx queues with the previous configuration so
	 * that the queues can be functional in cases like ports
	 * being started without reconfiguring queues.
	 *
	 * The usual reconfig sequence is like below:
	 * port_configure() {
	 *      if (reconfigure) {
	 *              queue_release()
	 *              queue_setup()
	 *      }
	 *      queue_configure() {
	 *              queue_release()
	 *              queue_setup()
	 *      }
	 * }
	 * port_start()
	 *
	 * In some application's control path, queue_configure() would
	 * NOT be invoked for TXQs/RXQs in port_configure().
	 * In such cases, queues can be functional after start as the
	 * queues are already set up in port_configure().
	 */
	for (i = 0; i < nb_txq; i++) {
		if (!tx_qconf[i].valid)
			continue;
		rc = dev_ops->tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc, 0,
					     &tx_qconf[i].conf.tx);
		if (rc) {
			plt_err("Failed to setup tx queue rc=%d", rc);
			txq = eth_dev->data->tx_queues;
			for (i -= 1; i >= 0; i--)
				dev_ops->tx_queue_release(txq[i]);
			goto fail;
		}
	}

	free(tx_qconf);
	tx_qconf = NULL;

	for (i = 0; i < nb_rxq; i++) {
		if (!rx_qconf[i].valid)
			continue;
		rc = dev_ops->rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc, 0,
					     &rx_qconf[i].conf.rx,
					     rx_qconf[i].mp);
		if (rc) {
			plt_err("Failed to setup rx queue rc=%d", rc);
			rxq = eth_dev->data->rx_queues;
			for (i -= 1; i >= 0; i--)
				dev_ops->rx_queue_release(rxq[i]);
			goto tx_queue_release;
		}
	}

	free(rx_qconf);
	rx_qconf = NULL;

	return 0;

tx_queue_release:
	txq = eth_dev->data->tx_queues;
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		dev_ops->tx_queue_release(txq[i]);
fail:
	if (tx_qconf)
		free(tx_qconf);
	if (rx_qconf)
		free(rx_qconf);

	return rc;
}

static uint16_t
nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
{
	RTE_SET_USED(queue);
	RTE_SET_USED(mbufs);
	RTE_SET_USED(pkts);

	return 0;
}

static void
nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
{
	/* These dummy functions are required for supporting
	 * some applications which reconfigure queues without
	 * stopping the Tx/Rx burst threads (e.g., the KNI app).
	 * When the queue context is saved, txq/rxqs are released,
	 * which would crash such an app since Rx/Tx bursts are
	 * still running on other lcores.
	 */
	eth_dev->tx_pkt_burst = nix_eth_nop_burst;
	eth_dev->rx_pkt_burst = nix_eth_nop_burst;
}

static int
nix_lso_tun_fmt_update(struct cnxk_eth_dev *dev)
{
	uint8_t udp_tun[ROC_NIX_LSO_TUN_MAX];
	uint8_t tun[ROC_NIX_LSO_TUN_MAX];
	struct roc_nix *nix = &dev->nix;
	int rc;

	rc = roc_nix_lso_fmt_get(nix, udp_tun, tun);
	if (rc)
		return rc;

	dev->lso_tun_fmt = ((uint64_t)tun[ROC_NIX_LSO_TUN_V4V4] |
			    (uint64_t)tun[ROC_NIX_LSO_TUN_V4V6] << 8 |
			    (uint64_t)tun[ROC_NIX_LSO_TUN_V6V4] << 16 |
			    (uint64_t)tun[ROC_NIX_LSO_TUN_V6V6] << 24);

	dev->lso_tun_fmt |= ((uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V4] << 32 |
			     (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V4V6] << 40 |
			     (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V4] << 48 |
			     (uint64_t)udp_tun[ROC_NIX_LSO_TUN_V6V6] << 56);

	return 0;
}
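
/*
 * lso_tun_fmt built above packs eight hardware LSO format indices into one
 * 64-bit word, 8 bits per tunnel combination: bytes 0-3 hold the
 * V4V4/V4V6/V6V4/V6V6 formats for plain tunnels and bytes 4-7 the same
 * combinations for UDP tunnels. The Tx fast path can then select a format
 * with a single shift-and-mask instead of a table lookup.
 */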

static int
nix_lso_fmt_setup(struct cnxk_eth_dev *dev)
{
	struct roc_nix *nix = &dev->nix;
	int rc;

	/* Nothing much to do if offload is not enabled */
	if (!(dev->tx_offloads &
	      (DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
	       DEV_TX_OFFLOAD_GENEVE_TNL_TSO | DEV_TX_OFFLOAD_GRE_TNL_TSO)))
		return 0;

	/* Setup LSO formats in AF. It's a no-op if another ethdev has
	 * already set them up.
	 */
	rc = roc_nix_lso_fmt_setup(nix);
	if (rc)
		return rc;

	return nix_lso_tun_fmt_update(dev);
}

int
cnxk_nix_configure(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	struct rte_eth_txmode *txmode = &conf->txmode;
	char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
	struct roc_nix_fc_cfg fc_cfg = {0};
	struct roc_nix *nix = &dev->nix;
	struct rte_ether_addr *ea;
	uint8_t nb_rxq, nb_txq;
	uint64_t rx_cfg;
	void *qs;
	int rc;

	rc = -EINVAL;

	/* Sanity checks */
	if (rte_eal_has_hugepages() == 0) {
		plt_err("Huge page is not configured");
		goto fail_configure;
	}

	if (conf->dcb_capability_en == 1) {
		plt_err("dcb enable is not supported");
		goto fail_configure;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		plt_err("Flow director is not supported");
		goto fail_configure;
	}

	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
		plt_err("Unsupported mq rx mode %d", rxmode->mq_mode);
		goto fail_configure;
	}

	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
		plt_err("Unsupported mq tx mode %d", txmode->mq_mode);
		goto fail_configure;
	}

	/* Free the resources allocated from the previous configure */
	if (dev->configured == 1) {
		/* Unregister queue irq's */
		roc_nix_unregister_queue_irqs(nix);

		/* Unregister CQ irqs if present */
		if (eth_dev->data->dev_conf.intr_conf.rxq)
			roc_nix_unregister_cq_irqs(nix);

		/* Set no-op functions */
		nix_set_nop_rxtx_function(eth_dev);
		/* Store queue config for later */
		rc = nix_store_queue_cfg_and_then_release(eth_dev);
		if (rc)
			goto fail_configure;
		roc_nix_tm_fini(nix);
		roc_nix_lf_free(nix);
	}

	dev->rx_offloads = rxmode->offloads;
	dev->tx_offloads = txmode->offloads;

	/* Prepare rx cfg */
	rx_cfg = ROC_NIX_LF_RX_CFG_DIS_APAD;
	if (dev->rx_offloads &
	    (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_OL4;
		rx_cfg |= ROC_NIX_LF_RX_CFG_CSUM_IL4;
	}
	rx_cfg |= (ROC_NIX_LF_RX_CFG_DROP_RE | ROC_NIX_LF_RX_CFG_L2_LEN_ERR |
		   ROC_NIX_LF_RX_CFG_LEN_IL4 | ROC_NIX_LF_RX_CFG_LEN_IL3 |
		   ROC_NIX_LF_RX_CFG_LEN_OL4 | ROC_NIX_LF_RX_CFG_LEN_OL3);

	nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
	nb_txq = RTE_MAX(data->nb_tx_queues, 1);

	/* Alloc a nix lf */
	rc = roc_nix_lf_alloc(nix, nb_rxq, nb_txq, rx_cfg);
	if (rc) {
		plt_err("Failed to init nix_lf rc=%d", rc);
		goto fail_configure;
	}

	nb_rxq = data->nb_rx_queues;
	nb_txq = data->nb_tx_queues;
	rc = -ENOMEM;
	if (nb_rxq) {
		/* Allocate memory for roc rq's and cq's */
		qs = plt_zmalloc(sizeof(struct roc_nix_rq) * nb_rxq, 0);
		if (!qs) {
			plt_err("Failed to alloc rqs");
			goto free_nix_lf;
		}
		dev->rqs = qs;

		qs = plt_zmalloc(sizeof(struct roc_nix_cq) * nb_rxq, 0);
		if (!qs) {
			plt_err("Failed to alloc cqs");
			goto free_nix_lf;
		}
		dev->cqs = qs;
	}

	if (nb_txq) {
		/* Allocate memory for roc sq's */
		qs = plt_zmalloc(sizeof(struct roc_nix_sq) * nb_txq, 0);
		if (!qs) {
			plt_err("Failed to alloc sqs");
			goto free_nix_lf;
		}
		dev->sqs = qs;
	}

	/* Re-enable NIX LF error interrupts */
	roc_nix_err_intr_ena_dis(nix, true);
	roc_nix_ras_intr_ena_dis(nix, true);

	if (nix->rx_ptp_ena) {
		plt_err("Both PTP and switch header enabled");
		goto free_nix_lf;
	}

	/* Setup LSO if needed */
	rc = nix_lso_fmt_setup(dev);
	if (rc) {
		plt_err("Failed to setup nix lso format fields, rc=%d", rc);
		goto free_nix_lf;
	}

	/* Configure RSS */
	rc = nix_rss_default_setup(dev);
	if (rc) {
		plt_err("Failed to configure rss rc=%d", rc);
		goto free_nix_lf;
	}

	/* Init the default TM scheduler hierarchy */
	rc = roc_nix_tm_init(nix);
	if (rc) {
		plt_err("Failed to init traffic manager, rc=%d", rc);
		goto free_nix_lf;
	}

	rc = roc_nix_tm_hierarchy_enable(nix, ROC_NIX_TM_DEFAULT, false);
	if (rc) {
		plt_err("Failed to enable default tm hierarchy, rc=%d", rc);
		goto tm_fini;
	}

	/* Register queue IRQs */
	rc = roc_nix_register_queue_irqs(nix);
	if (rc) {
		plt_err("Failed to register queue interrupts rc=%d", rc);
		goto tm_fini;
	}

	/* Register cq IRQs */
	if (eth_dev->data->dev_conf.intr_conf.rxq) {
		if (eth_dev->data->nb_rx_queues > dev->nix.cints) {
			plt_err("Rx interrupt cannot be enabled, rxq > %d",
				dev->nix.cints);
			goto q_irq_fini;
		}
		/* Rx interrupt feature cannot work with vector mode because
		 * vector mode does not process packets unless at least 4 pkts
		 * are received, while cq interrupts are generated even for
		 * a single packet, which also increases latency.
		 */
		dev->scalar_ena = true;

		rc = roc_nix_register_cq_irqs(nix);
		if (rc) {
			plt_err("Failed to register CQ interrupts rc=%d", rc);
			goto q_irq_fini;
		}
	}

	/* Configure loop back mode */
	rc = roc_nix_mac_loopback_enable(nix,
					 eth_dev->data->dev_conf.lpbk_mode);
	if (rc) {
		plt_err("Failed to configure cgx loop back mode rc=%d", rc);
		goto cq_fini;
	}

	/* Init flow control configuration */
	fc_cfg.cq_cfg_valid = false;
	fc_cfg.rxchan_cfg.enable = true;
	rc = roc_nix_fc_config_set(nix, &fc_cfg);
	if (rc) {
		plt_err("Failed to initialize flow control rc=%d", rc);
		goto cq_fini;
	}

	/* Update flow control configuration to PMD */
	rc = nix_init_flow_ctrl_config(eth_dev);
	if (rc) {
		plt_err("Failed to initialize flow control rc=%d", rc);
		goto cq_fini;
	}

	/*
	 * Restore queue config when one reconfigure follows another without
	 * any queue configure invocation from the application in between.
	 */
	if (dev->configured == 1) {
		rc = nix_restore_queue_cfg(eth_dev);
		if (rc)
			goto cq_fini;
	}

	/* Update the mac address */
	ea = eth_dev->data->mac_addrs;
	memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
	if (rte_is_zero_ether_addr(ea))
		rte_eth_random_addr((uint8_t *)ea);

	rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);

	plt_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
		    " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 "",
		    eth_dev->data->port_id, ea_fmt, nb_rxq, nb_txq,
		    dev->rx_offloads, dev->tx_offloads);

	/* All good */
	dev->configured = 1;
	dev->nb_rxq = data->nb_rx_queues;
	dev->nb_txq = data->nb_tx_queues;
	return 0;

cq_fini:
	roc_nix_unregister_cq_irqs(nix);
q_irq_fini:
	roc_nix_unregister_queue_irqs(nix);
tm_fini:
	roc_nix_tm_fini(nix);
free_nix_lf:
	nix_free_queue_mem(dev);
	rc |= roc_nix_lf_free(nix);
fail_configure:
	dev->configured = 0;
	return rc;
}
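
/*
 * The error labels above unwind strictly in reverse order of setup
 * (CQ IRQs -> queue IRQs -> TM -> queue memory and LF), so a failure at
 * any step releases exactly what was acquired before it.
 */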

int
cnxk_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct roc_nix_sq *sq = &dev->sqs[qid];
	int rc = -EINVAL;

	if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	rc = roc_nix_tm_sq_aura_fc(sq, true);
	if (rc) {
		plt_err("Failed to enable sq aura fc, txq=%u, rc=%d", qid, rc);
		return rc;
	}

	data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}

int
cnxk_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct roc_nix_sq *sq = &dev->sqs[qid];
	int rc;

	if (data->tx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	rc = roc_nix_tm_sq_aura_fc(sq, false);
	if (rc) {
		plt_err("Failed to disable sqb aura fc, txq=%u, rc=%d", qid,
			rc);
		return rc;
	}

	data->tx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

static int
cnxk_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qid)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct roc_nix_rq *rq = &dev->rqs[qid];
	int rc;

	if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	rc = roc_nix_rq_ena_dis(rq, true);
	if (rc) {
		plt_err("Failed to enable rxq=%u, rc=%d", qid, rc);
		return rc;
	}

	data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
}

static int
cnxk_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qid)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct roc_nix_rq *rq = &dev->rqs[qid];
	int rc;

	if (data->rx_queue_state[qid] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	rc = roc_nix_rq_ena_dis(rq, false);
	if (rc) {
		plt_err("Failed to disable rxq=%u, rc=%d", qid, rc);
		return rc;
	}

	data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}

int
cnxk_nix_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct rte_mbuf *rx_pkts[32];
	int count, i, j, rc;
	void *rxq;

	/* Disable switch hdr pkind */
	roc_nix_switch_hdr_set(&dev->nix, 0);

	/* Stop link change events */
	if (!roc_nix_is_vf_or_sdp(&dev->nix))
		roc_nix_mac_link_event_start_stop(&dev->nix, false);

	/* Disable Rx via NPC */
	roc_nix_npc_rx_ena_dis(&dev->nix, false);

	/* Stop rx queues and free up pkts pending */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rc = dev_ops->rx_queue_stop(eth_dev, i);
		if (rc)
			continue;

		rxq = eth_dev->data->rx_queues[i];
		count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
		while (count) {
			for (j = 0; j < count; j++)
				rte_pktmbuf_free(rx_pkts[j]);
			count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
		}
	}

	/* Stop tx queues */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		dev_ops->tx_queue_stop(eth_dev, i);

	return 0;
}
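
/*
 * The drain loop above uses rx_pkt_burst_no_offload, which (as the name
 * suggests) is the plain scalar receive path, so flushing pending packets
 * does not depend on whichever offload/vector Rx variant the port was
 * configured with.
 */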

int
cnxk_nix_dev_start(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	int rc, i;

	if (eth_dev->data->nb_rx_queues != 0) {
		rc = nix_recalc_mtu(eth_dev);
		if (rc)
			return rc;
	}

	/* Start rx queues */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rc = cnxk_nix_rx_queue_start(eth_dev, i);
		if (rc)
			return rc;
	}

	/* Start tx queues */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		rc = cnxk_nix_tx_queue_start(eth_dev, i);
		if (rc)
			return rc;
	}

	/* Update Flow control configuration */
	rc = nix_update_flow_ctrl_config(eth_dev);
	if (rc) {
		plt_err("Failed to enable flow control. error code(%d)", rc);
		return rc;
	}

	/* Enable Rx in NPC */
	rc = roc_nix_npc_rx_ena_dis(&dev->nix, true);
	if (rc) {
		plt_err("Failed to enable NPC rx %d", rc);
		return rc;
	}

	cnxk_nix_toggle_flag_link_cfg(dev, true);

	/* Start link change events */
	if (!roc_nix_is_vf_or_sdp(&dev->nix)) {
		rc = roc_nix_mac_link_event_start_stop(&dev->nix, true);
		if (rc) {
			plt_err("Failed to start cgx link event %d", rc);
			goto rx_disable;
		}
	}

	cnxk_nix_toggle_flag_link_cfg(dev, false);

	return 0;

rx_disable:
	roc_nix_npc_rx_ena_dis(&dev->nix, false);
	cnxk_nix_toggle_flag_link_cfg(dev, false);
	return rc;
}

/* CNXK platform independent eth dev ops */
struct eth_dev_ops cnxk_eth_dev_ops = {
	.mtu_set = cnxk_nix_mtu_set,
	.mac_addr_add = cnxk_nix_mac_addr_add,
	.mac_addr_remove = cnxk_nix_mac_addr_del,
	.mac_addr_set = cnxk_nix_mac_addr_set,
	.dev_infos_get = cnxk_nix_info_get,
	.link_update = cnxk_nix_link_update,
	.tx_queue_release = cnxk_nix_tx_queue_release,
	.rx_queue_release = cnxk_nix_rx_queue_release,
	.dev_stop = cnxk_nix_dev_stop,
	.tx_queue_start = cnxk_nix_tx_queue_start,
	.rx_queue_start = cnxk_nix_rx_queue_start,
	.rx_queue_stop = cnxk_nix_rx_queue_stop,
	.dev_supported_ptypes_get = cnxk_nix_supported_ptypes_get,
	.promiscuous_enable = cnxk_nix_promisc_enable,
	.promiscuous_disable = cnxk_nix_promisc_disable,
	.allmulticast_enable = cnxk_nix_allmulticast_enable,
	.allmulticast_disable = cnxk_nix_allmulticast_disable,
	.rx_burst_mode_get = cnxk_nix_rx_burst_mode_get,
	.tx_burst_mode_get = cnxk_nix_tx_burst_mode_get,
	.flow_ctrl_get = cnxk_nix_flow_ctrl_get,
	.flow_ctrl_set = cnxk_nix_flow_ctrl_set,
	.dev_set_link_up = cnxk_nix_set_link_up,
	.dev_set_link_down = cnxk_nix_set_link_down,
	.get_module_info = cnxk_nix_get_module_info,
	.get_module_eeprom = cnxk_nix_get_module_eeprom,
	.rx_queue_intr_enable = cnxk_nix_rx_queue_intr_enable,
	.rx_queue_intr_disable = cnxk_nix_rx_queue_intr_disable,
	.pool_ops_supported = cnxk_nix_pool_ops_supported,
	.queue_stats_mapping_set = cnxk_nix_queue_stats_mapping,
	.stats_get = cnxk_nix_stats_get,
	.stats_reset = cnxk_nix_stats_reset,
};
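
/*
 * Note: the ops table above is deliberately non-static and carries no
 * .dev_configure, .dev_start or .tx_queue_stop entries here; the cn9k and
 * cn10k platform layers are expected to fill in or override such entries
 * with their own variants before ethdev registration.
 */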

static int
cnxk_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	struct roc_nix *nix = &dev->nix;
	struct rte_pci_device *pci_dev;
	int rc, max_entries;

	eth_dev->dev_ops = &cnxk_eth_dev_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Parse devargs string */
	rc = cnxk_ethdev_parse_devargs(eth_dev->device->devargs, dev);
	if (rc) {
		plt_err("Failed to parse devargs rc=%d", rc);
		goto error;
	}

	/* Initialize base roc nix */
	nix->pci_dev = pci_dev;
	rc = roc_nix_dev_init(nix);
	if (rc) {
		plt_err("Failed to initialize roc nix rc=%d", rc);
		goto error;
	}

	/* Register up msg callbacks */
	roc_nix_mac_link_cb_register(nix, cnxk_eth_dev_link_status_cb);

	dev->eth_dev = eth_dev;
	dev->configured = 0;
	dev->ptype_disable = 0;

	/* For VFs, the returned max_entries will be 0. But to keep a default
	 * mac address, one entry must be allocated, so set it up to 1.
	 */
	if (roc_nix_is_vf_or_sdp(nix))
		max_entries = 1;
	else
		max_entries = roc_nix_mac_max_entries_get(nix);

	if (max_entries <= 0) {
		plt_err("Failed to get max entries for mac addr");
		rc = -ENOTSUP;
		goto dev_fini;
	}

	eth_dev->data->mac_addrs =
		rte_zmalloc("mac_addr", max_entries * RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		plt_err("Failed to allocate memory for mac addr");
		rc = -ENOMEM;
		goto dev_fini;
	}

	dev->max_mac_entries = max_entries;

	/* Get mac address */
	rc = roc_nix_npc_mac_addr_get(nix, dev->mac_addr);
	if (rc) {
		plt_err("Failed to get mac addr, rc=%d", rc);
		goto free_mac_addrs;
	}

	/* Update the mac address */
	memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);

	if (!roc_nix_is_vf_or_sdp(nix)) {
		/* Sync same MAC address to CGX/RPM table */
		rc = roc_nix_mac_addr_set(nix, dev->mac_addr);
		if (rc) {
			plt_err("Failed to set mac addr, rc=%d", rc);
			goto free_mac_addrs;
		}
	}

	/* Union of all capabilities supported by CNXK.
	 * Platform specific capabilities will be
	 * updated later.
	 */
	dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
	dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
	dev->speed_capa = nix_get_speed_capa(dev);

	/* Initialize roc npc */
	plt_nix_dbg("Port=%d pf=%d vf=%d ver=%s hwcap=0x%" PRIx64
		    " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
		    eth_dev->data->port_id, roc_nix_get_pf(nix),
		    roc_nix_get_vf(nix), CNXK_ETH_DEV_PMD_VERSION, dev->hwcap,
		    dev->rx_offload_capa, dev->tx_offload_capa);

	return 0;

free_mac_addrs:
	rte_free(eth_dev->data->mac_addrs);
dev_fini:
	roc_nix_dev_fini(nix);
error:
	plt_err("Failed to init nix eth_dev rc=%d", rc);
	return rc;
}

static int
cnxk_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
{
	struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
	const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
	struct roc_nix *nix = &dev->nix;
	int rc, i;

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Clear the flag since we are closing down */
	dev->configured = 0;

	roc_nix_npc_rx_ena_dis(nix, false);

	/* Disable link status events */
	roc_nix_mac_link_event_start_stop(nix, false);

	/* Free up SQs */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		dev_ops->tx_queue_release(eth_dev->data->tx_queues[i]);
		eth_dev->data->tx_queues[i] = NULL;
	}
	eth_dev->data->nb_tx_queues = 0;

	/* Free up RQ's and CQ's */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		dev_ops->rx_queue_release(eth_dev->data->rx_queues[i]);
		eth_dev->data->rx_queues[i] = NULL;
	}
	eth_dev->data->nb_rx_queues = 0;

	/* Free tm resources */
	roc_nix_tm_fini(nix);

	/* Unregister queue irqs */
	roc_nix_unregister_queue_irqs(nix);

	/* Unregister cq irqs */
	if (eth_dev->data->dev_conf.intr_conf.rxq)
		roc_nix_unregister_cq_irqs(nix);

	/* Free ROC RQ's, SQ's and CQ's memory */
	nix_free_queue_mem(dev);

	/* Free nix lf resources */
	rc = roc_nix_lf_free(nix);
	if (rc)
		plt_err("Failed to free nix lf, rc=%d", rc);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	/* Check if mbox close is needed */
	if (!mbox_close)
		return 0;

	rc = roc_nix_dev_fini(nix);
	/* Can be freed later by PMD if NPA LF is in use */
	if (rc == -EAGAIN) {
		eth_dev->data->dev_private = NULL;
		return 0;
	} else if (rc) {
		plt_err("Failed in nix dev fini, rc=%d", rc);
	}

	return rc;
}

static int
cnxk_nix_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	struct roc_nix *nix;
	int rc = -EINVAL;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (eth_dev) {
		/* Cleanup eth dev */
		rc = cnxk_eth_dev_uninit(eth_dev, true);
		if (rc)
			return rc;

		rte_eth_dev_release_port(eth_dev);
	}

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Check if this device is hosting common resource */
	nix = roc_idev_npa_nix_get();
	if (!nix || nix->pci_dev != pci_dev)
		return 0;

	/* Try nix fini now */
	rc = roc_nix_dev_fini(nix);
	if (rc == -EAGAIN) {
		plt_info("%s: common resource in use by other devices",
			 pci_dev->name);
		goto exit;
	} else if (rc) {
		plt_err("Failed in nix dev fini, rc=%d", rc);
		goto exit;
	}

	/* Free device pointer as rte_ethdev does not have it anymore */
	rte_free(nix);
exit:
	return rc;
}

static int
cnxk_nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	int rc;

	RTE_SET_USED(pci_drv);

	rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct cnxk_eth_dev),
					   cnxk_eth_dev_init);

	/* On error on secondary, recheck if port exists in primary or
	 * is in the middle of detaching.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
		if (!rte_eth_dev_allocated(pci_dev->device.name))
			return 0;

	return rc;
}