1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
8 #include <rte_ethdev_pci.h>
10 #include <rte_malloc.h>
12 #include <rte_mbuf_pool_ops.h>
13 #include <rte_mempool.h>
15 #include "otx2_ethdev.h"
18 otx2_eth_set_rx_function(struct rte_eth_dev *eth_dev)
20 RTE_SET_USED(eth_dev);
24 otx2_eth_set_tx_function(struct rte_eth_dev *eth_dev)
26 RTE_SET_USED(eth_dev);
29 static inline uint64_t
30 nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
32 uint64_t capa = NIX_RX_OFFLOAD_CAPA;
34 if (otx2_dev_is_vf(dev))
35 capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
40 static inline uint64_t
41 nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
45 return NIX_TX_OFFLOAD_CAPA;
48 static const struct otx2_dev_ops otx2_dev_ops = {
49 .link_status_update = otx2_eth_dev_link_status_update,
53 nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
55 struct otx2_mbox *mbox = dev->mbox;
56 struct nix_lf_alloc_req *req;
57 struct nix_lf_alloc_rsp *rsp;
60 req = otx2_mbox_alloc_msg_nix_lf_alloc(mbox);
64 /* XQE_SZ should be in sync with NIX_CQ_ENTRY_SZ */
65 RTE_BUILD_BUG_ON(NIX_CQ_ENTRY_SZ != 128);
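/* Annotation: NIX_XQESZ_W16 selects 16 x 8-byte words per CQE, i.e. the
 * 128-byte entry size asserted by the build-time check above.
 */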
66 req->xqe_sz = NIX_XQESZ_W16;
67 req->rss_sz = dev->rss_info.rss_size;
68 req->rss_grps = NIX_RSS_GRPS;
69 req->npa_func = otx2_npa_pf_func_get();
70 req->sso_func = otx2_sso_pf_func_get();
71 req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
72 if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
73 DEV_RX_OFFLOAD_UDP_CKSUM)) {
74 req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
75 req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
78 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
82 dev->sqb_size = rsp->sqb_size;
83 dev->tx_chan_base = rsp->tx_chan_base;
84 dev->rx_chan_base = rsp->rx_chan_base;
85 dev->rx_chan_cnt = rsp->rx_chan_cnt;
86 dev->tx_chan_cnt = rsp->tx_chan_cnt;
87 dev->lso_tsov4_idx = rsp->lso_tsov4_idx;
88 dev->lso_tsov6_idx = rsp->lso_tsov6_idx;
89 dev->lf_tx_stats = rsp->lf_tx_stats;
90 dev->lf_rx_stats = rsp->lf_rx_stats;
91 dev->cints = rsp->cints;
92 dev->qints = rsp->qints;
93 dev->npc_flow.channel = dev->rx_chan_base;
99 nix_lf_free(struct otx2_eth_dev *dev)
101 struct otx2_mbox *mbox = dev->mbox;
102 struct nix_lf_free_req *req;
103 struct ndc_sync_op *ndc_req;
106 /* Sync NDC-NIX for LF */
107 ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
108 ndc_req->nix_lf_tx_sync = 1;
109 ndc_req->nix_lf_rx_sync = 1;
110 rc = otx2_mbox_process(mbox);
112 otx2_err("Error on NDC-NIX-[TX, RX] LF sync, rc %d", rc);
114 req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
115 /* Let the AF driver free all NPC entries allocated
116 * for this NIX LF using the NPC mailbox.
120 return otx2_mbox_process(mbox);
124 otx2_cgx_rxtx_start(struct otx2_eth_dev *dev)
126 struct otx2_mbox *mbox = dev->mbox;
128 if (otx2_dev_is_vf(dev))
131 otx2_mbox_alloc_msg_cgx_start_rxtx(mbox);
133 return otx2_mbox_process(mbox);
137 otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev)
139 struct otx2_mbox *mbox = dev->mbox;
141 if (otx2_dev_is_vf(dev))
144 otx2_mbox_alloc_msg_cgx_stop_rxtx(mbox);
146 return otx2_mbox_process(mbox);
150 nix_rx_queue_reset(struct otx2_eth_rxq *rxq)
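/* Annotation: nix_q_size_e encodes ring sizes in powers of four starting at
 * 16 entries, so 16 << (2 * qsize) yields 16, 64, 256, ... descriptors.
 */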
156 static inline uint32_t
157 nix_qsize_to_val(enum nix_q_size_e qsize)
159 return (16UL << (qsize * 2));
162 static inline enum nix_q_size_e
163 nix_qsize_clampup_get(struct otx2_eth_dev *dev, uint32_t val)
167 if (otx2_ethdev_fixup_is_min_4k_q(dev))
172 for (; i < nix_q_size_max; i++)
173 if (val <= nix_qsize_to_val(i))
176 if (i >= nix_q_size_max)
177 i = nix_q_size_max - 1;
183 nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
184 uint16_t qid, struct otx2_eth_rxq *rxq, struct rte_mempool *mp)
186 struct otx2_mbox *mbox = dev->mbox;
187 const struct rte_memzone *rz;
188 uint32_t ring_size, cq_size;
189 struct nix_aq_enq_req *aq;
194 ring_size = cq_size * NIX_CQ_ENTRY_SZ;
195 rz = rte_eth_dma_zone_reserve(eth_dev, "cq", qid, ring_size,
196 NIX_CQ_ALIGN, dev->node);
198 otx2_err("Failed to allocate mem for cq hw ring");
202 memset(rz->addr, 0, rz->len);
203 rxq->desc = (uintptr_t)rz->addr;
204 rxq->qmask = cq_size - 1;
206 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
208 aq->ctype = NIX_AQ_CTYPE_CQ;
209 aq->op = NIX_AQ_INSTOP_INIT;
213 aq->cq.qsize = rxq->qsize;
214 aq->cq.base = rz->iova;
215 aq->cq.avg_level = 0xff;
216 aq->cq.cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
217 aq->cq.cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
219 /* Many to one reduction */
220 aq->cq.qint_idx = qid % dev->qints;
222 if (otx2_ethdev_fixup_is_limit_cq_full(dev)) {
223 uint16_t min_rx_drop;
224 const float rx_cq_skid = 1024 * 256;
226 min_rx_drop = ceil(rx_cq_skid / (float)cq_size);
227 aq->cq.drop = min_rx_drop;
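/* Annotation: the CQ drop level appears to be expressed in units of
 * cq_size / 256, so reserving a skid of 1024 CQEs works out to
 * ceil(1024 * 256 / cq_size) as computed above.
 */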
231 rc = otx2_mbox_process(mbox);
233 otx2_err("Failed to init cq context");
237 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
239 aq->ctype = NIX_AQ_CTYPE_RQ;
240 aq->op = NIX_AQ_INSTOP_INIT;
243 aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
245 aq->rq.lpb_aura = npa_lf_aura_handle_to_aura(mp->pool_id);
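/* Annotation: first_skip tells the HW how far into the buffer to start
 * writing packet data: past the mbuf header, the private area and the
 * headroom, which is also what rxq->data_off records below.
 */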
246 first_skip = (sizeof(struct rte_mbuf));
247 first_skip += RTE_PKTMBUF_HEADROOM;
248 first_skip += rte_pktmbuf_priv_size(mp);
249 rxq->data_off = first_skip;
251 first_skip /= 8; /* Expressed in number of dwords */
252 aq->rq.first_skip = first_skip;
253 aq->rq.later_skip = (sizeof(struct rte_mbuf) / 8);
254 aq->rq.flow_tagw = 32; /* 32-bits */
255 aq->rq.lpb_sizem1 = rte_pktmbuf_data_room_size(mp);
256 aq->rq.lpb_sizem1 += rte_pktmbuf_priv_size(mp);
257 aq->rq.lpb_sizem1 += sizeof(struct rte_mbuf);
258 aq->rq.lpb_sizem1 /= 8;
259 aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
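/* Annotation: lpb_sizem1 now holds the full buffer length (mbuf header +
 * private area + data room) in 8-byte units, minus one as the HW expects.
 */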
261 aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
262 aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
263 aq->rq.rq_int_ena = 0;
264 /* Many to one reduction */
265 aq->rq.qint_idx = qid % dev->qints;
267 if (otx2_ethdev_fixup_is_limit_cq_full(dev))
268 aq->rq.xqe_drop_ena = 1;
270 rc = otx2_mbox_process(mbox);
272 otx2_err("Failed to init rq context");
282 nix_rq_enb_dis(struct rte_eth_dev *eth_dev,
283 struct otx2_eth_rxq *rxq, const bool enb)
285 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
286 struct otx2_mbox *mbox = dev->mbox;
287 struct nix_aq_enq_req *aq;
289 /* Pkts will be dropped silently if RQ is disabled */
290 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
292 aq->ctype = NIX_AQ_CTYPE_RQ;
293 aq->op = NIX_AQ_INSTOP_WRITE;
296 aq->rq_mask.ena = ~(aq->rq_mask.ena);
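/* Annotation: setting every bit of the 'ena' mask makes this WRITE op
 * update only the enable bit of the RQ context, leaving the rest untouched.
 */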
298 return otx2_mbox_process(mbox);
302 nix_cq_rq_uninit(struct rte_eth_dev *eth_dev, struct otx2_eth_rxq *rxq)
304 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
305 struct otx2_mbox *mbox = dev->mbox;
306 struct nix_aq_enq_req *aq;
309 /* RQ is already disabled */
311 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
313 aq->ctype = NIX_AQ_CTYPE_CQ;
314 aq->op = NIX_AQ_INSTOP_WRITE;
317 aq->cq_mask.ena = ~(aq->cq_mask.ena);
319 rc = otx2_mbox_process(mbox);
321 otx2_err("Failed to disable cq context");
329 nix_get_data_off(struct otx2_eth_dev *dev)
337 otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id)
339 struct rte_mbuf mb_def;
342 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
343 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
344 offsetof(struct rte_mbuf, data_off) != 2);
345 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
346 offsetof(struct rte_mbuf, data_off) != 4);
347 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
348 offsetof(struct rte_mbuf, data_off) != 6);
350 mb_def.data_off = RTE_PKTMBUF_HEADROOM + nix_get_data_off(dev);
351 mb_def.port = port_id;
352 rte_mbuf_refcnt_set(&mb_def, 1);
354 /* Prevent compiler reordering: rearm_data covers previous fields */
355 rte_compiler_barrier();
356 tmp = (uint64_t *)&mb_def.rearm_data;
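/* Annotation: the build-time checks above guarantee that data_off, refcnt,
 * nb_segs and port all live inside the 8-byte rearm_data word, so the Rx
 * fast path can re-initialise them with a single 64-bit store of the value
 * returned here.
 */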
362 otx2_nix_rx_queue_release(void *rx_queue)
364 struct otx2_eth_rxq *rxq = rx_queue;
369 otx2_nix_dbg("Releasing rxq %u", rxq->rq);
370 nix_cq_rq_uninit(rxq->eth_dev, rxq);
375 otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
376 uint16_t nb_desc, unsigned int socket,
377 const struct rte_eth_rxconf *rx_conf,
378 struct rte_mempool *mp)
380 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
381 struct rte_mempool_ops *ops;
382 struct otx2_eth_rxq *rxq;
383 const char *platform_ops;
384 enum nix_q_size_e qsize;
390 /* Compile time check to make sure all fast path elements fit in a cache line */
391 RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_rxq, slow_path_start) >= 128);
394 if (rx_conf->rx_deferred_start == 1) {
395 otx2_err("Deferred Rx start is not supported");
399 platform_ops = rte_mbuf_platform_mempool_ops();
400 /* This driver needs octeontx2_npa mempool ops to work */
401 ops = rte_mempool_get_ops(mp->ops_index);
402 if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
403 otx2_err("mempool ops should be of octeontx2_npa type");
407 if (mp->pool_id == 0) {
408 otx2_err("Invalid pool_id");
412 /* Free memory prior to re-allocation if needed */
413 if (eth_dev->data->rx_queues[rq] != NULL) {
414 otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq);
415 otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]);
416 eth_dev->data->rx_queues[rq] = NULL;
419 offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
420 dev->rx_offloads |= offloads;
422 /* Find the CQ queue size */
423 qsize = nix_qsize_clampup_get(dev, nb_desc);
424 /* Allocate rxq memory */
425 rxq = rte_zmalloc_socket("otx2 rxq", sizeof(*rxq), OTX2_ALIGN, socket);
427 otx2_err("Failed to allocate rq=%d", rq);
432 rxq->eth_dev = eth_dev;
434 rxq->cq_door = dev->base + NIX_LF_CQ_OP_DOOR;
435 rxq->cq_status = (int64_t *)(dev->base + NIX_LF_CQ_OP_STATUS);
436 rxq->wdata = (uint64_t)rq << 32;
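/* Annotation: presumably the CQ number is carried in the upper 32 bits of
 * the operand used with the CQ_OP_DOOR/CQ_OP_STATUS registers, hence the
 * shift by 32.
 */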
437 rxq->aura = npa_lf_aura_handle_to_aura(mp->pool_id);
438 rxq->mbuf_initializer = otx2_nix_rxq_mbuf_setup(dev,
439 eth_dev->data->port_id);
440 rxq->offloads = offloads;
442 rxq->qlen = nix_qsize_to_val(qsize);
445 /* Alloc completion queue */
446 rc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp);
448 otx2_err("Failed to allocate rxq=%u", rq);
452 rxq->qconf.socket_id = socket;
453 rxq->qconf.nb_desc = nb_desc;
454 rxq->qconf.mempool = mp;
455 memcpy(&rxq->qconf.conf.rx, rx_conf, sizeof(struct rte_eth_rxconf));
457 nix_rx_queue_reset(rxq);
458 otx2_nix_dbg("rq=%d pool=%s qsize=%d nb_desc=%d->%d",
459 rq, mp->name, qsize, nb_desc, rxq->qlen);
461 eth_dev->data->rx_queues[rq] = rxq;
462 eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;
466 otx2_nix_rx_queue_release(rxq);
471 static inline uint8_t
472 nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
475 * A maximum of three segments can be supported with W8; choose
476 * NIX_MAXSQESZ_W16 for multi-segment offload.
478 if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
479 return NIX_MAXSQESZ_W16;
481 return NIX_MAXSQESZ_W8;
485 nix_sq_init(struct otx2_eth_txq *txq)
487 struct otx2_eth_dev *dev = txq->dev;
488 struct otx2_mbox *mbox = dev->mbox;
489 struct nix_aq_enq_req *sq;
494 if (txq->sqb_pool->pool_id == 0)
497 rc = otx2_nix_tm_get_leaf_data(dev, txq->sq, &rr_quantum, &smq);
499 otx2_err("Failed to get sq->smq(leaf node), rc=%d", rc);
503 sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
505 sq->ctype = NIX_AQ_CTYPE_SQ;
506 sq->op = NIX_AQ_INSTOP_INIT;
507 sq->sq.max_sqe_size = nix_sq_max_sqe_sz(txq);
510 sq->sq.smq_rr_quantum = rr_quantum;
511 sq->sq.default_chan = dev->tx_chan_base;
512 sq->sq.sqe_stype = NIX_STYPE_STF;
514 if (sq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
515 sq->sq.sqe_stype = NIX_STYPE_STP;
517 npa_lf_aura_handle_to_aura(txq->sqb_pool->pool_id);
518 sq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
519 sq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
520 sq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
521 sq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
523 /* Many to one reduction */
524 sq->sq.qint_idx = txq->sq % dev->qints;
526 return otx2_mbox_process(mbox);
530 nix_sq_uninit(struct otx2_eth_txq *txq)
532 struct otx2_eth_dev *dev = txq->dev;
533 struct otx2_mbox *mbox = dev->mbox;
534 struct ndc_sync_op *ndc_req;
535 struct nix_aq_enq_rsp *rsp;
536 struct nix_aq_enq_req *aq;
537 uint16_t sqes_per_sqb;
541 otx2_nix_dbg("Cleaning up sq %u", txq->sq);
543 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
545 aq->ctype = NIX_AQ_CTYPE_SQ;
546 aq->op = NIX_AQ_INSTOP_READ;
548 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
552 /* Check if sq is already cleaned up */
557 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
559 aq->ctype = NIX_AQ_CTYPE_SQ;
560 aq->op = NIX_AQ_INSTOP_WRITE;
562 aq->sq_mask.ena = ~aq->sq_mask.ena;
565 rc = otx2_mbox_process(mbox);
569 /* Read SQ and free sqb's */
570 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
572 aq->ctype = NIX_AQ_CTYPE_SQ;
573 aq->op = NIX_AQ_INSTOP_READ;
575 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
580 otx2_err("SQ has pending sqe's");
582 count = aq->sq.sqb_count;
583 sqes_per_sqb = 1 << txq->sqes_per_sqb_log2;
584 /* Free SQB's that are used */
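/* Annotation: each SQB stores the pointer to the next SQB in its last SQE
 * slot, so the chain can be walked from head_sqb while returning every
 * buffer to the aura.
 */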
585 sqb_buf = (void *)rsp->sq.head_sqb;
589 next_sqb = *(void **)((uintptr_t)sqb_buf + ((sqes_per_sqb - 1) *
590 nix_sq_max_sqe_sz(txq)));
591 npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
597 /* Free the next-to-use SQB */
598 if (rsp->sq.next_sqb)
599 npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
602 /* Sync NDC-NIX-TX for LF */
603 ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
604 ndc_req->nix_lf_tx_sync = 1;
605 rc = otx2_mbox_process(mbox);
607 otx2_err("Error on NDC-NIX-TX LF sync, rc %d", rc);
613 nix_sqb_aura_limit_cfg(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
615 struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
616 struct npa_aq_enq_req *aura_req;
618 aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
619 aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
620 aura_req->ctype = NPA_AQ_CTYPE_AURA;
621 aura_req->op = NPA_AQ_INSTOP_WRITE;
623 aura_req->aura.limit = nb_sqb_bufs;
624 aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);
626 return otx2_mbox_process(npa_lf->mbox);
630 nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
632 struct otx2_eth_dev *dev = txq->dev;
633 uint16_t sqes_per_sqb, nb_sqb_bufs;
634 char name[RTE_MEMPOOL_NAMESIZE];
635 struct rte_mempool_objsz sz;
636 struct npa_aura_s *aura;
637 uint32_t tmp, blk_sz;
639 aura = (struct npa_aura_s *)((uintptr_t)txq->fc_mem + OTX2_ALIGN);
640 snprintf(name, sizeof(name), "otx2_sqb_pool_%d_%d", port, txq->sq);
641 blk_sz = dev->sqb_size;
643 if (nix_sq_max_sqe_sz(txq) == NIX_MAXSQESZ_W16)
644 sqes_per_sqb = (dev->sqb_size / 8) / 16;
646 sqes_per_sqb = (dev->sqb_size / 8) / 8;
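/* Annotation: sqb_size is in bytes; dividing by 8 gives 64-bit words, and
 * each SQE occupies 16 or 8 words depending on the maximum SQE size.
 */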
648 nb_sqb_bufs = nb_desc / sqes_per_sqb;
649 /* Clamp up to the SQB count passed via devargs */
650 nb_sqb_bufs = RTE_MIN(dev->max_sqb_count, RTE_MAX(NIX_MIN_SQB,
651 nb_sqb_bufs + NIX_SQB_LIST_SPACE));
653 txq->sqb_pool = rte_mempool_create_empty(name, NIX_MAX_SQB, blk_sz,
655 MEMPOOL_F_NO_SPREAD);
656 txq->nb_sqb_bufs = nb_sqb_bufs;
657 txq->sqes_per_sqb_log2 = (uint16_t)rte_log2_u32(sqes_per_sqb);
658 txq->nb_sqb_bufs_adj = nb_sqb_bufs -
659 RTE_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb;
660 txq->nb_sqb_bufs_adj =
661 (NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
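/* Annotation: nb_sqb_bufs_adj is the flow-control threshold; it appears to
 * discount the one SQE slot per SQB used for the next-SQB pointer and is
 * then scaled to NIX_SQB_LOWER_THRESH percent so some SQBs stay in reserve.
 */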
663 if (txq->sqb_pool == NULL) {
664 otx2_err("Failed to allocate sqe mempool");
668 memset(aura, 0, sizeof(*aura));
670 aura->fc_addr = txq->fc_iova;
671 aura->fc_hyst_bits = 0; /* Store count on all updates */
672 if (rte_mempool_set_ops_byname(txq->sqb_pool, "octeontx2_npa", aura)) {
673 otx2_err("Failed to set ops for sqe mempool");
676 if (rte_mempool_populate_default(txq->sqb_pool) < 0) {
677 otx2_err("Failed to populate sqe mempool");
681 tmp = rte_mempool_calc_obj_size(blk_sz, MEMPOOL_F_NO_SPREAD, &sz);
682 if (dev->sqb_size != sz.elt_size) {
683 otx2_err("sqe pool block size is not expected %d != %d",
688 nix_sqb_aura_limit_cfg(txq->sqb_pool, txq->nb_sqb_bufs);
696 otx2_nix_form_default_desc(struct otx2_eth_txq *txq)
698 struct nix_send_ext_s *send_hdr_ext;
699 struct nix_send_hdr_s *send_hdr;
700 struct nix_send_mem_s *send_mem;
701 union nix_send_sg_s *sg;
703 /* Initialize the fields based on a basic single-segment packet */
704 memset(&txq->cmd, 0, sizeof(txq->cmd));
706 if (txq->dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
707 send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
708 /* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
709 send_hdr->w0.sizem1 = 2;
711 send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2];
712 send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
713 if (txq->dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
714 /* Default: one seg packet would have:
715 * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
718 send_hdr->w0.sizem1 = 3;
719 send_hdr_ext->w0.tstmp = 1;
721 /* The send_mem subdescriptor starts at 64-bit word offset
722 * send_hdr->w0.sizem1 * 2 within the command buffer
724 send_mem = (struct nix_send_mem_s *)(txq->cmd +
725 (send_hdr->w0.sizem1 << 1));
726 send_mem->subdc = NIX_SUBDC_MEM;
728 send_mem->wmem = 0x1;
729 send_mem->alg = NIX_SENDMEMALG_SETTSTMP;
731 sg = (union nix_send_sg_s *)&txq->cmd[4];
733 send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
734 /* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
735 send_hdr->w0.sizem1 = 1;
736 sg = (union nix_send_sg_s *)&txq->cmd[2];
739 send_hdr->w0.sq = txq->sq;
740 sg->subdc = NIX_SUBDC_SG;
742 sg->ld_type = NIX_SENDLDTYPE_LDD;
748 otx2_nix_tx_queue_release(void *_txq)
750 struct otx2_eth_txq *txq = _txq;
751 struct rte_eth_dev *eth_dev;
756 eth_dev = txq->dev->eth_dev;
758 otx2_nix_dbg("Releasing txq %u", txq->sq);
760 /* Flush and disable tm */
761 otx2_nix_tm_sw_xoff(txq, eth_dev->data->dev_started);
763 /* Free sqb's and disable sq */
767 rte_mempool_free(txq->sqb_pool);
768 txq->sqb_pool = NULL;
775 otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
776 uint16_t nb_desc, unsigned int socket_id,
777 const struct rte_eth_txconf *tx_conf)
779 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
780 const struct rte_memzone *fc;
781 struct otx2_eth_txq *txq;
787 /* Compile time check to make sure all fast path elements fit in a cache line */
788 RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_txq, slow_path_start) >= 128);
790 if (tx_conf->tx_deferred_start) {
791 otx2_err("Tx deferred start is not supported");
795 /* Free memory prior to re-allocation if needed. */
796 if (eth_dev->data->tx_queues[sq] != NULL) {
797 otx2_nix_dbg("Freeing memory prior to re-allocation %d", sq);
798 otx2_nix_tx_queue_release(eth_dev->data->tx_queues[sq]);
799 eth_dev->data->tx_queues[sq] = NULL;
802 /* Find the expected offloads for this queue */
803 offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
805 /* Allocating tx queue data structure */
806 txq = rte_zmalloc_socket("otx2_ethdev TX queue", sizeof(*txq),
807 OTX2_ALIGN, socket_id);
809 otx2_err("Failed to alloc txq=%d", sq);
815 txq->sqb_pool = NULL;
816 txq->offloads = offloads;
817 dev->tx_offloads |= offloads;
820 * Allocate memory for flow control updates from HW.
821 * Allocate one cache line so that it fits all FC_STYPE modes.
823 fc = rte_eth_dma_zone_reserve(eth_dev, "fcmem", sq,
824 OTX2_ALIGN + sizeof(struct npa_aura_s),
825 OTX2_ALIGN, dev->node);
827 otx2_err("Failed to allocate mem for fcmem");
831 txq->fc_iova = fc->iova;
832 txq->fc_mem = fc->addr;
834 /* Initialize the aura sqb pool */
835 rc = nix_alloc_sqb_pool(eth_dev->data->port_id, txq, nb_desc);
837 otx2_err("Failed to alloc sqe pool rc=%d", rc);
841 /* Initialize the SQ */
842 rc = nix_sq_init(txq);
844 otx2_err("Failed to init sq=%d context", sq);
848 txq->fc_cache_pkts = 0;
849 txq->io_addr = dev->base + NIX_LF_OP_SENDX(0);
850 /* Evenly distribute LMT slots, one per SQ */
851 txq->lmt_addr = (void *)(dev->lmt_addr + ((sq & LMT_SLOT_MASK) << 12));
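/* Annotation: each SQ gets its own LMT slot spaced 4 KB apart (1 << 12),
 * so concurrent queues do not contend for the same LMTST region.
 */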
853 txq->qconf.socket_id = socket_id;
854 txq->qconf.nb_desc = nb_desc;
855 memcpy(&txq->qconf.conf.tx, tx_conf, sizeof(struct rte_eth_txconf));
857 otx2_nix_form_default_desc(txq);
859 otx2_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " sqb=0x%" PRIx64 ""
860 " lmt_addr=%p nb_sqb_bufs=%d sqes_per_sqb_log2=%d", sq,
861 fc->addr, offloads, txq->sqb_pool->pool_id, txq->lmt_addr,
862 txq->nb_sqb_bufs, txq->sqes_per_sqb_log2);
863 eth_dev->data->tx_queues[sq] = txq;
864 eth_dev->data->tx_queue_state[sq] = RTE_ETH_QUEUE_STATE_STOPPED;
868 otx2_nix_tx_queue_release(txq);
874 nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
876 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
877 struct otx2_eth_qconf *tx_qconf = NULL;
878 struct otx2_eth_qconf *rx_qconf = NULL;
879 struct otx2_eth_txq **txq;
880 struct otx2_eth_rxq **rxq;
881 int i, nb_rxq, nb_txq;
883 nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
884 nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
886 tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
887 if (tx_qconf == NULL) {
888 otx2_err("Failed to allocate memory for tx_qconf");
892 rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
893 if (rx_qconf == NULL) {
894 otx2_err("Failed to allocate memory for rx_qconf");
898 txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
899 for (i = 0; i < nb_txq; i++) {
900 if (txq[i] == NULL) {
901 otx2_err("txq[%d] is already released", i);
904 memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf));
905 otx2_nix_tx_queue_release(txq[i]);
906 eth_dev->data->tx_queues[i] = NULL;
909 rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
910 for (i = 0; i < nb_rxq; i++) {
911 if (rxq[i] == NULL) {
912 otx2_err("rxq[%d] is already released", i);
915 memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf));
916 otx2_nix_rx_queue_release(rxq[i]);
917 eth_dev->data->rx_queues[i] = NULL;
920 dev->tx_qconf = tx_qconf;
921 dev->rx_qconf = rx_qconf;
934 nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
936 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
937 struct otx2_eth_qconf *tx_qconf = dev->tx_qconf;
938 struct otx2_eth_qconf *rx_qconf = dev->rx_qconf;
939 struct otx2_eth_txq **txq;
940 struct otx2_eth_rxq **rxq;
941 int rc, i, nb_rxq, nb_txq;
943 nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
944 nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
947 /* Set up the Tx & Rx queues with the previous configuration so
948 * that the queues remain functional when ports are started
949 * without reconfiguring the queues.
951 * The usual reconfiguration sequence looks like this:
957 * queue_configure() {
964 * In some applications' control paths, queue_configure() would
965 * NOT be invoked for TXQs/RXQs in port_configure().
966 * In such cases, the queues can still be functional after start,
967 * as they were already set up in port_configure().
969 for (i = 0; i < nb_txq; i++) {
970 rc = otx2_nix_tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc,
971 tx_qconf[i].socket_id,
972 &tx_qconf[i].conf.tx);
974 otx2_err("Failed to setup tx queue rc=%d", rc);
975 txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
976 for (i -= 1; i >= 0; i--)
977 otx2_nix_tx_queue_release(txq[i]);
982 free(tx_qconf); tx_qconf = NULL;
984 for (i = 0; i < nb_rxq; i++) {
985 rc = otx2_nix_rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc,
986 rx_qconf[i].socket_id,
987 &rx_qconf[i].conf.rx,
988 rx_qconf[i].mempool);
990 otx2_err("Failed to setup rx queue rc=%d", rc);
991 rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
992 for (i -= 1; i >= 0; i--)
993 otx2_nix_rx_queue_release(rxq[i]);
994 goto release_tx_queues;
998 free(rx_qconf); rx_qconf = NULL;
1003 txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
1004 for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1005 otx2_nix_tx_queue_release(txq[i]);
1016 nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
1018 RTE_SET_USED(queue);
1019 RTE_SET_USED(mbufs);
1026 nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
1028 /* These dummy functions are required to support
1029 * applications which reconfigure queues without
1030 * stopping the Tx and Rx burst threads (e.g. the KNI app).
1031 * When the queue context is saved, the txqs/rxqs are released,
1032 * which would crash the application since Rx/Tx burst is still
1033 * running on other lcores.
1035 eth_dev->tx_pkt_burst = nix_eth_nop_burst;
1036 eth_dev->rx_pkt_burst = nix_eth_nop_burst;
1041 otx2_nix_configure(struct rte_eth_dev *eth_dev)
1043 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1044 struct rte_eth_dev_data *data = eth_dev->data;
1045 struct rte_eth_conf *conf = &data->dev_conf;
1046 struct rte_eth_rxmode *rxmode = &conf->rxmode;
1047 struct rte_eth_txmode *txmode = &conf->txmode;
1048 char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
1049 struct rte_ether_addr *ea;
1050 uint8_t nb_rxq, nb_txq;
1056 if (rte_eal_has_hugepages() == 0) {
1057 otx2_err("Huge page is not configured");
1061 if (rte_eal_iova_mode() != RTE_IOVA_VA) {
1062 otx2_err("iova mode should be va");
1066 if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
1067 otx2_err("Setting link speed/duplex not supported");
1071 if (conf->dcb_capability_en == 1) {
1072 otx2_err("dcb enable is not supported");
1076 if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1077 otx2_err("Flow director is not supported");
1081 if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
1082 rxmode->mq_mode != ETH_MQ_RX_RSS) {
1083 otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
1087 if (txmode->mq_mode != ETH_MQ_TX_NONE) {
1088 otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
1092 /* Free the resources allocated from the previous configure */
1093 if (dev->configured == 1) {
1094 oxt2_nix_unregister_queue_irqs(eth_dev);
1095 nix_set_nop_rxtx_function(eth_dev);
1096 rc = nix_store_queue_cfg_and_then_release(eth_dev);
1099 otx2_nix_tm_fini(eth_dev);
1103 if (otx2_dev_is_A0(dev) &&
1104 (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
1105 ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
1106 (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
1107 otx2_err("Outer IP and SCTP checksum unsupported");
1112 dev->rx_offloads = rxmode->offloads;
1113 dev->tx_offloads = txmode->offloads;
1114 dev->rss_info.rss_grps = NIX_RSS_GRPS;
1116 nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
1117 nb_txq = RTE_MAX(data->nb_tx_queues, 1);
1119 /* Alloc a nix lf */
1120 rc = nix_lf_alloc(dev, nb_rxq, nb_txq);
1122 otx2_err("Failed to init nix_lf rc=%d", rc);
1127 rc = otx2_nix_rss_config(eth_dev);
1129 otx2_err("Failed to configure rss rc=%d", rc);
1133 /* Init the default TM scheduler hierarchy */
1134 rc = otx2_nix_tm_init_default(eth_dev);
1136 otx2_err("Failed to init traffic manager rc=%d", rc);
1140 /* Register queue IRQs */
1141 rc = oxt2_nix_register_queue_irqs(eth_dev);
1143 otx2_err("Failed to register queue interrupts rc=%d", rc);
1148 * Restore the queue config for the reconfigure case, where the
1149 * application does not invoke queue configure again.
1151 if (dev->configured == 1) {
1152 rc = nix_restore_queue_cfg(eth_dev);
1157 /* Update the mac address */
1158 ea = eth_dev->data->mac_addrs;
1159 memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1160 if (rte_is_zero_ether_addr(ea))
1161 rte_eth_random_addr((uint8_t *)ea);
1163 rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
1165 otx2_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
1166 " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 ""
1167 " rx_flags=0x%x tx_flags=0x%x",
1168 eth_dev->data->port_id, ea_fmt, nb_rxq,
1169 nb_txq, dev->rx_offloads, dev->tx_offloads,
1170 dev->rx_offload_flags, dev->tx_offload_flags);
1173 dev->configured = 1;
1174 dev->configured_nb_rx_qs = data->nb_rx_queues;
1175 dev->configured_nb_tx_qs = data->nb_tx_queues;
1179 rc = nix_lf_free(dev);
1185 otx2_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
1187 struct rte_eth_dev_data *data = eth_dev->data;
1188 struct otx2_eth_txq *txq;
1191 txq = eth_dev->data->tx_queues[qidx];
1193 if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
1196 rc = otx2_nix_sq_sqb_aura_fc(txq, true);
1198 otx2_err("Failed to enable sqb aura fc, txq=%u, rc=%d",
1203 data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
1210 otx2_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
1212 struct rte_eth_dev_data *data = eth_dev->data;
1213 struct otx2_eth_txq *txq;
1216 txq = eth_dev->data->tx_queues[qidx];
1218 if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
1221 txq->fc_cache_pkts = 0;
1223 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
1225 otx2_err("Failed to disable sqb aura fc, txq=%u, rc=%d",
1230 data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
1237 otx2_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
1239 struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
1240 struct rte_eth_dev_data *data = eth_dev->data;
1243 if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
1246 rc = nix_rq_enb_dis(rxq->eth_dev, rxq, true);
1248 otx2_err("Failed to enable rxq=%u, rc=%d", qidx, rc);
1252 data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
1259 otx2_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
1261 struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
1262 struct rte_eth_dev_data *data = eth_dev->data;
1265 if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
1268 rc = nix_rq_enb_dis(rxq->eth_dev, rxq, false);
1270 otx2_err("Failed to disable rxq=%u, rc=%d", qidx, rc);
1274 data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
1280 /* Initialize and register driver with DPDK Application */
1281 static const struct eth_dev_ops otx2_eth_dev_ops = {
1282 .dev_infos_get = otx2_nix_info_get,
1283 .dev_configure = otx2_nix_configure,
1284 .link_update = otx2_nix_link_update,
1285 .tx_queue_setup = otx2_nix_tx_queue_setup,
1286 .tx_queue_release = otx2_nix_tx_queue_release,
1287 .rx_queue_setup = otx2_nix_rx_queue_setup,
1288 .rx_queue_release = otx2_nix_rx_queue_release,
1289 .tx_queue_start = otx2_nix_tx_queue_start,
1290 .tx_queue_stop = otx2_nix_tx_queue_stop,
1291 .rx_queue_start = otx2_nix_rx_queue_start,
1292 .rx_queue_stop = otx2_nix_rx_queue_stop,
1293 .stats_get = otx2_nix_dev_stats_get,
1294 .stats_reset = otx2_nix_dev_stats_reset,
1295 .get_reg = otx2_nix_dev_get_reg,
1296 .mac_addr_add = otx2_nix_mac_addr_add,
1297 .mac_addr_remove = otx2_nix_mac_addr_del,
1298 .mac_addr_set = otx2_nix_mac_addr_set,
1299 .promiscuous_enable = otx2_nix_promisc_enable,
1300 .promiscuous_disable = otx2_nix_promisc_disable,
1301 .allmulticast_enable = otx2_nix_allmulticast_enable,
1302 .allmulticast_disable = otx2_nix_allmulticast_disable,
1303 .queue_stats_mapping_set = otx2_nix_queue_stats_mapping,
1304 .reta_update = otx2_nix_dev_reta_update,
1305 .reta_query = otx2_nix_dev_reta_query,
1306 .rss_hash_update = otx2_nix_rss_hash_update,
1307 .rss_hash_conf_get = otx2_nix_rss_hash_conf_get,
1308 .xstats_get = otx2_nix_xstats_get,
1309 .xstats_get_names = otx2_nix_xstats_get_names,
1310 .xstats_reset = otx2_nix_xstats_reset,
1311 .xstats_get_by_id = otx2_nix_xstats_get_by_id,
1312 .xstats_get_names_by_id = otx2_nix_xstats_get_names_by_id,
1316 nix_lf_attach(struct otx2_eth_dev *dev)
1318 struct otx2_mbox *mbox = dev->mbox;
1319 struct rsrc_attach_req *req;
1321 /* Attach NIX(lf) */
1322 req = otx2_mbox_alloc_msg_attach_resources(mbox);
1326 return otx2_mbox_process(mbox);
1330 nix_lf_get_msix_offset(struct otx2_eth_dev *dev)
1332 struct otx2_mbox *mbox = dev->mbox;
1333 struct msix_offset_rsp *msix_rsp;
1336 /* Get NPA and NIX MSIX vector offsets */
1337 otx2_mbox_alloc_msg_msix_offset(mbox);
1339 rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
1341 dev->nix_msixoff = msix_rsp->nix_msixoff;
1347 otx2_eth_dev_lf_detach(struct otx2_mbox *mbox)
1349 struct rsrc_detach_req *req;
1351 req = otx2_mbox_alloc_msg_detach_resources(mbox);
1353 /* Detach all except npa lf */
1354 req->partial = true;
1361 return otx2_mbox_process(mbox);
1365 otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
1367 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1368 struct rte_pci_device *pci_dev;
1369 int rc, max_entries;
1371 eth_dev->dev_ops = &otx2_eth_dev_ops;
1373 /* For secondary processes, the primary has done all the work */
1374 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1375 /* Setup callbacks for secondary process */
1376 otx2_eth_set_tx_function(eth_dev);
1377 otx2_eth_set_rx_function(eth_dev);
1381 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1383 rte_eth_copy_pci_info(eth_dev, pci_dev);
1384 eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
1386 /* Zero out everything after OTX2_DEV to allow proper dev_reset() */
1387 memset(&dev->otx2_eth_dev_data_start, 0, sizeof(*dev) -
1388 offsetof(struct otx2_eth_dev, otx2_eth_dev_data_start));
1390 /* Parse devargs string */
1391 rc = otx2_ethdev_parse_devargs(eth_dev->device->devargs, dev);
1393 otx2_err("Failed to parse devargs rc=%d", rc);
1397 if (!dev->mbox_active) {
1398 /* Initialize the base otx2_dev object
1399 * only if it is not already initialized
1401 rc = otx2_dev_init(pci_dev, dev);
1403 otx2_err("Failed to initialize otx2_dev rc=%d", rc);
1407 /* Device generic callbacks */
1408 dev->ops = &otx2_dev_ops;
1409 dev->eth_dev = eth_dev;
1411 /* Grab the NPA LF if required */
1412 rc = otx2_npa_lf_init(pci_dev, dev);
1414 goto otx2_dev_uninit;
1416 dev->configured = 0;
1417 dev->drv_inited = true;
1418 dev->base = dev->bar2 + (RVU_BLOCK_ADDR_NIX0 << 20);
1419 dev->lmt_addr = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);
1422 rc = nix_lf_attach(dev);
1424 goto otx2_npa_uninit;
1426 /* Get NIX MSIX offset */
1427 rc = nix_lf_get_msix_offset(dev);
1429 goto otx2_npa_uninit;
1431 /* Register LF irq handlers */
1432 rc = otx2_nix_register_irqs(eth_dev);
1436 /* Get maximum number of supported MAC entries */
1437 max_entries = otx2_cgx_mac_max_entries_get(dev);
1438 if (max_entries < 0) {
1439 otx2_err("Failed to get max entries for mac addr");
1441 goto unregister_irq;
1444 /* For VFs, the returned max_entries will be 0. But to keep the default
1445 * MAC address, one entry must be allocated, so set it to 1.
1447 if (max_entries == 0)
1450 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", max_entries *
1451 RTE_ETHER_ADDR_LEN, 0);
1452 if (eth_dev->data->mac_addrs == NULL) {
1453 otx2_err("Failed to allocate memory for mac addr");
1455 goto unregister_irq;
1458 dev->max_mac_entries = max_entries;
1460 rc = otx2_nix_mac_addr_get(eth_dev, dev->mac_addr);
1462 goto free_mac_addrs;
1464 /* Update the mac address */
1465 memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1467 /* Also sync same MAC address to CGX table */
1468 otx2_cgx_mac_addr_set(eth_dev, &eth_dev->data->mac_addrs[0]);
1470 /* Initialize the tm data structures */
1471 otx2_nix_tm_conf_init(eth_dev);
1473 dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
1474 dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
1476 if (otx2_dev_is_A0(dev)) {
1477 dev->hwcap |= OTX2_FIXUP_F_MIN_4K_Q;
1478 dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL;
1481 otx2_nix_dbg("Port=%d pf=%d vf=%d ver=%s msix_off=%d hwcap=0x%" PRIx64
1482 " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
1483 eth_dev->data->port_id, dev->pf, dev->vf,
1484 OTX2_ETH_DEV_PMD_VERSION, dev->nix_msixoff, dev->hwcap,
1485 dev->rx_offload_capa, dev->tx_offload_capa);
1489 rte_free(eth_dev->data->mac_addrs);
1491 otx2_nix_unregister_irqs(eth_dev);
1493 otx2_eth_dev_lf_detach(dev->mbox);
1497 otx2_dev_fini(pci_dev, dev);
1499 otx2_err("Failed to init nix eth_dev rc=%d", rc);
1504 otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
1506 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1507 struct rte_pci_device *pci_dev;
1510 /* Nothing to be done for secondary processes */
1511 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1515 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1516 otx2_nix_tx_queue_release(eth_dev->data->tx_queues[i]);
1517 eth_dev->data->tx_queues[i] = NULL;
1519 eth_dev->data->nb_tx_queues = 0;
1521 /* Free up RQ's and CQ's */
1522 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1523 otx2_nix_rx_queue_release(eth_dev->data->rx_queues[i]);
1524 eth_dev->data->rx_queues[i] = NULL;
1526 eth_dev->data->nb_rx_queues = 0;
1528 /* Free tm resources */
1529 rc = otx2_nix_tm_fini(eth_dev);
1531 otx2_err("Failed to cleanup tm, rc=%d", rc);
1533 /* Unregister queue irqs */
1534 oxt2_nix_unregister_queue_irqs(eth_dev);
1536 rc = nix_lf_free(dev);
1538 otx2_err("Failed to free nix lf, rc=%d", rc);
1540 rc = otx2_npa_lf_fini();
1542 otx2_err("Failed to cleanup npa lf, rc=%d", rc);
1544 rte_free(eth_dev->data->mac_addrs);
1545 eth_dev->data->mac_addrs = NULL;
1546 dev->drv_inited = false;
1548 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1549 otx2_nix_unregister_irqs(eth_dev);
1551 rc = otx2_eth_dev_lf_detach(dev->mbox);
1553 otx2_err("Failed to detach resources, rc=%d", rc);
1555 /* Check if mbox close is needed */
1559 if (otx2_npa_lf_active(dev) || otx2_dev_active_vfs(dev)) {
1560 /* Will be freed later by PMD */
1561 eth_dev->data->dev_private = NULL;
1565 otx2_dev_fini(pci_dev, dev);
1570 nix_remove(struct rte_pci_device *pci_dev)
1572 struct rte_eth_dev *eth_dev;
1573 struct otx2_idev_cfg *idev;
1574 struct otx2_dev *otx2_dev;
1577 eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
1579 /* Cleanup eth dev */
1580 rc = otx2_eth_dev_uninit(eth_dev, true);
1584 rte_eth_dev_pci_release(eth_dev);
1587 /* Nothing to be done for secondary processes */
1588 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1591 /* Check for common resources */
1592 idev = otx2_intra_dev_get_cfg();
1593 if (!idev || !idev->npa_lf || idev->npa_lf->pci_dev != pci_dev)
1596 otx2_dev = container_of(idev->npa_lf, struct otx2_dev, npalf);
1598 if (otx2_npa_lf_active(otx2_dev) || otx2_dev_active_vfs(otx2_dev))
1601 /* Safe to cleanup mbox as no more users */
1602 otx2_dev_fini(pci_dev, otx2_dev);
1607 otx2_info("%s: common resource in use by other devices", pci_dev->name);
1612 nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
1616 RTE_SET_USED(pci_drv);
1618 rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct otx2_eth_dev),
1621 /* On error in a secondary process, recheck whether the port exists
1622 * in the primary or is in the middle of detaching.
1624 if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
1625 if (!rte_eth_dev_allocated(pci_dev->device.name))
1630 static const struct rte_pci_id pci_nix_map[] = {
1632 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF)
1635 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF)
1638 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
1639 PCI_DEVID_OCTEONTX2_RVU_AF_VF)
1646 static struct rte_pci_driver pci_nix = {
1647 .id_table = pci_nix_map,
1648 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA |
1649 RTE_PCI_DRV_INTR_LSC,
1651 .remove = nix_remove,
1654 RTE_PMD_REGISTER_PCI(net_octeontx2, pci_nix);
1655 RTE_PMD_REGISTER_PCI_TABLE(net_octeontx2, pci_nix_map);
1656 RTE_PMD_REGISTER_KMOD_DEP(net_octeontx2, "vfio-pci");