1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
7 #include <ethdev_pci.h>
9 #include <rte_malloc.h>
11 #include <rte_mbuf_pool_ops.h>
12 #include <rte_mempool.h>
14 #include "otx2_ethdev.h"
15 #include "otx2_ethdev_sec.h"
17 static inline uint64_t
18 nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
20 uint64_t capa = NIX_RX_OFFLOAD_CAPA;
22 if (otx2_dev_is_vf(dev) ||
23 dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG)
24 capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
29 static inline uint64_t
30 nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
32 uint64_t capa = NIX_TX_OFFLOAD_CAPA;
34 /* TSO not supported for earlier chip revisions */
35 if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
36 capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
37 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
38 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
39 DEV_TX_OFFLOAD_GRE_TNL_TSO);
43 static const struct otx2_dev_ops otx2_dev_ops = {
44 .link_status_update = otx2_eth_dev_link_status_update,
45 .ptp_info_update = otx2_eth_dev_ptp_info_update,
46 .link_status_get = otx2_eth_dev_link_status_get,
50 nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
52 struct otx2_mbox *mbox = dev->mbox;
53 struct nix_lf_alloc_req *req;
54 struct nix_lf_alloc_rsp *rsp;
57 req = otx2_mbox_alloc_msg_nix_lf_alloc(mbox);
61 /* XQE_SZ should be in sync with NIX_CQ_ENTRY_SZ */
62 RTE_BUILD_BUG_ON(NIX_CQ_ENTRY_SZ != 128);
63 req->xqe_sz = NIX_XQESZ_W16;
64 req->rss_sz = dev->rss_info.rss_size;
65 req->rss_grps = NIX_RSS_GRPS;
66 req->npa_func = otx2_npa_pf_func_get();
67 req->sso_func = otx2_sso_pf_func_get();
68 req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
69 if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
70 DEV_RX_OFFLOAD_UDP_CKSUM)) {
71 req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
72 req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
74 req->rx_cfg |= (BIT_ULL(32 /* DROP_RE */) |
75 BIT_ULL(33 /* Outer L2 Length */) |
76 BIT_ULL(38 /* Inner L4 UDP Length */) |
77 BIT_ULL(39 /* Inner L3 Length */) |
78 BIT_ULL(40 /* Outer L4 UDP Length */) |
79 BIT_ULL(41 /* Outer L3 Length */));
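/* Note: the rx_cfg bits above appear to select the receive-side checks NIX
 * applies: DROP_RE presumably drops packets with receive errors, and the
 * L2/L3/L4 length bits enable the corresponding header-length checks. The
 * CSUM_OL4/CSUM_IL4 bits are added only when an L4 checksum offload is
 * requested (see the condition above).
 */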
81 if (dev->rss_tag_as_xor == 0)
82 req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;
84 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
88 dev->sqb_size = rsp->sqb_size;
89 dev->tx_chan_base = rsp->tx_chan_base;
90 dev->rx_chan_base = rsp->rx_chan_base;
91 dev->rx_chan_cnt = rsp->rx_chan_cnt;
92 dev->tx_chan_cnt = rsp->tx_chan_cnt;
93 dev->lso_tsov4_idx = rsp->lso_tsov4_idx;
94 dev->lso_tsov6_idx = rsp->lso_tsov6_idx;
95 dev->lf_tx_stats = rsp->lf_tx_stats;
96 dev->lf_rx_stats = rsp->lf_rx_stats;
97 dev->cints = rsp->cints;
98 dev->qints = rsp->qints;
99 dev->npc_flow.channel = dev->rx_chan_base;
100 dev->ptp_en = rsp->hw_rx_tstamp_en;
106 nix_lf_switch_header_type_enable(struct otx2_eth_dev *dev, bool enable)
108 struct otx2_mbox *mbox = dev->mbox;
109 struct npc_set_pkind *req;
110 struct msg_resp *rsp;
113 if (dev->npc_flow.switch_header_type == 0)
116 /* Notify AF about higig2 config */
117 req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
118 req->mode = dev->npc_flow.switch_header_type;
119 if (dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_CH_LEN_90B) {
120 req->mode = OTX2_PRIV_FLAGS_CUSTOM;
121 req->pkind = NPC_RX_CHLEN90B_PKIND;
122 } else if (dev->npc_flow.switch_header_type ==
123 OTX2_PRIV_FLAGS_CH_LEN_24B) {
124 req->mode = OTX2_PRIV_FLAGS_CUSTOM;
125 req->pkind = NPC_RX_CHLEN24B_PKIND;
126 } else if (dev->npc_flow.switch_header_type ==
127 OTX2_PRIV_FLAGS_EXDSA) {
128 req->mode = OTX2_PRIV_FLAGS_CUSTOM;
129 req->pkind = NPC_RX_EXDSA_PKIND;
130 } else if (dev->npc_flow.switch_header_type ==
131 OTX2_PRIV_FLAGS_VLAN_EXDSA) {
132 req->mode = OTX2_PRIV_FLAGS_CUSTOM;
133 req->pkind = NPC_RX_VLAN_EXDSA_PKIND;
137 req->mode = OTX2_PRIV_FLAGS_DEFAULT;
139 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
142 req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
143 req->mode = dev->npc_flow.switch_header_type;
144 if (dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_CH_LEN_90B ||
145 dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_CH_LEN_24B)
146 req->mode = OTX2_PRIV_FLAGS_DEFAULT;
149 req->mode = OTX2_PRIV_FLAGS_DEFAULT;
151 return otx2_mbox_process_msg(mbox, (void *)&rsp);
155 nix_lf_free(struct otx2_eth_dev *dev)
157 struct otx2_mbox *mbox = dev->mbox;
158 struct nix_lf_free_req *req;
159 struct ndc_sync_op *ndc_req;
162 /* Sync NDC-NIX for LF */
163 ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
164 ndc_req->nix_lf_tx_sync = 1;
165 ndc_req->nix_lf_rx_sync = 1;
166 rc = otx2_mbox_process(mbox);
168 otx2_err("Error on NDC-NIX-[TX, RX] LF sync, rc %d", rc);
170 req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
171 /* Let the AF driver free all of this NIX LF's
172 * NPC entries allocated using the NPC mailbox.
176 return otx2_mbox_process(mbox);
180 otx2_cgx_rxtx_start(struct otx2_eth_dev *dev)
182 struct otx2_mbox *mbox = dev->mbox;
184 if (otx2_dev_is_vf_or_sdp(dev))
187 otx2_mbox_alloc_msg_cgx_start_rxtx(mbox);
189 return otx2_mbox_process(mbox);
193 otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev)
195 struct otx2_mbox *mbox = dev->mbox;
197 if (otx2_dev_is_vf_or_sdp(dev))
200 otx2_mbox_alloc_msg_cgx_stop_rxtx(mbox);
202 return otx2_mbox_process(mbox);
206 npc_rx_enable(struct otx2_eth_dev *dev)
208 struct otx2_mbox *mbox = dev->mbox;
210 otx2_mbox_alloc_msg_nix_lf_start_rx(mbox);
212 return otx2_mbox_process(mbox);
216 npc_rx_disable(struct otx2_eth_dev *dev)
218 struct otx2_mbox *mbox = dev->mbox;
220 otx2_mbox_alloc_msg_nix_lf_stop_rx(mbox);
222 return otx2_mbox_process(mbox);
226 nix_cgx_start_link_event(struct otx2_eth_dev *dev)
228 struct otx2_mbox *mbox = dev->mbox;
230 if (otx2_dev_is_vf_or_sdp(dev))
233 otx2_mbox_alloc_msg_cgx_start_linkevents(mbox);
235 return otx2_mbox_process(mbox);
239 cgx_intlbk_enable(struct otx2_eth_dev *dev, bool en)
241 struct otx2_mbox *mbox = dev->mbox;
243 if (en && otx2_dev_is_vf_or_sdp(dev))
247 otx2_mbox_alloc_msg_cgx_intlbk_enable(mbox);
249 otx2_mbox_alloc_msg_cgx_intlbk_disable(mbox);
251 return otx2_mbox_process(mbox);
255 nix_cgx_stop_link_event(struct otx2_eth_dev *dev)
257 struct otx2_mbox *mbox = dev->mbox;
259 if (otx2_dev_is_vf_or_sdp(dev))
262 otx2_mbox_alloc_msg_cgx_stop_linkevents(mbox);
264 return otx2_mbox_process(mbox);
268 nix_rx_queue_reset(struct otx2_eth_rxq *rxq)
274 static inline uint32_t
275 nix_qsize_to_val(enum nix_q_size_e qsize)
277 return (16UL << (qsize * 2));
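/* Illustration: each nix_q_size_e step quadruples the depth, i.e.
 * qsize 0 -> 16 entries, 1 -> 64, 2 -> 256 and so on, which is what
 * nix_qsize_clampup_get() below relies on when picking the smallest
 * size that covers the requested descriptor count.
 */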
280 static inline enum nix_q_size_e
281 nix_qsize_clampup_get(struct otx2_eth_dev *dev, uint32_t val)
285 if (otx2_ethdev_fixup_is_min_4k_q(dev))
290 for (; i < nix_q_size_max; i++)
291 if (val <= nix_qsize_to_val(i))
294 if (i >= nix_q_size_max)
295 i = nix_q_size_max - 1;
301 nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
302 uint16_t qid, struct otx2_eth_rxq *rxq, struct rte_mempool *mp)
304 struct otx2_mbox *mbox = dev->mbox;
305 const struct rte_memzone *rz;
306 uint32_t ring_size, cq_size;
307 struct nix_aq_enq_req *aq;
312 ring_size = cq_size * NIX_CQ_ENTRY_SZ;
313 rz = rte_eth_dma_zone_reserve(eth_dev, "cq", qid, ring_size,
314 NIX_CQ_ALIGN, dev->node);
316 otx2_err("Failed to allocate mem for cq hw ring");
319 memset(rz->addr, 0, rz->len);
320 rxq->desc = (uintptr_t)rz->addr;
321 rxq->qmask = cq_size - 1;
323 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
325 aq->ctype = NIX_AQ_CTYPE_CQ;
326 aq->op = NIX_AQ_INSTOP_INIT;
330 aq->cq.qsize = rxq->qsize;
331 aq->cq.base = rz->iova;
332 aq->cq.avg_level = 0xff;
333 aq->cq.cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
334 aq->cq.cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
336 /* Many to one reduction */
337 aq->cq.qint_idx = qid % dev->qints;
338 /* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
339 aq->cq.cint_idx = qid;
341 if (otx2_ethdev_fixup_is_limit_cq_full(dev)) {
342 const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
343 uint16_t min_rx_drop;
345 min_rx_drop = ceil(rx_cq_skid / (float)cq_size);
346 aq->cq.drop = min_rx_drop;
348 rxq->cq_drop = min_rx_drop;
350 rxq->cq_drop = NIX_CQ_THRESH_LEVEL;
351 aq->cq.drop = rxq->cq_drop;
355 /* TX pause frames enable flowctrl on RX side */
356 if (dev->fc_info.tx_pause) {
357 /* Single bpid is allocated for all rx channels for now */
358 aq->cq.bpid = dev->fc_info.bpid[0];
359 aq->cq.bp = rxq->cq_drop;
363 rc = otx2_mbox_process(mbox);
365 otx2_err("Failed to init cq context");
369 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
371 aq->ctype = NIX_AQ_CTYPE_RQ;
372 aq->op = NIX_AQ_INSTOP_INIT;
376 if (rxq->offloads & DEV_RX_OFFLOAD_SECURITY)
377 aq->rq.ipsech_ena = 1;
379 aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
381 aq->rq.lpb_aura = npa_lf_aura_handle_to_aura(mp->pool_id);
382 first_skip = (sizeof(struct rte_mbuf));
383 first_skip += RTE_PKTMBUF_HEADROOM;
384 first_skip += rte_pktmbuf_priv_size(mp);
385 rxq->data_off = first_skip;
387 first_skip /= 8; /* Expressed in number of dwords */
388 aq->rq.first_skip = first_skip;
389 aq->rq.later_skip = (sizeof(struct rte_mbuf) / 8);
390 aq->rq.flow_tagw = 32; /* 32-bits */
391 aq->rq.lpb_sizem1 = mp->elt_size / 8;
392 aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
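/* Worked example with typical (illustrative) values: a 128 B struct
 * rte_mbuf, 128 B of RTE_PKTMBUF_HEADROOM and no mempool private area
 * give first_skip = 256 B, i.e. 32 dwords, so the HW starts writing
 * packet data right after the mbuf header and headroom.
 */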
394 aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
395 aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
396 aq->rq.rq_int_ena = 0;
397 /* Many to one reduction */
398 aq->rq.qint_idx = qid % dev->qints;
400 aq->rq.xqe_drop_ena = 1;
402 rc = otx2_mbox_process(mbox);
404 otx2_err("Failed to init rq context");
408 if (dev->lock_rx_ctx) {
409 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
411 aq->ctype = NIX_AQ_CTYPE_CQ;
412 aq->op = NIX_AQ_INSTOP_LOCK;
414 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
416 /* The shared memory buffer can be full.
419 otx2_mbox_msg_send(mbox, 0);
420 rc = otx2_mbox_wait_for_rsp(mbox, 0);
422 otx2_err("Failed to LOCK cq context");
426 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
428 otx2_err("Failed to LOCK rq context");
433 aq->ctype = NIX_AQ_CTYPE_RQ;
434 aq->op = NIX_AQ_INSTOP_LOCK;
435 rc = otx2_mbox_process(mbox);
437 otx2_err("Failed to LOCK rq context");
446 nix_rq_enb_dis(struct rte_eth_dev *eth_dev,
447 struct otx2_eth_rxq *rxq, const bool enb)
449 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
450 struct otx2_mbox *mbox = dev->mbox;
451 struct nix_aq_enq_req *aq;
453 /* Pkts will be dropped silently if RQ is disabled */
454 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
456 aq->ctype = NIX_AQ_CTYPE_RQ;
457 aq->op = NIX_AQ_INSTOP_WRITE;
460 aq->rq_mask.ena = ~(aq->rq_mask.ena);
462 return otx2_mbox_process(mbox);
466 nix_cq_rq_uninit(struct rte_eth_dev *eth_dev, struct otx2_eth_rxq *rxq)
468 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
469 struct otx2_mbox *mbox = dev->mbox;
470 struct nix_aq_enq_req *aq;
473 /* RQ is already disabled */
475 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
477 aq->ctype = NIX_AQ_CTYPE_CQ;
478 aq->op = NIX_AQ_INSTOP_WRITE;
481 aq->cq_mask.ena = ~(aq->cq_mask.ena);
483 rc = otx2_mbox_process(mbox);
485 otx2_err("Failed to disable cq context");
489 if (dev->lock_rx_ctx) {
490 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
492 aq->ctype = NIX_AQ_CTYPE_CQ;
493 aq->op = NIX_AQ_INSTOP_UNLOCK;
495 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
497 /* The shared memory buffer can be full.
500 otx2_mbox_msg_send(mbox, 0);
501 rc = otx2_mbox_wait_for_rsp(mbox, 0);
503 otx2_err("Failed to UNLOCK cq context");
507 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
509 otx2_err("Failed to UNLOCK rq context");
514 aq->ctype = NIX_AQ_CTYPE_RQ;
515 aq->op = NIX_AQ_INSTOP_UNLOCK;
516 rc = otx2_mbox_process(mbox);
518 otx2_err("Failed to UNLOCK rq context");
527 nix_get_data_off(struct otx2_eth_dev *dev)
529 return otx2_ethdev_is_ptp_en(dev) ? NIX_TIMESYNC_RX_OFFSET : 0;
533 otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id)
535 struct rte_mbuf mb_def;
538 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
539 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
540 offsetof(struct rte_mbuf, data_off) != 2);
541 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
542 offsetof(struct rte_mbuf, data_off) != 4);
543 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
544 offsetof(struct rte_mbuf, data_off) != 6);
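/* These build-time asserts guarantee that data_off, refcnt, nb_segs and
 * port stay packed at offsets 0/2/4/6 of the 8-byte rearm_data word, so
 * the Rx fast path can re-initialize all four fields of an mbuf with a
 * single 64-bit store of the value composed below.
 */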
546 mb_def.data_off = RTE_PKTMBUF_HEADROOM + nix_get_data_off(dev);
547 mb_def.port = port_id;
548 rte_mbuf_refcnt_set(&mb_def, 1);
550 /* Prevent compiler reordering: rearm_data covers previous fields */
551 rte_compiler_barrier();
552 tmp = (uint64_t *)&mb_def.rearm_data;
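/* The packed 64-bit word built here is what otx2_nix_rx_queue_setup()
 * later stores as rxq->mbuf_initializer for the Rx fast path.
 */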
558 otx2_nix_rx_queue_release(void *rx_queue)
560 struct otx2_eth_rxq *rxq = rx_queue;
565 otx2_nix_dbg("Releasing rxq %u", rxq->rq);
566 nix_cq_rq_uninit(rxq->eth_dev, rxq);
571 otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
572 uint16_t nb_desc, unsigned int socket,
573 const struct rte_eth_rxconf *rx_conf,
574 struct rte_mempool *mp)
576 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
577 struct rte_mempool_ops *ops;
578 struct otx2_eth_rxq *rxq;
579 const char *platform_ops;
580 enum nix_q_size_e qsize;
586 /* Compile-time check to make sure all fast path elements fit in a cache line */
587 RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_rxq, slow_path_start) >= 128);
590 if (rx_conf->rx_deferred_start == 1) {
591 otx2_err("Deferred Rx start is not supported");
595 platform_ops = rte_mbuf_platform_mempool_ops();
596 /* This driver needs octeontx2_npa mempool ops to work */
597 ops = rte_mempool_get_ops(mp->ops_index);
598 if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
599 otx2_err("mempool ops should be of octeontx2_npa type");
603 if (mp->pool_id == 0) {
604 otx2_err("Invalid pool_id");
608 /* Free memory prior to re-allocation if needed */
609 if (eth_dev->data->rx_queues[rq] != NULL) {
610 otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq);
611 otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]);
612 rte_eth_dma_zone_free(eth_dev, "cq", rq);
613 eth_dev->data->rx_queues[rq] = NULL;
616 offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
617 dev->rx_offloads |= offloads;
619 /* Find the CQ queue size */
620 qsize = nix_qsize_clampup_get(dev, nb_desc);
621 /* Allocate rxq memory */
622 rxq = rte_zmalloc_socket("otx2 rxq", sizeof(*rxq), OTX2_ALIGN, socket);
624 otx2_err("Failed to allocate rq=%d", rq);
629 rxq->eth_dev = eth_dev;
631 rxq->cq_door = dev->base + NIX_LF_CQ_OP_DOOR;
632 rxq->cq_status = (int64_t *)(dev->base + NIX_LF_CQ_OP_STATUS);
633 rxq->wdata = (uint64_t)rq << 32;
634 rxq->aura = npa_lf_aura_handle_to_aura(mp->pool_id);
635 rxq->mbuf_initializer = otx2_nix_rxq_mbuf_setup(dev,
636 eth_dev->data->port_id);
637 rxq->offloads = offloads;
639 rxq->qlen = nix_qsize_to_val(qsize);
641 rxq->lookup_mem = otx2_nix_fastpath_lookup_mem_get();
642 rxq->tstamp = &dev->tstamp;
644 /* Alloc completion queue */
645 rc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp);
647 otx2_err("Failed to allocate rxq=%u", rq);
651 rxq->qconf.socket_id = socket;
652 rxq->qconf.nb_desc = nb_desc;
653 rxq->qconf.mempool = mp;
654 memcpy(&rxq->qconf.conf.rx, rx_conf, sizeof(struct rte_eth_rxconf));
656 nix_rx_queue_reset(rxq);
657 otx2_nix_dbg("rq=%d pool=%s qsize=%d nb_desc=%d->%d",
658 rq, mp->name, qsize, nb_desc, rxq->qlen);
660 eth_dev->data->rx_queues[rq] = rxq;
661 eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;
663 /* Calculate the delta and freq mult between the PTP HI clock and TSC.
664 * These are needed to derive the raw clock value from the TSC counter.
665 * The read_clock eth op returns the raw clock value.
667 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
668 otx2_ethdev_is_ptp_en(dev)) {
669 rc = otx2_nix_raw_clock_tsc_conv(dev);
671 otx2_err("Failed to calculate delta and freq mult");
676 /* Setup scatter mode if needed by jumbo */
677 otx2_nix_enable_mseg_on_jumbo(rxq);
682 otx2_nix_rx_queue_release(rxq);
687 static inline uint8_t
688 nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
691 * A maximum of three segments can be supported with W8; choose
692 * NIX_MAXSQESZ_W16 for multi-segment offload.
694 if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
695 return NIX_MAXSQESZ_W16;
697 return NIX_MAXSQESZ_W8;
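/* Note: W8 and W16 denote the SQE size in 8-byte words (64 B vs 128 B);
 * the larger format leaves room for more gather pointers, hence it is
 * selected whenever DEV_TX_OFFLOAD_MULTI_SEGS is enabled.
 */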
701 nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
703 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
704 struct rte_eth_dev_data *data = eth_dev->data;
705 struct rte_eth_conf *conf = &data->dev_conf;
706 struct rte_eth_rxmode *rxmode = &conf->rxmode;
709 if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
710 (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
711 flags |= NIX_RX_OFFLOAD_RSS_F;
713 if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
714 DEV_RX_OFFLOAD_UDP_CKSUM))
715 flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
717 if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
718 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
719 flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
721 if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
722 flags |= NIX_RX_MULTI_SEG_F;
724 if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
725 DEV_RX_OFFLOAD_QINQ_STRIP))
726 flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
728 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
729 flags |= NIX_RX_OFFLOAD_TSTAMP_F;
731 if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
732 flags |= NIX_RX_OFFLOAD_SECURITY_F;
734 if (!dev->ptype_disable)
735 flags |= NIX_RX_OFFLOAD_PTYPE_F;
741 nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
743 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
744 uint64_t conf = dev->tx_offloads;
747 /* Fast path is dependent on these enums */
748 RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
749 RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
750 RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
751 RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
752 RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
753 RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
754 RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
755 RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
756 RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
757 RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
758 RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
759 RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
760 RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
761 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
762 offsetof(struct rte_mbuf, buf_iova) + 8);
763 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
764 offsetof(struct rte_mbuf, buf_iova) + 16);
765 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
766 offsetof(struct rte_mbuf, ol_flags) + 12);
767 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
768 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
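/* As on the Rx side, these asserts pin the rte_mbuf layout and the
 * ol_flags bit positions that the Tx fast path reads directly; a change
 * in either would silently break the burst functions, so the build
 * fails instead.
 */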
770 if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
771 conf & DEV_TX_OFFLOAD_QINQ_INSERT)
772 flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
774 if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
775 conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
776 flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
778 if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
779 conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
780 conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
781 conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
782 flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
784 if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
785 flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
787 if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
788 flags |= NIX_TX_MULTI_SEG_F;
790 /* Enable Inner checksum for TSO */
791 if (conf & DEV_TX_OFFLOAD_TCP_TSO)
792 flags |= (NIX_TX_OFFLOAD_TSO_F |
793 NIX_TX_OFFLOAD_L3_L4_CSUM_F);
795 /* Enable Inner and Outer checksum for Tunnel TSO */
796 if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
797 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
798 DEV_TX_OFFLOAD_GRE_TNL_TSO))
799 flags |= (NIX_TX_OFFLOAD_TSO_F |
800 NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
801 NIX_TX_OFFLOAD_L3_L4_CSUM_F);
803 if (conf & DEV_TX_OFFLOAD_SECURITY)
804 flags |= NIX_TX_OFFLOAD_SECURITY_F;
806 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
807 flags |= NIX_TX_OFFLOAD_TSTAMP_F;
813 nix_sqb_lock(struct rte_mempool *mp)
815 struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
816 struct npa_aq_enq_req *req;
819 req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
820 req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
821 req->ctype = NPA_AQ_CTYPE_AURA;
822 req->op = NPA_AQ_INSTOP_LOCK;
824 req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
826 /* The shared memory buffer can be full.
829 otx2_mbox_msg_send(npa_lf->mbox, 0);
830 rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0);
832 otx2_err("Failed to LOCK AURA context");
836 req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
838 otx2_err("Failed to LOCK POOL context");
843 req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
844 req->ctype = NPA_AQ_CTYPE_POOL;
845 req->op = NPA_AQ_INSTOP_LOCK;
847 rc = otx2_mbox_process(npa_lf->mbox);
849 otx2_err("Unable to lock POOL in NDC");
857 nix_sqb_unlock(struct rte_mempool *mp)
859 struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
860 struct npa_aq_enq_req *req;
863 req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
864 req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
865 req->ctype = NPA_AQ_CTYPE_AURA;
866 req->op = NPA_AQ_INSTOP_UNLOCK;
868 req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
870 /* The shared memory buffer can be full.
873 otx2_mbox_msg_send(npa_lf->mbox, 0);
874 rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0);
876 otx2_err("Failed to UNLOCK AURA context");
880 req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
882 otx2_err("Failed to UNLOCK POOL context");
886 req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
887 req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
888 req->ctype = NPA_AQ_CTYPE_POOL;
889 req->op = NPA_AQ_INSTOP_UNLOCK;
891 rc = otx2_mbox_process(npa_lf->mbox);
893 otx2_err("Unable to UNLOCK AURA in NDC");
901 otx2_nix_enable_mseg_on_jumbo(struct otx2_eth_rxq *rxq)
903 struct rte_pktmbuf_pool_private *mbp_priv;
904 struct rte_eth_dev *eth_dev;
905 struct otx2_eth_dev *dev;
908 eth_dev = rxq->eth_dev;
909 dev = otx2_eth_pmd_priv(eth_dev);
911 /* Get rx buffer size */
912 mbp_priv = rte_mempool_get_priv(rxq->pool);
913 buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
915 if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
916 dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
917 dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
919 /* Set up the rx[tx]_offload_flags to reflect the change
920 * in rx[tx]_offloads.
922 dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
923 dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);
928 nix_sq_init(struct otx2_eth_txq *txq)
930 struct otx2_eth_dev *dev = txq->dev;
931 struct otx2_mbox *mbox = dev->mbox;
932 struct nix_aq_enq_req *sq;
937 if (txq->sqb_pool->pool_id == 0)
940 rc = otx2_nix_tm_get_leaf_data(dev, txq->sq, &rr_quantum, &smq);
942 otx2_err("Failed to get sq->smq(leaf node), rc=%d", rc);
946 sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
948 sq->ctype = NIX_AQ_CTYPE_SQ;
949 sq->op = NIX_AQ_INSTOP_INIT;
950 sq->sq.max_sqe_size = nix_sq_max_sqe_sz(txq);
953 sq->sq.smq_rr_quantum = rr_quantum;
954 sq->sq.default_chan = dev->tx_chan_base;
955 sq->sq.sqe_stype = NIX_STYPE_STF;
957 if (sq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
958 sq->sq.sqe_stype = NIX_STYPE_STP;
960 npa_lf_aura_handle_to_aura(txq->sqb_pool->pool_id);
961 sq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
962 sq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
963 sq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
964 sq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
966 /* Many to one reduction */
967 sq->sq.qint_idx = txq->sq % dev->qints;
969 rc = otx2_mbox_process(mbox);
973 if (dev->lock_tx_ctx) {
974 sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
976 sq->ctype = NIX_AQ_CTYPE_SQ;
977 sq->op = NIX_AQ_INSTOP_LOCK;
979 rc = otx2_mbox_process(mbox);
986 nix_sq_uninit(struct otx2_eth_txq *txq)
988 struct otx2_eth_dev *dev = txq->dev;
989 struct otx2_mbox *mbox = dev->mbox;
990 struct ndc_sync_op *ndc_req;
991 struct nix_aq_enq_rsp *rsp;
992 struct nix_aq_enq_req *aq;
993 uint16_t sqes_per_sqb;
997 otx2_nix_dbg("Cleaning up sq %u", txq->sq);
999 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
1001 aq->ctype = NIX_AQ_CTYPE_SQ;
1002 aq->op = NIX_AQ_INSTOP_READ;
1004 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1008 /* Check if sq is already cleaned up */
1013 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
1015 aq->ctype = NIX_AQ_CTYPE_SQ;
1016 aq->op = NIX_AQ_INSTOP_WRITE;
1018 aq->sq_mask.ena = ~aq->sq_mask.ena;
1021 rc = otx2_mbox_process(mbox);
1025 if (dev->lock_tx_ctx) {
1027 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
1029 aq->ctype = NIX_AQ_CTYPE_SQ;
1030 aq->op = NIX_AQ_INSTOP_UNLOCK;
1032 rc = otx2_mbox_process(mbox);
1036 nix_sqb_unlock(txq->sqb_pool);
1039 /* Read SQ and free sqb's */
1040 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
1042 aq->ctype = NIX_AQ_CTYPE_SQ;
1043 aq->op = NIX_AQ_INSTOP_READ;
1045 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1049 if (aq->sq.smq_pend)
1050 otx2_err("SQ has pending sqe's");
1052 count = aq->sq.sqb_count;
1053 sqes_per_sqb = 1 << txq->sqes_per_sqb_log2;
1054 /* Free SQB's that are used */
1055 sqb_buf = (void *)rsp->sq.head_sqb;
1059 next_sqb = *(void **)((uintptr_t)sqb_buf + (uint32_t)
1060 ((sqes_per_sqb - 1) *
1061 nix_sq_max_sqe_sz(txq)));
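/* Note on this walk: the last SQE slot of each SQB holds a pointer to
 * the next SQB in the chain (read above as next_sqb), so starting from
 * head_sqb each buffer is returned to the SQB aura and the chain is
 * followed until sqb_count buffers have been freed.
 */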
1062 npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
1068 /* Free the next-to-use SQB */
1069 if (rsp->sq.next_sqb)
1070 npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
1073 /* Sync NDC-NIX-TX for LF */
1074 ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
1075 ndc_req->nix_lf_tx_sync = 1;
1076 rc = otx2_mbox_process(mbox);
1078 otx2_err("Error on NDC-NIX-TX LF sync, rc %d", rc);
1084 nix_sqb_aura_limit_cfg(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
1086 struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
1087 struct npa_aq_enq_req *aura_req;
1089 aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
1090 aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
1091 aura_req->ctype = NPA_AQ_CTYPE_AURA;
1092 aura_req->op = NPA_AQ_INSTOP_WRITE;
1094 aura_req->aura.limit = nb_sqb_bufs;
1095 aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);
1097 return otx2_mbox_process(npa_lf->mbox);
1101 nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
1103 struct otx2_eth_dev *dev = txq->dev;
1104 uint16_t sqes_per_sqb, nb_sqb_bufs;
1105 char name[RTE_MEMPOOL_NAMESIZE];
1106 struct rte_mempool_objsz sz;
1107 struct npa_aura_s *aura;
1108 uint32_t tmp, blk_sz;
1110 aura = (struct npa_aura_s *)((uintptr_t)txq->fc_mem + OTX2_ALIGN);
1111 snprintf(name, sizeof(name), "otx2_sqb_pool_%d_%d", port, txq->sq);
1112 blk_sz = dev->sqb_size;
1114 if (nix_sq_max_sqe_sz(txq) == NIX_MAXSQESZ_W16)
1115 sqes_per_sqb = (dev->sqb_size / 8) / 16;
1117 sqes_per_sqb = (dev->sqb_size / 8) / 8;
1119 nb_sqb_bufs = nb_desc / sqes_per_sqb;
1120 /* Clamp up to the SQB count passed via devargs */
1121 nb_sqb_bufs = RTE_MIN(dev->max_sqb_count, RTE_MAX(NIX_DEF_SQB,
1122 nb_sqb_bufs + NIX_SQB_LIST_SPACE));
1124 txq->sqb_pool = rte_mempool_create_empty(name, NIX_MAX_SQB, blk_sz,
1126 MEMPOOL_F_NO_SPREAD);
1127 txq->nb_sqb_bufs = nb_sqb_bufs;
1128 txq->sqes_per_sqb_log2 = (uint16_t)rte_log2_u32(sqes_per_sqb);
1129 txq->nb_sqb_bufs_adj = nb_sqb_bufs -
1130 RTE_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb;
1131 txq->nb_sqb_bufs_adj =
1132 (NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
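/* Sizing sketch (illustrative): with W16 SQEs of 16 * 8 = 128 B,
 * sqes_per_sqb = sqb_size / 128, so nb_desc descriptors need roughly
 * nb_desc / sqes_per_sqb SQBs (plus NIX_SQB_LIST_SPACE), clamped between
 * NIX_DEF_SQB and the devarg-provided max_sqb_count. nb_sqb_bufs_adj
 * then discounts the per-SQB next-pointer slot and scales the result by
 * NIX_SQB_LOWER_THRESH percent, presumably to leave headroom for the Tx
 * flow-control accounting.
 */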
1134 if (txq->sqb_pool == NULL) {
1135 otx2_err("Failed to allocate sqe mempool");
1139 memset(aura, 0, sizeof(*aura));
1141 aura->fc_addr = txq->fc_iova;
1142 aura->fc_hyst_bits = 0; /* Store count on all updates */
1143 if (rte_mempool_set_ops_byname(txq->sqb_pool, "octeontx2_npa", aura)) {
1144 otx2_err("Failed to set ops for sqe mempool");
1147 if (rte_mempool_populate_default(txq->sqb_pool) < 0) {
1148 otx2_err("Failed to populate sqe mempool");
1152 tmp = rte_mempool_calc_obj_size(blk_sz, MEMPOOL_F_NO_SPREAD, &sz);
1153 if (dev->sqb_size != sz.elt_size) {
1154 otx2_err("sqe pool block size is not expected %d != %d",
1155 dev->sqb_size, tmp);
1159 nix_sqb_aura_limit_cfg(txq->sqb_pool, txq->nb_sqb_bufs);
1160 if (dev->lock_tx_ctx)
1161 nix_sqb_lock(txq->sqb_pool);
1169 otx2_nix_form_default_desc(struct otx2_eth_txq *txq)
1171 struct nix_send_ext_s *send_hdr_ext;
1172 struct nix_send_hdr_s *send_hdr;
1173 struct nix_send_mem_s *send_mem;
1174 union nix_send_sg_s *sg;
1176 /* Initialize the fields based on basic single segment packet */
1177 memset(&txq->cmd, 0, sizeof(txq->cmd));
1179 if (txq->dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
1180 send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
1181 /* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
1182 send_hdr->w0.sizem1 = 2;
1184 send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2];
1185 send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
1186 if (txq->dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
1187 /* Default: one seg packet would have:
1188 * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
1191 send_hdr->w0.sizem1 = 3;
1192 send_hdr_ext->w0.tstmp = 1;
1194 /* The offset of send_mem within the command is
1195 * send_hdr->w0.sizem1 * 2 (in txq->cmd words)
1197 send_mem = (struct nix_send_mem_s *)(txq->cmd +
1198 (send_hdr->w0.sizem1 << 1));
1199 send_mem->subdc = NIX_SUBDC_MEM;
1200 send_mem->alg = NIX_SENDMEMALG_SETTSTMP;
1201 send_mem->addr = txq->dev->tstamp.tx_tstamp_iova;
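/* With sizem1 = 3 the command spans 8 64-bit words: HDR in cmd[0..1],
 * EXT in cmd[2..3], SG + IOVA in cmd[4..5] and the timestamp MEM
 * subdescriptor at cmd[6..7], hence the (sizem1 << 1) offset above.
 */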
1203 sg = (union nix_send_sg_s *)&txq->cmd[4];
1205 send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
1206 /* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
1207 send_hdr->w0.sizem1 = 1;
1208 sg = (union nix_send_sg_s *)&txq->cmd[2];
1211 send_hdr->w0.sq = txq->sq;
1212 sg->subdc = NIX_SUBDC_SG;
1214 sg->ld_type = NIX_SENDLDTYPE_LDD;
1220 otx2_nix_tx_queue_release(void *_txq)
1222 struct otx2_eth_txq *txq = _txq;
1223 struct rte_eth_dev *eth_dev;
1228 eth_dev = txq->dev->eth_dev;
1230 otx2_nix_dbg("Releasing txq %u", txq->sq);
1232 /* Flush and disable tm */
1233 otx2_nix_sq_flush_pre(txq, eth_dev->data->dev_started);
1235 /* Free sqb's and disable sq */
1238 if (txq->sqb_pool) {
1239 rte_mempool_free(txq->sqb_pool);
1240 txq->sqb_pool = NULL;
1242 otx2_nix_sq_flush_post(txq);
1248 otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
1249 uint16_t nb_desc, unsigned int socket_id,
1250 const struct rte_eth_txconf *tx_conf)
1252 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1253 const struct rte_memzone *fc;
1254 struct otx2_eth_txq *txq;
1260 /* Compile-time check to make sure all fast path elements fit in a cache line */
1261 RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_txq, slow_path_start) >= 128);
1263 if (tx_conf->tx_deferred_start) {
1264 otx2_err("Tx deferred start is not supported");
1268 /* Free memory prior to re-allocation if needed. */
1269 if (eth_dev->data->tx_queues[sq] != NULL) {
1270 otx2_nix_dbg("Freeing memory prior to re-allocation %d", sq);
1271 otx2_nix_tx_queue_release(eth_dev->data->tx_queues[sq]);
1272 eth_dev->data->tx_queues[sq] = NULL;
1275 /* Find the expected offloads for this queue */
1276 offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
1278 /* Allocating tx queue data structure */
1279 txq = rte_zmalloc_socket("otx2_ethdev TX queue", sizeof(*txq),
1280 OTX2_ALIGN, socket_id);
1282 otx2_err("Failed to alloc txq=%d", sq);
1288 txq->sqb_pool = NULL;
1289 txq->offloads = offloads;
1290 dev->tx_offloads |= offloads;
1293 * Allocate memory for flow control updates from HW.
1294 * Allocate one cache line so that it fits all FC_STYPE modes.
1296 fc = rte_eth_dma_zone_reserve(eth_dev, "fcmem", sq,
1297 OTX2_ALIGN + sizeof(struct npa_aura_s),
1298 OTX2_ALIGN, dev->node);
1300 otx2_err("Failed to allocate mem for fcmem");
1304 txq->fc_iova = fc->iova;
1305 txq->fc_mem = fc->addr;
1307 /* Initialize the aura sqb pool */
1308 rc = nix_alloc_sqb_pool(eth_dev->data->port_id, txq, nb_desc);
1310 otx2_err("Failed to alloc sqe pool rc=%d", rc);
1314 /* Initialize the SQ */
1315 rc = nix_sq_init(txq);
1317 otx2_err("Failed to init sq=%d context", sq);
1321 txq->fc_cache_pkts = 0;
1322 txq->io_addr = dev->base + NIX_LF_OP_SENDX(0);
1323 /* Evenly distribute LMT slots across SQs */
1324 txq->lmt_addr = (void *)(dev->lmt_addr + ((sq & LMT_SLOT_MASK) << 12));
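/* Note: the shift by 12 gives each SQ a 4 KB LMT region within
 * dev->lmt_addr, and LMT_SLOT_MASK wraps the SQ number so SQs beyond the
 * available slots end up sharing a region.
 */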
1326 txq->qconf.socket_id = socket_id;
1327 txq->qconf.nb_desc = nb_desc;
1328 memcpy(&txq->qconf.conf.tx, tx_conf, sizeof(struct rte_eth_txconf));
1330 txq->lso_tun_fmt = dev->lso_tun_fmt;
1331 otx2_nix_form_default_desc(txq);
1333 otx2_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " sqb=0x%" PRIx64 ""
1334 " lmt_addr=%p nb_sqb_bufs=%d sqes_per_sqb_log2=%d", sq,
1335 fc->addr, offloads, txq->sqb_pool->pool_id, txq->lmt_addr,
1336 txq->nb_sqb_bufs, txq->sqes_per_sqb_log2);
1337 eth_dev->data->tx_queues[sq] = txq;
1338 eth_dev->data->tx_queue_state[sq] = RTE_ETH_QUEUE_STATE_STOPPED;
1342 otx2_nix_tx_queue_release(txq);
1348 nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
1350 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1351 struct otx2_eth_qconf *tx_qconf = NULL;
1352 struct otx2_eth_qconf *rx_qconf = NULL;
1353 struct otx2_eth_txq **txq;
1354 struct otx2_eth_rxq **rxq;
1355 int i, nb_rxq, nb_txq;
1357 nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
1358 nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
1360 tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
1361 if (tx_qconf == NULL) {
1362 otx2_err("Failed to allocate memory for tx_qconf");
1366 rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
1367 if (rx_qconf == NULL) {
1368 otx2_err("Failed to allocate memory for rx_qconf");
1372 txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
1373 for (i = 0; i < nb_txq; i++) {
1374 if (txq[i] == NULL) {
1375 tx_qconf[i].valid = false;
1376 otx2_info("txq[%d] is already released", i);
1379 memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf));
1380 tx_qconf[i].valid = true;
1381 otx2_nix_tx_queue_release(txq[i]);
1382 eth_dev->data->tx_queues[i] = NULL;
1385 rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
1386 for (i = 0; i < nb_rxq; i++) {
1387 if (rxq[i] == NULL) {
1388 rx_qconf[i].valid = false;
1389 otx2_info("rxq[%d] is already released", i);
1392 memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf));
1393 rx_qconf[i].valid = true;
1394 otx2_nix_rx_queue_release(rxq[i]);
1395 eth_dev->data->rx_queues[i] = NULL;
1398 dev->tx_qconf = tx_qconf;
1399 dev->rx_qconf = rx_qconf;
1410 nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
1412 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1413 struct otx2_eth_qconf *tx_qconf = dev->tx_qconf;
1414 struct otx2_eth_qconf *rx_qconf = dev->rx_qconf;
1415 struct otx2_eth_txq **txq;
1416 struct otx2_eth_rxq **rxq;
1417 int rc, i, nb_rxq, nb_txq;
1419 nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
1420 nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
1423 /* Setup tx & rx queues with previous configuration so
1424 * that the queues can be functional in cases like ports
1425 * are started without reconfiguring queues.
1427 * The usual reconfig sequence is as below:
1428 * port_configure() {
1433 * queue_configure() {
1440 * In some applications' control paths, queue_configure() would
1441 * NOT be invoked for TXQs/RXQs in port_configure().
1442 * In such cases, queues can be functional after start as the
1443 * queues are already set up in port_configure().
1445 for (i = 0; i < nb_txq; i++) {
1446 if (!tx_qconf[i].valid)
1448 rc = otx2_nix_tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc,
1449 tx_qconf[i].socket_id,
1450 &tx_qconf[i].conf.tx);
1452 otx2_err("Failed to setup tx queue rc=%d", rc);
1453 txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
1454 for (i -= 1; i >= 0; i--)
1455 otx2_nix_tx_queue_release(txq[i]);
1460 free(tx_qconf); tx_qconf = NULL;
1462 for (i = 0; i < nb_rxq; i++) {
1463 if (!rx_qconf[i].valid)
1465 rc = otx2_nix_rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc,
1466 rx_qconf[i].socket_id,
1467 &rx_qconf[i].conf.rx,
1468 rx_qconf[i].mempool);
1470 otx2_err("Failed to setup rx queue rc=%d", rc);
1471 rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
1472 for (i -= 1; i >= 0; i--)
1473 otx2_nix_rx_queue_release(rxq[i]);
1474 goto release_tx_queues;
1478 free(rx_qconf); rx_qconf = NULL;
1483 txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
1484 for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1485 otx2_nix_tx_queue_release(txq[i]);
1496 nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
1498 RTE_SET_USED(queue);
1499 RTE_SET_USED(mbufs);
1506 nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
1508 /* These dummy functions are required to support
1509 * applications which reconfigure queues without
1510 * stopping the tx and rx burst threads (e.g. the kni app).
1511 * When the queue context is saved, txqs/rxqs are released,
1512 * which would crash the app since rx/tx burst is still
1513 * running on different lcores
1515 eth_dev->tx_pkt_burst = nix_eth_nop_burst;
1516 eth_dev->rx_pkt_burst = nix_eth_nop_burst;
1521 nix_lso_tcp(struct nix_lso_format_cfg *req, bool v4)
1523 volatile struct nix_lso_format *field;
1525 /* Format works only with TCP packets marked by OL3/OL4 */
1526 field = (volatile struct nix_lso_format *)&req->fields[0];
1527 req->field_mask = NIX_LSO_FIELD_MASK;
1528 /* Outer IPv4/IPv6 */
1529 field->layer = NIX_TXLAYER_OL3;
1530 field->offset = v4 ? 2 : 4;
1531 field->sizem1 = 1; /* 2B */
1532 field->alg = NIX_LSOALG_ADD_PAYLEN;
1536 field->layer = NIX_TXLAYER_OL3;
1539 /* Incremented linearly per segment */
1540 field->alg = NIX_LSOALG_ADD_SEGNUM;
1544 /* TCP sequence number update */
1545 field->layer = NIX_TXLAYER_OL4;
1547 field->sizem1 = 3; /* 4 bytes */
1548 field->alg = NIX_LSOALG_ADD_OFFSET;
1550 /* TCP flags field */
1551 field->layer = NIX_TXLAYER_OL4;
1554 field->alg = NIX_LSOALG_TCP_FLAGS;
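/* Summary of the format programmed above: for every generated segment the
 * HW adds the payload length to the outer IP length field, writes a
 * linearly incrementing value into the second OL3 field, adds the running
 * offset to the TCP sequence number and adjusts the TCP flags; this is
 * what lets plain TSO run without per-segment software fix-ups.
 */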
1559 nix_lso_udp_tun_tcp(struct nix_lso_format_cfg *req,
1560 bool outer_v4, bool inner_v4)
1562 volatile struct nix_lso_format *field;
1564 field = (volatile struct nix_lso_format *)&req->fields[0];
1565 req->field_mask = NIX_LSO_FIELD_MASK;
1566 /* Outer IPv4/IPv6 len */
1567 field->layer = NIX_TXLAYER_OL3;
1568 field->offset = outer_v4 ? 2 : 4;
1569 field->sizem1 = 1; /* 2B */
1570 field->alg = NIX_LSOALG_ADD_PAYLEN;
1574 field->layer = NIX_TXLAYER_OL3;
1577 /* Incremented linearly per segment */
1578 field->alg = NIX_LSOALG_ADD_SEGNUM;
1582 /* Outer UDP length */
1583 field->layer = NIX_TXLAYER_OL4;
1586 field->alg = NIX_LSOALG_ADD_PAYLEN;
1589 /* Inner IPv4/IPv6 */
1590 field->layer = NIX_TXLAYER_IL3;
1591 field->offset = inner_v4 ? 2 : 4;
1592 field->sizem1 = 1; /* 2B */
1593 field->alg = NIX_LSOALG_ADD_PAYLEN;
1597 field->layer = NIX_TXLAYER_IL3;
1600 /* Incremented linearly per segment */
1601 field->alg = NIX_LSOALG_ADD_SEGNUM;
1605 /* TCP sequence number update */
1606 field->layer = NIX_TXLAYER_IL4;
1608 field->sizem1 = 3; /* 4 bytes */
1609 field->alg = NIX_LSOALG_ADD_OFFSET;
1612 /* TCP flags field */
1613 field->layer = NIX_TXLAYER_IL4;
1616 field->alg = NIX_LSOALG_TCP_FLAGS;
1621 nix_lso_tun_tcp(struct nix_lso_format_cfg *req,
1622 bool outer_v4, bool inner_v4)
1624 volatile struct nix_lso_format *field;
1626 field = (volatile struct nix_lso_format *)&req->fields[0];
1627 req->field_mask = NIX_LSO_FIELD_MASK;
1628 /* Outer IPv4/IPv6 len */
1629 field->layer = NIX_TXLAYER_OL3;
1630 field->offset = outer_v4 ? 2 : 4;
1631 field->sizem1 = 1; /* 2B */
1632 field->alg = NIX_LSOALG_ADD_PAYLEN;
1636 field->layer = NIX_TXLAYER_OL3;
1639 /* Incremented linearly per segment */
1640 field->alg = NIX_LSOALG_ADD_SEGNUM;
1644 /* Inner IPv4/IPv6 */
1645 field->layer = NIX_TXLAYER_IL3;
1646 field->offset = inner_v4 ? 2 : 4;
1647 field->sizem1 = 1; /* 2B */
1648 field->alg = NIX_LSOALG_ADD_PAYLEN;
1652 field->layer = NIX_TXLAYER_IL3;
1655 /* Incremented linearly per segment */
1656 field->alg = NIX_LSOALG_ADD_SEGNUM;
1660 /* TCP sequence number update */
1661 field->layer = NIX_TXLAYER_IL4;
1663 field->sizem1 = 3; /* 4 bytes */
1664 field->alg = NIX_LSOALG_ADD_OFFSET;
1667 /* TCP flags field */
1668 field->layer = NIX_TXLAYER_IL4;
1671 field->alg = NIX_LSOALG_TCP_FLAGS;
1676 nix_setup_lso_formats(struct otx2_eth_dev *dev)
1678 struct otx2_mbox *mbox = dev->mbox;
1679 struct nix_lso_format_cfg_rsp *rsp;
1680 struct nix_lso_format_cfg *req;
1684 /* Skip if TSO was not requested */
1685 if (!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F))
1690 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1691 nix_lso_tcp(req, true);
1692 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1696 if (rsp->lso_format_idx != NIX_LSO_FORMAT_IDX_TSOV4)
1698 otx2_nix_dbg("tcpv4 lso fmt=%u", rsp->lso_format_idx);
1704 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1705 nix_lso_tcp(req, false);
1706 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1710 if (rsp->lso_format_idx != NIX_LSO_FORMAT_IDX_TSOV6)
1712 otx2_nix_dbg("tcpv6 lso fmt=%u\n", rsp->lso_format_idx);
1715 * IPv4/UDP/TUN HDR/IPv4/TCP LSO
1717 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1718 nix_lso_udp_tun_tcp(req, true, true);
1719 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1723 dev->lso_udp_tun_idx[NIX_LSO_TUN_V4V4] = rsp->lso_format_idx;
1724 otx2_nix_dbg("udp tun v4v4 fmt=%u\n", rsp->lso_format_idx);
1727 * IPv4/UDP/TUN HDR/IPv6/TCP LSO
1729 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1730 nix_lso_udp_tun_tcp(req, true, false);
1731 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1735 dev->lso_udp_tun_idx[NIX_LSO_TUN_V4V6] = rsp->lso_format_idx;
1736 otx2_nix_dbg("udp tun v4v6 fmt=%u\n", rsp->lso_format_idx);
1739 * IPv6/UDP/TUN HDR/IPv4/TCP LSO
1741 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1742 nix_lso_udp_tun_tcp(req, false, true);
1743 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1747 dev->lso_udp_tun_idx[NIX_LSO_TUN_V6V4] = rsp->lso_format_idx;
1748 otx2_nix_dbg("udp tun v6v4 fmt=%u\n", rsp->lso_format_idx);
1751 * IPv6/UDP/TUN HDR/IPv6/TCP LSO
1753 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1754 nix_lso_udp_tun_tcp(req, false, false);
1755 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1759 dev->lso_udp_tun_idx[NIX_LSO_TUN_V6V6] = rsp->lso_format_idx;
1760 otx2_nix_dbg("udp tun v6v6 fmt=%u\n", rsp->lso_format_idx);
1763 * IPv4/TUN HDR/IPv4/TCP LSO
1765 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1766 nix_lso_tun_tcp(req, true, true);
1767 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1771 dev->lso_tun_idx[NIX_LSO_TUN_V4V4] = rsp->lso_format_idx;
1772 otx2_nix_dbg("tun v4v4 fmt=%u\n", rsp->lso_format_idx);
1775 * IPv4/TUN HDR/IPv6/TCP LSO
1777 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1778 nix_lso_tun_tcp(req, true, false);
1779 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1783 dev->lso_tun_idx[NIX_LSO_TUN_V4V6] = rsp->lso_format_idx;
1784 otx2_nix_dbg("tun v4v6 fmt=%u\n", rsp->lso_format_idx);
1787 * IPv6/TUN HDR/IPv4/TCP LSO
1789 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1790 nix_lso_tun_tcp(req, false, true);
1791 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1795 dev->lso_tun_idx[NIX_LSO_TUN_V6V4] = rsp->lso_format_idx;
1796 otx2_nix_dbg("tun v6v4 fmt=%u\n", rsp->lso_format_idx);
1799 * IPv6/TUN HDR/IPv6/TCP LSO
1801 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1802 nix_lso_tun_tcp(req, false, false);
1803 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1807 dev->lso_tun_idx[NIX_LSO_TUN_V6V6] = rsp->lso_format_idx;
1808 otx2_nix_dbg("tun v6v6 fmt=%u\n", rsp->lso_format_idx);
1810 /* Save all tun formats into u64 for fast path.
1811 * Lower 32bit has non-udp tunnel formats.
1812 * Upper 32bit has udp tunnel formats.
1814 fmt = dev->lso_tun_idx;
1815 dev->lso_tun_fmt = ((uint64_t)fmt[NIX_LSO_TUN_V4V4] |
1816 (uint64_t)fmt[NIX_LSO_TUN_V4V6] << 8 |
1817 (uint64_t)fmt[NIX_LSO_TUN_V6V4] << 16 |
1818 (uint64_t)fmt[NIX_LSO_TUN_V6V6] << 24);
1820 fmt = dev->lso_udp_tun_idx;
1821 dev->lso_tun_fmt |= ((uint64_t)fmt[NIX_LSO_TUN_V4V4] << 32 |
1822 (uint64_t)fmt[NIX_LSO_TUN_V4V6] << 40 |
1823 (uint64_t)fmt[NIX_LSO_TUN_V6V4] << 48 |
1824 (uint64_t)fmt[NIX_LSO_TUN_V6V6] << 56);
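/* Resulting layout of lso_tun_fmt: one format index per byte, bytes 0-3
 * holding the non-UDP tunnel formats (V4V4, V4V6, V6V4, V6V6) and bytes
 * 4-7 the UDP tunnel equivalents, so the Tx fast path can presumably
 * fetch an index with a single shift and mask of this u64.
 */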
1830 otx2_nix_configure(struct rte_eth_dev *eth_dev)
1832 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1833 struct rte_eth_dev_data *data = eth_dev->data;
1834 struct rte_eth_conf *conf = &data->dev_conf;
1835 struct rte_eth_rxmode *rxmode = &conf->rxmode;
1836 struct rte_eth_txmode *txmode = &conf->txmode;
1837 char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
1838 struct rte_ether_addr *ea;
1839 uint8_t nb_rxq, nb_txq;
1845 if (rte_eal_has_hugepages() == 0) {
1846 otx2_err("Huge page is not configured");
1847 goto fail_configure;
1850 if (conf->dcb_capability_en == 1) {
1851 otx2_err("dcb enable is not supported");
1852 goto fail_configure;
1855 if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1856 otx2_err("Flow director is not supported");
1857 goto fail_configure;
1860 if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
1861 rxmode->mq_mode != ETH_MQ_RX_RSS) {
1862 otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
1863 goto fail_configure;
1866 if (txmode->mq_mode != ETH_MQ_TX_NONE) {
1867 otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
1868 goto fail_configure;
1871 if (otx2_dev_is_Ax(dev) &&
1872 (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
1873 ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
1874 (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
1875 otx2_err("Outer IP and SCTP checksum unsupported");
1876 goto fail_configure;
1879 /* Free the resources allocated from the previous configure */
1880 if (dev->configured == 1) {
1881 otx2_eth_sec_fini(eth_dev);
1882 otx2_nix_rxchan_bpid_cfg(eth_dev, false);
1883 otx2_nix_vlan_fini(eth_dev);
1884 otx2_nix_mc_addr_list_uninstall(eth_dev);
1885 otx2_flow_free_all_resources(dev);
1886 oxt2_nix_unregister_queue_irqs(eth_dev);
1887 if (eth_dev->data->dev_conf.intr_conf.rxq)
1888 oxt2_nix_unregister_cq_irqs(eth_dev);
1889 nix_set_nop_rxtx_function(eth_dev);
1890 rc = nix_store_queue_cfg_and_then_release(eth_dev);
1892 goto fail_configure;
1893 otx2_nix_tm_fini(eth_dev);
1897 dev->rx_offloads = rxmode->offloads;
1898 dev->tx_offloads = txmode->offloads;
1899 dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
1900 dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);
1901 dev->rss_info.rss_grps = NIX_RSS_GRPS;
1903 nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
1904 nb_txq = RTE_MAX(data->nb_tx_queues, 1);
1906 /* Alloc a nix lf */
1907 rc = nix_lf_alloc(dev, nb_rxq, nb_txq);
1909 otx2_err("Failed to init nix_lf rc=%d", rc);
1913 otx2_nix_err_intr_enb_dis(eth_dev, true);
1914 otx2_nix_ras_intr_enb_dis(eth_dev, true);
1917 dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
1918 otx2_err("Both PTP and switch header enabled");
1922 rc = nix_lf_switch_header_type_enable(dev, true);
1924 otx2_err("Failed to enable switch type nix_lf rc=%d", rc);
1928 rc = nix_setup_lso_formats(dev);
1930 otx2_err("failed to setup nix lso format fields, rc=%d", rc);
1935 rc = otx2_nix_rss_config(eth_dev);
1937 otx2_err("Failed to configure rss rc=%d", rc);
1941 /* Init the default TM scheduler hierarchy */
1942 rc = otx2_nix_tm_init_default(eth_dev);
1944 otx2_err("Failed to init traffic manager rc=%d", rc);
1948 rc = otx2_nix_vlan_offload_init(eth_dev);
1950 otx2_err("Failed to init vlan offload rc=%d", rc);
1954 /* Register queue IRQs */
1955 rc = oxt2_nix_register_queue_irqs(eth_dev);
1957 otx2_err("Failed to register queue interrupts rc=%d", rc);
1961 /* Register cq IRQs */
1962 if (eth_dev->data->dev_conf.intr_conf.rxq) {
1963 if (eth_dev->data->nb_rx_queues > dev->cints) {
1964 otx2_err("Rx interrupt cannot be enabled, rxq > %d",
1968 /* The Rx interrupt feature cannot work with vector mode because
1969 * vector mode doesn't process packets unless a minimum of 4 pkts are
1970 * received, while cq interrupts are generated even for 1 pkt
1973 dev->scalar_ena = true;
1975 rc = oxt2_nix_register_cq_irqs(eth_dev);
1977 otx2_err("Failed to register CQ interrupts rc=%d", rc);
1982 /* Configure loopback mode */
1983 rc = cgx_intlbk_enable(dev, eth_dev->data->dev_conf.lpbk_mode);
1985 otx2_err("Failed to configure cgx loop back mode rc=%d", rc);
1989 rc = otx2_nix_rxchan_bpid_cfg(eth_dev, true);
1991 otx2_err("Failed to configure nix rx chan bpid cfg rc=%d", rc);
1995 /* Enable security */
1996 rc = otx2_eth_sec_init(eth_dev);
2000 rc = otx2_nix_flow_ctrl_init(eth_dev);
2002 otx2_err("Failed to init flow ctrl mode %d", rc);
2006 rc = otx2_nix_mc_addr_list_install(eth_dev);
2008 otx2_err("Failed to install mc address list rc=%d", rc);
2013 * Restore the queue config when a reconfigure is followed by another
2014 * reconfigure and no queue_configure() is invoked from the application.
2016 if (dev->configured == 1) {
2017 rc = nix_restore_queue_cfg(eth_dev);
2019 goto uninstall_mc_list;
2022 /* Update the mac address */
2023 ea = eth_dev->data->mac_addrs;
2024 memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
2025 if (rte_is_zero_ether_addr(ea))
2026 rte_eth_random_addr((uint8_t *)ea);
2028 rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
2030 /* Apply new link configurations if changed */
2031 rc = otx2_apply_link_speed(eth_dev);
2033 otx2_err("Failed to set link configuration");
2034 goto uninstall_mc_list;
2037 otx2_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
2038 " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 ""
2039 " rx_flags=0x%x tx_flags=0x%x",
2040 eth_dev->data->port_id, ea_fmt, nb_rxq,
2041 nb_txq, dev->rx_offloads, dev->tx_offloads,
2042 dev->rx_offload_flags, dev->tx_offload_flags);
2045 dev->configured = 1;
2046 dev->configured_nb_rx_qs = data->nb_rx_queues;
2047 dev->configured_nb_tx_qs = data->nb_tx_queues;
2051 otx2_nix_mc_addr_list_uninstall(eth_dev);
2053 otx2_eth_sec_fini(eth_dev);
2055 oxt2_nix_unregister_cq_irqs(eth_dev);
2057 oxt2_nix_unregister_queue_irqs(eth_dev);
2059 otx2_nix_vlan_fini(eth_dev);
2061 otx2_nix_tm_fini(eth_dev);
2065 dev->rx_offload_flags &= ~nix_rx_offload_flags(eth_dev);
2066 dev->tx_offload_flags &= ~nix_tx_offload_flags(eth_dev);
2068 dev->configured = 0;
2073 otx2_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
2075 struct rte_eth_dev_data *data = eth_dev->data;
2076 struct otx2_eth_txq *txq;
2079 txq = eth_dev->data->tx_queues[qidx];
2081 if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
2084 rc = otx2_nix_sq_sqb_aura_fc(txq, true);
2086 otx2_err("Failed to enable sqb aura fc, txq=%u, rc=%d",
2091 data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
2098 otx2_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
2100 struct rte_eth_dev_data *data = eth_dev->data;
2101 struct otx2_eth_txq *txq;
2104 txq = eth_dev->data->tx_queues[qidx];
2106 if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
2109 txq->fc_cache_pkts = 0;
2111 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
2113 otx2_err("Failed to disable sqb aura fc, txq=%u, rc=%d",
2118 data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
2125 otx2_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
2127 struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
2128 struct rte_eth_dev_data *data = eth_dev->data;
2131 if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
2134 rc = nix_rq_enb_dis(rxq->eth_dev, rxq, true);
2136 otx2_err("Failed to enable rxq=%u, rc=%d", qidx, rc);
2140 data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
2147 otx2_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
2149 struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
2150 struct rte_eth_dev_data *data = eth_dev->data;
2153 if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
2156 rc = nix_rq_enb_dis(rxq->eth_dev, rxq, false);
2158 otx2_err("Failed to disable rxq=%u, rc=%d", qidx, rc);
2162 data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
2169 otx2_nix_dev_stop(struct rte_eth_dev *eth_dev)
2171 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2172 struct rte_mbuf *rx_pkts[32];
2173 struct otx2_eth_rxq *rxq;
2174 int count, i, j, rc;
2176 nix_lf_switch_header_type_enable(dev, false);
2177 nix_cgx_stop_link_event(dev);
2178 npc_rx_disable(dev);
2180 /* Stop rx queues and free up pending pkts */
2181 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
2182 rc = otx2_nix_rx_queue_stop(eth_dev, i);
2186 rxq = eth_dev->data->rx_queues[i];
2187 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
2189 for (j = 0; j < count; j++)
2190 rte_pktmbuf_free(rx_pkts[j]);
2191 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
2195 /* Stop tx queues */
2196 for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
2197 otx2_nix_tx_queue_stop(eth_dev, i);
2203 otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
2205 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2208 /* MTU recalculation should be avoided here if PTP is enabled by the PF, as
2209 * otx2_nix_recalc_mtu would be invoked during otx2_nix_ptp_enable_vf
2212 if (eth_dev->data->nb_rx_queues != 0 && !otx2_ethdev_is_ptp_en(dev)) {
2213 rc = otx2_nix_recalc_mtu(eth_dev);
2218 /* Start rx queues */
2219 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
2220 rc = otx2_nix_rx_queue_start(eth_dev, i);
2225 /* Start tx queues */
2226 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2227 rc = otx2_nix_tx_queue_start(eth_dev, i);
2232 rc = otx2_nix_update_flow_ctrl_mode(eth_dev);
2234 otx2_err("Failed to update flow ctrl mode %d", rc);
2238 /* Enable PTP if it was requested by the app or if it is already
2239 * enabled in PF owning this VF
2241 memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
2242 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
2243 otx2_ethdev_is_ptp_en(dev))
2244 otx2_nix_timesync_enable(eth_dev);
2246 otx2_nix_timesync_disable(eth_dev);
2248 /* Inform the VF that the data offset is shifted by 8 bytes if PTP is
2249 * already enabled in the PF owning this VF
2251 if (otx2_ethdev_is_ptp_en(dev) && otx2_dev_is_vf(dev))
2252 otx2_nix_ptp_enable_vf(eth_dev);
2254 if (dev->rx_offload_flags & NIX_RX_OFFLOAD_TSTAMP_F) {
2255 rc = rte_mbuf_dyn_rx_timestamp_register(
2256 &dev->tstamp.tstamp_dynfield_offset,
2257 &dev->tstamp.rx_tstamp_dynflag);
2259 otx2_err("Failed to register Rx timestamp field/flag");
2264 rc = npc_rx_enable(dev);
2266 otx2_err("Failed to enable NPC rx %d", rc);
2270 otx2_nix_toggle_flag_link_cfg(dev, true);
2272 rc = nix_cgx_start_link_event(dev);
2274 otx2_err("Failed to start cgx link event %d", rc);
2278 otx2_nix_toggle_flag_link_cfg(dev, false);
2279 otx2_eth_set_tx_function(eth_dev);
2280 otx2_eth_set_rx_function(eth_dev);
2285 npc_rx_disable(dev);
2286 otx2_nix_toggle_flag_link_cfg(dev, false);
2290 static int otx2_nix_dev_reset(struct rte_eth_dev *eth_dev);
2291 static int otx2_nix_dev_close(struct rte_eth_dev *eth_dev);
2293 /* Initialize and register the driver with the DPDK application */
2294 static const struct eth_dev_ops otx2_eth_dev_ops = {
2295 .dev_infos_get = otx2_nix_info_get,
2296 .dev_configure = otx2_nix_configure,
2297 .link_update = otx2_nix_link_update,
2298 .tx_queue_setup = otx2_nix_tx_queue_setup,
2299 .tx_queue_release = otx2_nix_tx_queue_release,
2300 .tm_ops_get = otx2_nix_tm_ops_get,
2301 .rx_queue_setup = otx2_nix_rx_queue_setup,
2302 .rx_queue_release = otx2_nix_rx_queue_release,
2303 .dev_start = otx2_nix_dev_start,
2304 .dev_stop = otx2_nix_dev_stop,
2305 .dev_close = otx2_nix_dev_close,
2306 .tx_queue_start = otx2_nix_tx_queue_start,
2307 .tx_queue_stop = otx2_nix_tx_queue_stop,
2308 .rx_queue_start = otx2_nix_rx_queue_start,
2309 .rx_queue_stop = otx2_nix_rx_queue_stop,
2310 .dev_set_link_up = otx2_nix_dev_set_link_up,
2311 .dev_set_link_down = otx2_nix_dev_set_link_down,
2312 .dev_supported_ptypes_get = otx2_nix_supported_ptypes_get,
2313 .dev_ptypes_set = otx2_nix_ptypes_set,
2314 .dev_reset = otx2_nix_dev_reset,
2315 .stats_get = otx2_nix_dev_stats_get,
2316 .stats_reset = otx2_nix_dev_stats_reset,
2317 .get_reg = otx2_nix_dev_get_reg,
2318 .mtu_set = otx2_nix_mtu_set,
2319 .mac_addr_add = otx2_nix_mac_addr_add,
2320 .mac_addr_remove = otx2_nix_mac_addr_del,
2321 .mac_addr_set = otx2_nix_mac_addr_set,
2322 .set_mc_addr_list = otx2_nix_set_mc_addr_list,
2323 .promiscuous_enable = otx2_nix_promisc_enable,
2324 .promiscuous_disable = otx2_nix_promisc_disable,
2325 .allmulticast_enable = otx2_nix_allmulticast_enable,
2326 .allmulticast_disable = otx2_nix_allmulticast_disable,
2327 .queue_stats_mapping_set = otx2_nix_queue_stats_mapping,
2328 .reta_update = otx2_nix_dev_reta_update,
2329 .reta_query = otx2_nix_dev_reta_query,
2330 .rss_hash_update = otx2_nix_rss_hash_update,
2331 .rss_hash_conf_get = otx2_nix_rss_hash_conf_get,
2332 .xstats_get = otx2_nix_xstats_get,
2333 .xstats_get_names = otx2_nix_xstats_get_names,
2334 .xstats_reset = otx2_nix_xstats_reset,
2335 .xstats_get_by_id = otx2_nix_xstats_get_by_id,
2336 .xstats_get_names_by_id = otx2_nix_xstats_get_names_by_id,
2337 .rxq_info_get = otx2_nix_rxq_info_get,
2338 .txq_info_get = otx2_nix_txq_info_get,
2339 .rx_burst_mode_get = otx2_rx_burst_mode_get,
2340 .tx_burst_mode_get = otx2_tx_burst_mode_get,
2341 .tx_done_cleanup = otx2_nix_tx_done_cleanup,
2342 .set_queue_rate_limit = otx2_nix_tm_set_queue_rate_limit,
2343 .pool_ops_supported = otx2_nix_pool_ops_supported,
2344 .flow_ops_get = otx2_nix_dev_flow_ops_get,
2345 .get_module_info = otx2_nix_get_module_info,
2346 .get_module_eeprom = otx2_nix_get_module_eeprom,
2347 .fw_version_get = otx2_nix_fw_version_get,
2348 .flow_ctrl_get = otx2_nix_flow_ctrl_get,
2349 .flow_ctrl_set = otx2_nix_flow_ctrl_set,
2350 .timesync_enable = otx2_nix_timesync_enable,
2351 .timesync_disable = otx2_nix_timesync_disable,
2352 .timesync_read_rx_timestamp = otx2_nix_timesync_read_rx_timestamp,
2353 .timesync_read_tx_timestamp = otx2_nix_timesync_read_tx_timestamp,
2354 .timesync_adjust_time = otx2_nix_timesync_adjust_time,
2355 .timesync_read_time = otx2_nix_timesync_read_time,
2356 .timesync_write_time = otx2_nix_timesync_write_time,
2357 .vlan_offload_set = otx2_nix_vlan_offload_set,
2358 .vlan_filter_set = otx2_nix_vlan_filter_set,
2359 .vlan_strip_queue_set = otx2_nix_vlan_strip_queue_set,
2360 .vlan_tpid_set = otx2_nix_vlan_tpid_set,
2361 .vlan_pvid_set = otx2_nix_vlan_pvid_set,
2362 .rx_queue_intr_enable = otx2_nix_rx_queue_intr_enable,
2363 .rx_queue_intr_disable = otx2_nix_rx_queue_intr_disable,
.read_clock = otx2_nix_read_clock,
};
2368 nix_lf_attach(struct otx2_eth_dev *dev)
2370 struct otx2_mbox *mbox = dev->mbox;
2371 struct rsrc_attach_req *req;
2373 /* Attach NIX(lf) */
2374 req = otx2_mbox_alloc_msg_attach_resources(mbox);
req->modify = true;
req->nixlf = true;

return otx2_mbox_process(mbox);
2382 nix_lf_get_msix_offset(struct otx2_eth_dev *dev)
2384 struct otx2_mbox *mbox = dev->mbox;
struct msix_offset_rsp *msix_rsp;
int rc;
2388 /* Get NPA and NIX MSIX vector offsets */
2389 otx2_mbox_alloc_msg_msix_offset(mbox);
2391 rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
dev->nix_msixoff = msix_rsp->nix_msixoff;

return rc;
2399 otx2_eth_dev_lf_detach(struct otx2_mbox *mbox)
2401 struct rsrc_detach_req *req;
2403 req = otx2_mbox_alloc_msg_detach_resources(mbox);
2405 /* Detach all except npa lf */
2406 req->partial = true;
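/*
 * A partial detach releases only the resource types flagged in this
 * request, so the NPA LF shared with other otx2 PMDs stays attached.
 */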
req->nixlf = true;
req->sso = true;
req->ssow = true;
req->timlfs = true;
req->cptlfs = true;

return otx2_mbox_process(mbox);
2417 otx2_eth_dev_is_sdp(struct rte_pci_device *pci_dev)
2419 if (pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_PF ||
pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_VF)
return true;

return false;
}
2425 static inline uint64_t
2426 nix_get_blkaddr(struct otx2_eth_dev *dev)
uint64_t reg;

/* Read the discovery register to know which NIX block the LF
 * is attached to.
 */
2433 reg = otx2_read64(dev->bar2 +
2434 RVU_PF_BLOCK_ADDRX_DISC(RVU_BLOCK_ADDR_NIX0));
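/* The low bits of the discovery register report whether any LF from
 * NIX0 is provisioned to this function; if none, the LF lives on NIX1.
 */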
2436 return reg & 0x1FFULL ? RVU_BLOCK_ADDR_NIX0 : RVU_BLOCK_ADDR_NIX1;
2440 otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
2442 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2443 struct rte_pci_device *pci_dev;
2444 int rc, max_entries;
2446 eth_dev->dev_ops = &otx2_eth_dev_ops;
2447 eth_dev->rx_descriptor_done = otx2_nix_rx_descriptor_done;
2448 eth_dev->rx_queue_count = otx2_nix_rx_queue_count;
2449 eth_dev->rx_descriptor_status = otx2_nix_rx_descriptor_status;
2450 eth_dev->tx_descriptor_status = otx2_nix_tx_descriptor_status;
2452 /* For secondary processes, the primary has done all the work */
2453 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2454 /* Setup callbacks for secondary process */
2455 otx2_eth_set_tx_function(eth_dev);
otx2_eth_set_rx_function(eth_dev);
return 0;
}
2460 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2462 rte_eth_copy_pci_info(eth_dev, pci_dev);
2463 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2465 /* Zero out everything after OTX2_DEV to allow proper dev_reset() */
2466 memset(&dev->otx2_eth_dev_data_start, 0, sizeof(*dev) -
2467 offsetof(struct otx2_eth_dev, otx2_eth_dev_data_start));
2469 /* Parse devargs string */
2470 rc = otx2_ethdev_parse_devargs(eth_dev->device->devargs, dev);
if (rc) {
otx2_err("Failed to parse devargs rc=%d", rc);
goto error;
}
2476 if (!dev->mbox_active) {
/* Initialize the base otx2_dev object only if it is
 * not already initialized (no active mbox).
 */
2480 rc = otx2_dev_init(pci_dev, dev);
if (rc) {
otx2_err("Failed to initialize otx2_dev rc=%d", rc);
goto error;
}
}
if (otx2_eth_dev_is_sdp(pci_dev))
dev->sdp_link = true;
else
dev->sdp_link = false;
2490 /* Device generic callbacks */
2491 dev->ops = &otx2_dev_ops;
2492 dev->eth_dev = eth_dev;
2494 /* Grab the NPA LF if required */
2495 rc = otx2_npa_lf_init(pci_dev, dev);
if (rc)
goto otx2_dev_uninit;
2499 dev->configured = 0;
2500 dev->drv_inited = true;
2501 dev->ptype_disable = 0;
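/* Each RVU block's CSRs occupy a 1 MB window in BAR2, so a block's base
 * is located by shifting its block address left by 20 bits; the same
 * scheme is used for the NIX LF base computed after the attach below.
 */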
2502 dev->lmt_addr = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);
2505 rc = nix_lf_attach(dev);
if (rc)
goto otx2_npa_uninit;
2509 dev->base = dev->bar2 + (nix_get_blkaddr(dev) << 20);
2511 /* Get NIX MSIX offset */
2512 rc = nix_lf_get_msix_offset(dev);
if (rc)
goto otx2_npa_uninit;
2516 /* Register LF irq handlers */
2517 rc = otx2_nix_register_irqs(eth_dev);
2521 /* Get maximum number of supported MAC entries */
2522 max_entries = otx2_cgx_mac_max_entries_get(dev);
2523 if (max_entries < 0) {
2524 otx2_err("Failed to get max entries for mac addr");
rc = -ENOTSUP;
goto unregister_irq;
}
2529 /* For VFs, returned max_entries will be 0. But to keep default MAC
2530 * address, one entry must be allocated. So setting up to 1.
2532 if (max_entries == 0)
2535 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", max_entries *
2536 RTE_ETHER_ADDR_LEN, 0);
2537 if (eth_dev->data->mac_addrs == NULL) {
2538 otx2_err("Failed to allocate memory for mac addr");
rc = -ENOMEM;
goto unregister_irq;
}
2543 dev->max_mac_entries = max_entries;
2545 rc = otx2_nix_mac_addr_get(eth_dev, dev->mac_addr);
if (rc)
goto free_mac_addrs;
2549 /* Update the mac address */
2550 memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);
2552 /* Also sync same MAC address to CGX table */
otx2_cgx_mac_addr_set(eth_dev, &eth_dev->data->mac_addrs[0]);
2555 /* Initialize the tm data structures */
2556 otx2_nix_tm_conf_init(eth_dev);
2558 dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
2559 dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
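/* Apply fixups needed by early silicon revisions (96xx A0, 95xx Ax) */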
2561 if (otx2_dev_is_96xx_A0(dev) ||
2562 otx2_dev_is_95xx_Ax(dev)) {
2563 dev->hwcap |= OTX2_FIXUP_F_MIN_4K_Q;
dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL;
}
2567 /* Create security ctx */
2568 rc = otx2_eth_sec_ctx_create(eth_dev);
if (rc)
goto free_mac_addrs;
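/* Advertise security (inline IPsec) offload only now that the
 * security context exists.
 */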
2571 dev->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
2572 dev->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
2574 /* Initialize rte-flow */
2575 rc = otx2_flow_init(dev);
if (rc)
goto sec_ctx_destroy;
2579 otx2_nix_mc_filter_init(dev);
2581 otx2_nix_dbg("Port=%d pf=%d vf=%d ver=%s msix_off=%d hwcap=0x%" PRIx64
2582 " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
2583 eth_dev->data->port_id, dev->pf, dev->vf,
2584 OTX2_ETH_DEV_PMD_VERSION, dev->nix_msixoff, dev->hwcap,
2585 dev->rx_offload_capa, dev->tx_offload_capa);
return 0;

sec_ctx_destroy:
otx2_eth_sec_ctx_destroy(eth_dev);
free_mac_addrs:
rte_free(eth_dev->data->mac_addrs);
unregister_irq:
otx2_nix_unregister_irqs(eth_dev);
otx2_eth_dev_lf_detach(dev->mbox);
otx2_npa_uninit:
otx2_npa_lf_fini();
otx2_dev_uninit:
otx2_dev_fini(pci_dev, dev);
error:
otx2_err("Failed to init nix eth_dev rc=%d", rc);
return rc;
}
2606 otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
2608 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
struct rte_pci_device *pci_dev;
int rc, i;
2612 /* Nothing to be done for secondary processes */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
2616 /* Clear the flag since we are closing down */
2617 dev->configured = 0;
2619 /* Disable nix bpid config */
2620 otx2_nix_rxchan_bpid_cfg(eth_dev, false);
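/* Stop NPC Rx so no new packets arrive while resources are torn down */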
2622 npc_rx_disable(dev);
2624 /* Disable vlan offloads */
2625 otx2_nix_vlan_fini(eth_dev);
2627 /* Disable other rte_flow entries */
2628 otx2_flow_fini(dev);
2630 /* Free multicast filter list */
2631 otx2_nix_mc_filter_fini(dev);
2633 /* Disable PTP if already enabled */
2634 if (otx2_ethdev_is_ptp_en(dev))
2635 otx2_nix_timesync_disable(eth_dev);
2637 nix_cgx_stop_link_event(dev);
/* Unregister the dev ops, this is required to stop VFs from
 * receiving link status updates on exit path.
 */
dev->ops = NULL;
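/* Free up SQ's and Tx queues */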
2645 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2646 otx2_nix_tx_queue_release(eth_dev->data->tx_queues[i]);
eth_dev->data->tx_queues[i] = NULL;
}
2649 eth_dev->data->nb_tx_queues = 0;
2651 /* Free up RQ's and CQ's */
2652 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
2653 otx2_nix_rx_queue_release(eth_dev->data->rx_queues[i]);
eth_dev->data->rx_queues[i] = NULL;
}
2656 eth_dev->data->nb_rx_queues = 0;
2658 /* Free tm resources */
2659 rc = otx2_nix_tm_fini(eth_dev);
if (rc)
otx2_err("Failed to cleanup tm, rc=%d", rc);
2663 /* Unregister queue irqs */
2664 oxt2_nix_unregister_queue_irqs(eth_dev);
2666 /* Unregister cq irqs */
2667 if (eth_dev->data->dev_conf.intr_conf.rxq)
2668 oxt2_nix_unregister_cq_irqs(eth_dev);
2670 rc = nix_lf_free(dev);
if (rc)
otx2_err("Failed to free nix lf, rc=%d", rc);
2674 rc = otx2_npa_lf_fini();
if (rc)
otx2_err("Failed to cleanup npa lf, rc=%d", rc);
2678 /* Disable security */
2679 otx2_eth_sec_fini(eth_dev);
2681 /* Destroy security ctx */
2682 otx2_eth_sec_ctx_destroy(eth_dev);
2684 rte_free(eth_dev->data->mac_addrs);
2685 eth_dev->data->mac_addrs = NULL;
2686 dev->drv_inited = false;
2688 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2689 otx2_nix_unregister_irqs(eth_dev);
2691 rc = otx2_eth_dev_lf_detach(dev->mbox);
if (rc)
otx2_err("Failed to detach resources, rc=%d", rc);
/* Check if mbox close is needed */
if (!mbox_close)
return 0;
2699 if (otx2_npa_lf_active(dev) || otx2_dev_active_vfs(dev)) {
2700 /* Will be freed later by PMD */
eth_dev->data->dev_private = NULL;
return 0;
}

otx2_dev_fini(pci_dev, dev);
return 0;
}
2710 otx2_nix_dev_close(struct rte_eth_dev *eth_dev)
otx2_eth_dev_uninit(eth_dev, true);
return 0;
}
2717 otx2_nix_dev_reset(struct rte_eth_dev *eth_dev)
int rc;

rc = otx2_eth_dev_uninit(eth_dev, false);
if (rc)
return rc;
2725 return otx2_eth_dev_init(eth_dev);
2729 nix_remove(struct rte_pci_device *pci_dev)
2731 struct rte_eth_dev *eth_dev;
2732 struct otx2_idev_cfg *idev;
struct otx2_dev *otx2_dev;
int rc;
2736 eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
if (eth_dev) {
/* Cleanup eth dev */
rc = otx2_eth_dev_uninit(eth_dev, true);
if (rc)
return rc;

rte_eth_dev_release_port(eth_dev);
}
2746 /* Nothing to be done for secondary processes */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
2750 /* Check for common resources */
2751 idev = otx2_intra_dev_get_cfg();
if (!idev || !idev->npa_lf || idev->npa_lf->pci_dev != pci_dev)
return 0;
2755 otx2_dev = container_of(idev->npa_lf, struct otx2_dev, npalf);
if (otx2_npa_lf_active(otx2_dev) || otx2_dev_active_vfs(otx2_dev))
goto exit;
2760 /* Safe to cleanup mbox as no more users */
otx2_dev_fini(pci_dev, otx2_dev);

return 0;

exit:
otx2_info("%s: common resource in use by other devices", pci_dev->name);
return -EAGAIN;
}
2771 nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
int rc;

RTE_SET_USED(pci_drv);
rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct otx2_eth_dev),
otx2_eth_dev_init);

/* On error on secondary, recheck if port exists in primary or
 * in mid of detach state.
 */
if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
if (!rte_eth_dev_allocated(pci_dev->device.name))
return 0;

return rc;
}
static const struct rte_pci_id pci_nix_map[] = {
{
RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF)
},
{
RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF)
},
{
RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OCTEONTX2_RVU_AF_VF)
},
{
RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OCTEONTX2_RVU_SDP_PF)
},
{
RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OCTEONTX2_RVU_SDP_VF)
},
{
.vendor_id = 0,
},
};
2813 static struct rte_pci_driver pci_nix = {
2814 .id_table = pci_nix_map,
2815 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
2816 RTE_PCI_DRV_INTR_LSC,
.probe = nix_probe,
.remove = nix_remove,
};
2821 RTE_PMD_REGISTER_PCI(OCTEONTX2_PMD, pci_nix);
2822 RTE_PMD_REGISTER_PCI_TABLE(OCTEONTX2_PMD, pci_nix_map);
2823 RTE_PMD_REGISTER_KMOD_DEP(OCTEONTX2_PMD, "vfio-pci");