1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
7 #include <rte_ethdev_pci.h>
9 #include <rte_malloc.h>
11 #include <rte_mbuf_pool_ops.h>
12 #include <rte_mempool.h>
14 #include "otx2_ethdev.h"
15 #include "otx2_ethdev_sec.h"
17 static inline uint64_t
18 nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
20 uint64_t capa = NIX_RX_OFFLOAD_CAPA;
22 if (otx2_dev_is_vf(dev) ||
23 dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG)
24 capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
29 static inline uint64_t
30 nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
32 uint64_t capa = NIX_TX_OFFLOAD_CAPA;
34 /* TSO not supported for earlier chip revisions */
35 if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
36 capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
37 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
38 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
39 DEV_TX_OFFLOAD_GRE_TNL_TSO);
43 static const struct otx2_dev_ops otx2_dev_ops = {
44 .link_status_update = otx2_eth_dev_link_status_update,
45 .ptp_info_update = otx2_eth_dev_ptp_info_update
49 nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
51 struct otx2_mbox *mbox = dev->mbox;
52 struct nix_lf_alloc_req *req;
53 struct nix_lf_alloc_rsp *rsp;
56 req = otx2_mbox_alloc_msg_nix_lf_alloc(mbox);
60 /* XQE_SZ should be in sync with NIX_CQ_ENTRY_SZ */
61 RTE_BUILD_BUG_ON(NIX_CQ_ENTRY_SZ != 128);
62 req->xqe_sz = NIX_XQESZ_W16;
63 req->rss_sz = dev->rss_info.rss_size;
64 req->rss_grps = NIX_RSS_GRPS;
65 req->npa_func = otx2_npa_pf_func_get();
66 req->sso_func = otx2_sso_pf_func_get();
67 req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
68 if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
69 DEV_RX_OFFLOAD_UDP_CKSUM)) {
70 req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
71 req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
73 req->rx_cfg |= (BIT_ULL(32 /* DROP_RE */) |
74 BIT_ULL(33 /* Outer L2 Length */) |
75 BIT_ULL(38 /* Inner L4 UDP Length */) |
76 BIT_ULL(39 /* Inner L3 Length */) |
77 BIT_ULL(40 /* Outer L4 UDP Length */) |
78 BIT_ULL(41 /* Outer L3 Length */));
80 if (dev->rss_tag_as_xor == 0)
81 req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;
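/* Assumption from the flag name: unless the rss_tag_as_xor devarg is set,
 * the AF is asked to derive the RSS group by treating the tag LSBs as an
 * adder rather than XOR-folding the tag.
 */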
83 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
87 dev->sqb_size = rsp->sqb_size;
88 dev->tx_chan_base = rsp->tx_chan_base;
89 dev->rx_chan_base = rsp->rx_chan_base;
90 dev->rx_chan_cnt = rsp->rx_chan_cnt;
91 dev->tx_chan_cnt = rsp->tx_chan_cnt;
92 dev->lso_tsov4_idx = rsp->lso_tsov4_idx;
93 dev->lso_tsov6_idx = rsp->lso_tsov6_idx;
94 dev->lf_tx_stats = rsp->lf_tx_stats;
95 dev->lf_rx_stats = rsp->lf_rx_stats;
96 dev->cints = rsp->cints;
97 dev->qints = rsp->qints;
98 dev->npc_flow.channel = dev->rx_chan_base;
99 dev->ptp_en = rsp->hw_rx_tstamp_en;
105 nix_lf_switch_header_type_enable(struct otx2_eth_dev *dev, bool enable)
107 struct otx2_mbox *mbox = dev->mbox;
108 struct npc_set_pkind *req;
109 struct msg_resp *rsp;
112 if (dev->npc_flow.switch_header_type == 0)
115 if (dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_LEN_90B &&
116 !otx2_dev_is_sdp(dev)) {
117 otx2_err("chlen90b is not supported on non-SDP device");
121 /* Notify the AF about the HIGIG2 config */
122 req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
123 req->mode = dev->npc_flow.switch_header_type;
125 req->mode = OTX2_PRIV_FLAGS_DEFAULT;
127 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
130 req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
131 req->mode = dev->npc_flow.switch_header_type;
133 req->mode = OTX2_PRIV_FLAGS_DEFAULT;
135 return otx2_mbox_process_msg(mbox, (void *)&rsp);
139 nix_lf_free(struct otx2_eth_dev *dev)
141 struct otx2_mbox *mbox = dev->mbox;
142 struct nix_lf_free_req *req;
143 struct ndc_sync_op *ndc_req;
146 /* Sync NDC-NIX for LF */
147 ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
148 ndc_req->nix_lf_tx_sync = 1;
149 ndc_req->nix_lf_rx_sync = 1;
150 rc = otx2_mbox_process(mbox);
152 otx2_err("Error on NDC-NIX-[TX, RX] LF sync, rc %d", rc);
154 req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
155 /* Let the AF driver free all of this NIX LF's
156 * NPC entries allocated via the NPC mailbox.
160 return otx2_mbox_process(mbox);
164 otx2_cgx_rxtx_start(struct otx2_eth_dev *dev)
166 struct otx2_mbox *mbox = dev->mbox;
168 if (otx2_dev_is_vf_or_sdp(dev))
171 otx2_mbox_alloc_msg_cgx_start_rxtx(mbox);
173 return otx2_mbox_process(mbox);
177 otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev)
179 struct otx2_mbox *mbox = dev->mbox;
181 if (otx2_dev_is_vf_or_sdp(dev))
184 otx2_mbox_alloc_msg_cgx_stop_rxtx(mbox);
186 return otx2_mbox_process(mbox);
190 npc_rx_enable(struct otx2_eth_dev *dev)
192 struct otx2_mbox *mbox = dev->mbox;
194 otx2_mbox_alloc_msg_nix_lf_start_rx(mbox);
196 return otx2_mbox_process(mbox);
200 npc_rx_disable(struct otx2_eth_dev *dev)
202 struct otx2_mbox *mbox = dev->mbox;
204 otx2_mbox_alloc_msg_nix_lf_stop_rx(mbox);
206 return otx2_mbox_process(mbox);
210 nix_cgx_start_link_event(struct otx2_eth_dev *dev)
212 struct otx2_mbox *mbox = dev->mbox;
214 if (otx2_dev_is_vf_or_sdp(dev))
217 otx2_mbox_alloc_msg_cgx_start_linkevents(mbox);
219 return otx2_mbox_process(mbox);
223 cgx_intlbk_enable(struct otx2_eth_dev *dev, bool en)
225 struct otx2_mbox *mbox = dev->mbox;
227 if (en && otx2_dev_is_vf_or_sdp(dev))
231 otx2_mbox_alloc_msg_cgx_intlbk_enable(mbox);
233 otx2_mbox_alloc_msg_cgx_intlbk_disable(mbox);
235 return otx2_mbox_process(mbox);
239 nix_cgx_stop_link_event(struct otx2_eth_dev *dev)
241 struct otx2_mbox *mbox = dev->mbox;
243 if (otx2_dev_is_vf_or_sdp(dev))
246 otx2_mbox_alloc_msg_cgx_stop_linkevents(mbox);
248 return otx2_mbox_process(mbox);
252 nix_rx_queue_reset(struct otx2_eth_rxq *rxq)
258 static inline uint32_t
259 nix_qsize_to_val(enum nix_q_size_e qsize)
261 return (16UL << (qsize * 2));
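/* Illustration, derived from the formula above: each nix_q_size_e step
 * quadruples the ring depth, i.e. qsize 0 -> 16 entries, 1 -> 64,
 * 2 -> 256, 3 -> 1K, 4 -> 4K, ...
 */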
264 static inline enum nix_q_size_e
265 nix_qsize_clampup_get(struct otx2_eth_dev *dev, uint32_t val)
269 if (otx2_ethdev_fixup_is_min_4k_q(dev))
274 for (; i < nix_q_size_max; i++)
275 if (val <= nix_qsize_to_val(i))
278 if (i >= nix_q_size_max)
279 i = nix_q_size_max - 1;
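/* The loop above selects the smallest queue size whose capacity covers the
 * requested value, clamping to the largest supported size when the request
 * exceeds it.
 */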
285 nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
286 uint16_t qid, struct otx2_eth_rxq *rxq, struct rte_mempool *mp)
288 struct otx2_mbox *mbox = dev->mbox;
289 const struct rte_memzone *rz;
290 uint32_t ring_size, cq_size;
291 struct nix_aq_enq_req *aq;
296 ring_size = cq_size * NIX_CQ_ENTRY_SZ;
297 rz = rte_eth_dma_zone_reserve(eth_dev, "cq", qid, ring_size,
298 NIX_CQ_ALIGN, dev->node);
300 otx2_err("Failed to allocate mem for cq hw ring");
303 memset(rz->addr, 0, rz->len);
304 rxq->desc = (uintptr_t)rz->addr;
305 rxq->qmask = cq_size - 1;
307 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
309 aq->ctype = NIX_AQ_CTYPE_CQ;
310 aq->op = NIX_AQ_INSTOP_INIT;
314 aq->cq.qsize = rxq->qsize;
315 aq->cq.base = rz->iova;
316 aq->cq.avg_level = 0xff;
317 aq->cq.cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
318 aq->cq.cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
320 /* Many to one reduction */
321 aq->cq.qint_idx = qid % dev->qints;
322 /* Map CQ0 [RQ0] to CINT0 and so on, up to a maximum of 64 IRQs */
323 aq->cq.cint_idx = qid;
325 if (otx2_ethdev_fixup_is_limit_cq_full(dev)) {
326 const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
327 uint16_t min_rx_drop;
329 min_rx_drop = ceil(rx_cq_skid / (float)cq_size);
330 aq->cq.drop = min_rx_drop;
332 rxq->cq_drop = min_rx_drop;
334 rxq->cq_drop = NIX_CQ_THRESH_LEVEL;
335 aq->cq.drop = rxq->cq_drop;
339 /* TX pause frames enable flowctrl on RX side */
340 if (dev->fc_info.tx_pause) {
341 /* Single bpid is allocated for all rx channels for now */
342 aq->cq.bpid = dev->fc_info.bpid[0];
343 aq->cq.bp = rxq->cq_drop;
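/* Note: when Tx pause is enabled, the backpressure (bp) level is programmed
 * to the same fill level as the CQ drop threshold, so the channel is
 * backpressured at the point where CQEs would otherwise start to be dropped.
 */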
347 rc = otx2_mbox_process(mbox);
349 otx2_err("Failed to init cq context");
353 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
355 aq->ctype = NIX_AQ_CTYPE_RQ;
356 aq->op = NIX_AQ_INSTOP_INIT;
360 if (rxq->offloads & DEV_RX_OFFLOAD_SECURITY)
361 aq->rq.ipsech_ena = 1;
363 aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
365 aq->rq.lpb_aura = npa_lf_aura_handle_to_aura(mp->pool_id);
366 first_skip = (sizeof(struct rte_mbuf));
367 first_skip += RTE_PKTMBUF_HEADROOM;
368 first_skip += rte_pktmbuf_priv_size(mp);
369 rxq->data_off = first_skip;
371 first_skip /= 8; /* Expressed in number of dwords */
372 aq->rq.first_skip = first_skip;
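/* first_skip places the HW-written packet data past the mbuf struct, its
 * private area and the headroom; the hardware field is programmed in units
 * of 8-byte words, hence the division above.
 */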
373 aq->rq.later_skip = (sizeof(struct rte_mbuf) / 8);
374 aq->rq.flow_tagw = 32; /* 32-bits */
375 aq->rq.lpb_sizem1 = mp->elt_size / 8;
376 aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
378 aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
379 aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
380 aq->rq.rq_int_ena = 0;
381 /* Many to one reduction */
382 aq->rq.qint_idx = qid % dev->qints;
384 aq->rq.xqe_drop_ena = 1;
386 rc = otx2_mbox_process(mbox);
388 otx2_err("Failed to init rq context");
392 if (dev->lock_rx_ctx) {
393 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
395 aq->ctype = NIX_AQ_CTYPE_CQ;
396 aq->op = NIX_AQ_INSTOP_LOCK;
398 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
400 /* The shared memory buffer can be full; flush it and retry. */
403 otx2_mbox_msg_send(mbox, 0);
404 rc = otx2_mbox_wait_for_rsp(mbox, 0);
406 otx2_err("Failed to LOCK cq context");
410 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
412 otx2_err("Failed to LOCK rq context");
417 aq->ctype = NIX_AQ_CTYPE_RQ;
418 aq->op = NIX_AQ_INSTOP_LOCK;
419 rc = otx2_mbox_process(mbox);
421 otx2_err("Failed to LOCK rq context");
430 nix_rq_enb_dis(struct rte_eth_dev *eth_dev,
431 struct otx2_eth_rxq *rxq, const bool enb)
433 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
434 struct otx2_mbox *mbox = dev->mbox;
435 struct nix_aq_enq_req *aq;
437 /* Pkts will be dropped silently if RQ is disabled */
438 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
440 aq->ctype = NIX_AQ_CTYPE_RQ;
441 aq->op = NIX_AQ_INSTOP_WRITE;
444 aq->rq_mask.ena = ~(aq->rq_mask.ena);
446 return otx2_mbox_process(mbox);
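/* nix_rq_enb_dis() issues a masked context write: only the fields whose
 * mask bits are set (here just 'ena') are updated by the AF, so the RQ can
 * be toggled without disturbing the rest of its context.
 */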
450 nix_cq_rq_uninit(struct rte_eth_dev *eth_dev, struct otx2_eth_rxq *rxq)
452 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
453 struct otx2_mbox *mbox = dev->mbox;
454 struct nix_aq_enq_req *aq;
457 /* RQ is already disabled */
459 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
461 aq->ctype = NIX_AQ_CTYPE_CQ;
462 aq->op = NIX_AQ_INSTOP_WRITE;
465 aq->cq_mask.ena = ~(aq->cq_mask.ena);
467 rc = otx2_mbox_process(mbox);
469 otx2_err("Failed to disable cq context");
473 if (dev->lock_rx_ctx) {
474 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
476 aq->ctype = NIX_AQ_CTYPE_CQ;
477 aq->op = NIX_AQ_INSTOP_UNLOCK;
479 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
481 /* The shared memory buffer can be full; flush it and retry. */
484 otx2_mbox_msg_send(mbox, 0);
485 rc = otx2_mbox_wait_for_rsp(mbox, 0);
487 otx2_err("Failed to UNLOCK cq context");
491 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
493 otx2_err("Failed to UNLOCK rq context");
498 aq->ctype = NIX_AQ_CTYPE_RQ;
499 aq->op = NIX_AQ_INSTOP_UNLOCK;
500 rc = otx2_mbox_process(mbox);
502 otx2_err("Failed to UNLOCK rq context");
511 nix_get_data_off(struct otx2_eth_dev *dev)
513 return otx2_ethdev_is_ptp_en(dev) ? NIX_TIMESYNC_RX_OFFSET : 0;
517 otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id)
519 struct rte_mbuf mb_def;
522 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
523 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
524 offsetof(struct rte_mbuf, data_off) != 2);
525 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
526 offsetof(struct rte_mbuf, data_off) != 4);
527 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
528 offsetof(struct rte_mbuf, data_off) != 6);
530 mb_def.data_off = RTE_PKTMBUF_HEADROOM + nix_get_data_off(dev);
531 mb_def.port = port_id;
532 rte_mbuf_refcnt_set(&mb_def, 1);
534 /* Prevent compiler reordering: rearm_data covers previous fields */
535 rte_compiler_barrier();
536 tmp = (uint64_t *)&mb_def.rearm_data;
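/* The 64-bit word at rearm_data (covering data_off, refcnt, nb_segs and
 * port, as the build-time checks above ensure) is used as the per-queue
 * mbuf_initializer so the Rx fast path can rearm each mbuf with one store.
 */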
542 otx2_nix_rx_queue_release(void *rx_queue)
544 struct otx2_eth_rxq *rxq = rx_queue;
549 otx2_nix_dbg("Releasing rxq %u", rxq->rq);
550 nix_cq_rq_uninit(rxq->eth_dev, rxq);
555 otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
556 uint16_t nb_desc, unsigned int socket,
557 const struct rte_eth_rxconf *rx_conf,
558 struct rte_mempool *mp)
560 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
561 struct rte_mempool_ops *ops;
562 struct otx2_eth_rxq *rxq;
563 const char *platform_ops;
564 enum nix_q_size_e qsize;
570 /* Compile-time check to make sure all fast-path elements are in a cache line (CL) */
571 RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_rxq, slow_path_start) >= 128);
574 if (rx_conf->rx_deferred_start == 1) {
575 otx2_err("Deferred Rx start is not supported");
579 platform_ops = rte_mbuf_platform_mempool_ops();
580 /* This driver needs octeontx2_npa mempool ops to work */
581 ops = rte_mempool_get_ops(mp->ops_index);
582 if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
583 otx2_err("mempool ops should be of octeontx2_npa type");
587 if (mp->pool_id == 0) {
588 otx2_err("Invalid pool_id");
592 /* Free memory prior to re-allocation if needed */
593 if (eth_dev->data->rx_queues[rq] != NULL) {
594 otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq);
595 otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]);
596 rte_eth_dma_zone_free(eth_dev, "cq", rq);
597 eth_dev->data->rx_queues[rq] = NULL;
600 offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
601 dev->rx_offloads |= offloads;
603 /* Find the CQ queue size */
604 qsize = nix_qsize_clampup_get(dev, nb_desc);
605 /* Allocate rxq memory */
606 rxq = rte_zmalloc_socket("otx2 rxq", sizeof(*rxq), OTX2_ALIGN, socket);
608 otx2_err("Failed to allocate rq=%d", rq);
613 rxq->eth_dev = eth_dev;
615 rxq->cq_door = dev->base + NIX_LF_CQ_OP_DOOR;
616 rxq->cq_status = (int64_t *)(dev->base + NIX_LF_CQ_OP_STATUS);
617 rxq->wdata = (uint64_t)rq << 32;
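/* wdata caches the CQ number in bits [63:32]; presumably it is combined
 * with the status/doorbell operations on NIX_LF_CQ_OP_STATUS in the Rx
 * fast path.
 */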
618 rxq->aura = npa_lf_aura_handle_to_aura(mp->pool_id);
619 rxq->mbuf_initializer = otx2_nix_rxq_mbuf_setup(dev,
620 eth_dev->data->port_id);
621 rxq->offloads = offloads;
623 rxq->qlen = nix_qsize_to_val(qsize);
625 rxq->lookup_mem = otx2_nix_fastpath_lookup_mem_get();
626 rxq->tstamp = &dev->tstamp;
628 /* Alloc completion queue */
629 rc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp);
631 otx2_err("Failed to allocate rxq=%u", rq);
635 rxq->qconf.socket_id = socket;
636 rxq->qconf.nb_desc = nb_desc;
637 rxq->qconf.mempool = mp;
638 memcpy(&rxq->qconf.conf.rx, rx_conf, sizeof(struct rte_eth_rxconf));
640 nix_rx_queue_reset(rxq);
641 otx2_nix_dbg("rq=%d pool=%s qsize=%d nb_desc=%d->%d",
642 rq, mp->name, qsize, nb_desc, rxq->qlen);
644 eth_dev->data->rx_queues[rq] = rxq;
645 eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;
647 /* Calculate the delta and frequency multiplier between the PTP HI clock
648 * and the TSC; these are needed to derive the raw clock value from the
649 * TSC counter. The read_clock eth op returns this raw clock value.
651 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
652 otx2_ethdev_is_ptp_en(dev)) {
653 rc = otx2_nix_raw_clock_tsc_conv(dev);
655 otx2_err("Failed to calculate delta and freq mult");
663 otx2_nix_rx_queue_release(rxq);
668 static inline uint8_t
669 nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
672 * A maximum of three segments can be supported with W8; choose
673 * NIX_MAXSQESZ_W16 for multi-segment offload.
675 if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
676 return NIX_MAXSQESZ_W16;
678 return NIX_MAXSQESZ_W8;
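/* Naming assumption: W8/W16 denote SQE sizes of 8 and 16 64-bit words
 * (64 B and 128 B); the larger SQE leaves room for the extra SG entries a
 * multi-segment packet needs.
 */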
682 nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
684 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
685 struct rte_eth_dev_data *data = eth_dev->data;
686 struct rte_eth_conf *conf = &data->dev_conf;
687 struct rte_eth_rxmode *rxmode = &conf->rxmode;
690 if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
691 (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
692 flags |= NIX_RX_OFFLOAD_RSS_F;
694 if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
695 DEV_RX_OFFLOAD_UDP_CKSUM))
696 flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
698 if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
699 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
700 flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
702 if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
703 flags |= NIX_RX_MULTI_SEG_F;
705 if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
706 DEV_RX_OFFLOAD_QINQ_STRIP))
707 flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
709 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
710 flags |= NIX_RX_OFFLOAD_TSTAMP_F;
712 if (dev->rx_offloads & DEV_RX_OFFLOAD_SECURITY)
713 flags |= NIX_RX_OFFLOAD_SECURITY_F;
715 if (!dev->ptype_disable)
716 flags |= NIX_RX_OFFLOAD_PTYPE_F;
722 nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
724 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
725 uint64_t conf = dev->tx_offloads;
728 /* Fastpath is dependent on these enums */
729 RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
730 RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
731 RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
732 RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
733 RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
734 RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
735 RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
736 RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
737 RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
738 RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
739 RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
740 RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
741 RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
742 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
743 offsetof(struct rte_mbuf, buf_iova) + 8);
744 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
745 offsetof(struct rte_mbuf, buf_iova) + 16);
746 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
747 offsetof(struct rte_mbuf, ol_flags) + 12);
748 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
749 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
751 if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
752 conf & DEV_TX_OFFLOAD_QINQ_INSERT)
753 flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
755 if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
756 conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
757 flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
759 if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
760 conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
761 conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
762 conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
763 flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
765 if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
766 flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
768 if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
769 flags |= NIX_TX_MULTI_SEG_F;
771 /* Enable Inner checksum for TSO */
772 if (conf & DEV_TX_OFFLOAD_TCP_TSO)
773 flags |= (NIX_TX_OFFLOAD_TSO_F |
774 NIX_TX_OFFLOAD_L3_L4_CSUM_F);
776 /* Enable Inner and Outer checksum for Tunnel TSO */
777 if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
778 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
779 DEV_TX_OFFLOAD_GRE_TNL_TSO))
780 flags |= (NIX_TX_OFFLOAD_TSO_F |
781 NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
782 NIX_TX_OFFLOAD_L3_L4_CSUM_F);
784 if (conf & DEV_TX_OFFLOAD_SECURITY)
785 flags |= NIX_TX_OFFLOAD_SECURITY_F;
787 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
788 flags |= NIX_TX_OFFLOAD_TSTAMP_F;
794 nix_sqb_lock(struct rte_mempool *mp)
796 struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
797 struct npa_aq_enq_req *req;
800 req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
801 req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
802 req->ctype = NPA_AQ_CTYPE_AURA;
803 req->op = NPA_AQ_INSTOP_LOCK;
805 req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
807 /* The shared memory buffer can be full; flush it and retry. */
810 otx2_mbox_msg_send(npa_lf->mbox, 0);
811 rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0);
813 otx2_err("Failed to LOCK AURA context");
817 req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
819 otx2_err("Failed to LOCK POOL context");
824 req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
825 req->ctype = NPA_AQ_CTYPE_POOL;
826 req->op = NPA_AQ_INSTOP_LOCK;
828 rc = otx2_mbox_process(npa_lf->mbox);
830 otx2_err("Unable to lock POOL in NDC");
838 nix_sqb_unlock(struct rte_mempool *mp)
840 struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
841 struct npa_aq_enq_req *req;
844 req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
845 req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
846 req->ctype = NPA_AQ_CTYPE_AURA;
847 req->op = NPA_AQ_INSTOP_UNLOCK;
849 req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
851 /* The shared memory buffer can be full; flush it and retry. */
854 otx2_mbox_msg_send(npa_lf->mbox, 0);
855 rc = otx2_mbox_wait_for_rsp(npa_lf->mbox, 0);
857 otx2_err("Failed to UNLOCK AURA context");
861 req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
863 otx2_err("Failed to UNLOCK POOL context");
867 req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
868 req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
869 req->ctype = NPA_AQ_CTYPE_POOL;
870 req->op = NPA_AQ_INSTOP_UNLOCK;
872 rc = otx2_mbox_process(npa_lf->mbox);
874 otx2_err("Unable to UNLOCK AURA in NDC");
882 nix_sq_init(struct otx2_eth_txq *txq)
884 struct otx2_eth_dev *dev = txq->dev;
885 struct otx2_mbox *mbox = dev->mbox;
886 struct nix_aq_enq_req *sq;
891 if (txq->sqb_pool->pool_id == 0)
894 rc = otx2_nix_tm_get_leaf_data(dev, txq->sq, &rr_quantum, &smq);
896 otx2_err("Failed to get sq->smq(leaf node), rc=%d", rc);
900 sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
902 sq->ctype = NIX_AQ_CTYPE_SQ;
903 sq->op = NIX_AQ_INSTOP_INIT;
904 sq->sq.max_sqe_size = nix_sq_max_sqe_sz(txq);
907 sq->sq.smq_rr_quantum = rr_quantum;
908 sq->sq.default_chan = dev->tx_chan_base;
909 sq->sq.sqe_stype = NIX_STYPE_STF;
911 if (sq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
912 sq->sq.sqe_stype = NIX_STYPE_STP;
914 npa_lf_aura_handle_to_aura(txq->sqb_pool->pool_id);
915 sq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
916 sq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
917 sq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
918 sq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
920 /* Many to one reduction */
921 sq->sq.qint_idx = txq->sq % dev->qints;
923 rc = otx2_mbox_process(mbox);
927 if (dev->lock_tx_ctx) {
928 sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
930 sq->ctype = NIX_AQ_CTYPE_SQ;
931 sq->op = NIX_AQ_INSTOP_LOCK;
933 rc = otx2_mbox_process(mbox);
940 nix_sq_uninit(struct otx2_eth_txq *txq)
942 struct otx2_eth_dev *dev = txq->dev;
943 struct otx2_mbox *mbox = dev->mbox;
944 struct ndc_sync_op *ndc_req;
945 struct nix_aq_enq_rsp *rsp;
946 struct nix_aq_enq_req *aq;
947 uint16_t sqes_per_sqb;
951 otx2_nix_dbg("Cleaning up sq %u", txq->sq);
953 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
955 aq->ctype = NIX_AQ_CTYPE_SQ;
956 aq->op = NIX_AQ_INSTOP_READ;
958 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
962 /* Check if sq is already cleaned up */
967 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
969 aq->ctype = NIX_AQ_CTYPE_SQ;
970 aq->op = NIX_AQ_INSTOP_WRITE;
972 aq->sq_mask.ena = ~aq->sq_mask.ena;
975 rc = otx2_mbox_process(mbox);
979 if (dev->lock_tx_ctx) {
981 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
983 aq->ctype = NIX_AQ_CTYPE_SQ;
984 aq->op = NIX_AQ_INSTOP_UNLOCK;
986 rc = otx2_mbox_process(mbox);
990 nix_sqb_unlock(txq->sqb_pool);
993 /* Read the SQ context and free its SQBs */
994 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
996 aq->ctype = NIX_AQ_CTYPE_SQ;
997 aq->op = NIX_AQ_INSTOP_READ;
999 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1003 if (aq->sq.smq_pend)
1004 otx2_err("SQ has pending sqe's");
1006 count = aq->sq.sqb_count;
1007 sqes_per_sqb = 1 << txq->sqes_per_sqb_log2;
1008 /* Free the SQBs that are in use */
1009 sqb_buf = (void *)rsp->sq.head_sqb;
1013 next_sqb = *(void **)((uintptr_t)sqb_buf + (uint32_t)
1014 ((sqes_per_sqb - 1) *
1015 nix_sq_max_sqe_sz(txq)));
1016 npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
1022 /* Free the next-to-use SQB */
1023 if (rsp->sq.next_sqb)
1024 npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
1027 /* Sync NDC-NIX-TX for LF */
1028 ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
1029 ndc_req->nix_lf_tx_sync = 1;
1030 rc = otx2_mbox_process(mbox);
1032 otx2_err("Error on NDC-NIX-TX LF sync, rc %d", rc);
1038 nix_sqb_aura_limit_cfg(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
1040 struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
1041 struct npa_aq_enq_req *aura_req;
1043 aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
1044 aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
1045 aura_req->ctype = NPA_AQ_CTYPE_AURA;
1046 aura_req->op = NPA_AQ_INSTOP_WRITE;
1048 aura_req->aura.limit = nb_sqb_bufs;
1049 aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);
1051 return otx2_mbox_process(npa_lf->mbox);
1055 nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
1057 struct otx2_eth_dev *dev = txq->dev;
1058 uint16_t sqes_per_sqb, nb_sqb_bufs;
1059 char name[RTE_MEMPOOL_NAMESIZE];
1060 struct rte_mempool_objsz sz;
1061 struct npa_aura_s *aura;
1062 uint32_t tmp, blk_sz;
1064 aura = (struct npa_aura_s *)((uintptr_t)txq->fc_mem + OTX2_ALIGN);
1065 snprintf(name, sizeof(name), "otx2_sqb_pool_%d_%d", port, txq->sq);
1066 blk_sz = dev->sqb_size;
1068 if (nix_sq_max_sqe_sz(txq) == NIX_MAXSQESZ_W16)
1069 sqes_per_sqb = (dev->sqb_size / 8) / 16;
1071 sqes_per_sqb = (dev->sqb_size / 8) / 8;
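/* sqb_size is in bytes: dividing by 8 gives 64-bit words per SQB, and
 * dividing by the SQE size in words (16 for W16, 8 for W8) gives the number
 * of SQEs that fit in one SQB.
 */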
1073 nb_sqb_bufs = nb_desc / sqes_per_sqb;
1074 /* Clamp to at most the SQB count passed via devargs */
1075 nb_sqb_bufs = RTE_MIN(dev->max_sqb_count, RTE_MAX(NIX_DEF_SQB,
1076 nb_sqb_bufs + NIX_SQB_LIST_SPACE));
1078 txq->sqb_pool = rte_mempool_create_empty(name, NIX_MAX_SQB, blk_sz,
1080 MEMPOOL_F_NO_SPREAD);
1081 txq->nb_sqb_bufs = nb_sqb_bufs;
1082 txq->sqes_per_sqb_log2 = (uint16_t)rte_log2_u32(sqes_per_sqb);
1083 txq->nb_sqb_bufs_adj = nb_sqb_bufs -
1084 RTE_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb;
1085 txq->nb_sqb_bufs_adj =
1086 (NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
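/* nb_sqb_bufs_adj is a reduced SQB count (NIX_SQB_LOWER_THRESH percent of
 * the usable buffers) which the Tx path appears to use as a flow-control
 * watermark so enqueue stops before the SQB aura is fully exhausted.
 */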
1088 if (txq->sqb_pool == NULL) {
1089 otx2_err("Failed to allocate sqe mempool");
1093 memset(aura, 0, sizeof(*aura));
1095 aura->fc_addr = txq->fc_iova;
1096 aura->fc_hyst_bits = 0; /* Store count on all updates */
1097 if (rte_mempool_set_ops_byname(txq->sqb_pool, "octeontx2_npa", aura)) {
1098 otx2_err("Failed to set ops for sqe mempool");
1101 if (rte_mempool_populate_default(txq->sqb_pool) < 0) {
1102 otx2_err("Failed to populate sqe mempool");
1106 tmp = rte_mempool_calc_obj_size(blk_sz, MEMPOOL_F_NO_SPREAD, &sz);
1107 if (dev->sqb_size != sz.elt_size) {
1108 otx2_err("sqe pool block size is not expected %d != %d",
1109 dev->sqb_size, tmp);
1113 nix_sqb_aura_limit_cfg(txq->sqb_pool, txq->nb_sqb_bufs);
1114 if (dev->lock_tx_ctx)
1115 nix_sqb_lock(txq->sqb_pool);
1123 otx2_nix_form_default_desc(struct otx2_eth_txq *txq)
1125 struct nix_send_ext_s *send_hdr_ext;
1126 struct nix_send_hdr_s *send_hdr;
1127 struct nix_send_mem_s *send_mem;
1128 union nix_send_sg_s *sg;
1130 /* Initialize the fields based on basic single segment packet */
1131 memset(&txq->cmd, 0, sizeof(txq->cmd));
1133 if (txq->dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
1134 send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
1135 /* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
1136 send_hdr->w0.sizem1 = 2;
1138 send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2];
1139 send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
1140 if (txq->dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
1141 /* Default: a one-segment packet with timestamp would have:
1142 * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM) = 8/2 - 1 => sizem1 = 3
1145 send_hdr->w0.sizem1 = 3;
1146 send_hdr_ext->w0.tstmp = 1;
1148 /* The offset of send_mem within the command buffer (in 64-bit words)
1149 * is send_hdr->w0.sizem1 * 2, i.e. right after HDR + EXT + SG + IOVA.
1151 send_mem = (struct nix_send_mem_s *)(txq->cmd +
1152 (send_hdr->w0.sizem1 << 1));
1153 send_mem->subdc = NIX_SUBDC_MEM;
1154 send_mem->alg = NIX_SENDMEMALG_SETTSTMP;
1155 send_mem->addr = txq->dev->tstamp.tx_tstamp_iova;
1157 sg = (union nix_send_sg_s *)&txq->cmd[4];
1159 send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
1160 /* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
1161 send_hdr->w0.sizem1 = 1;
1162 sg = (union nix_send_sg_s *)&txq->cmd[2];
1165 send_hdr->w0.sq = txq->sq;
1166 sg->subdc = NIX_SUBDC_SG;
1168 sg->ld_type = NIX_SENDLDTYPE_LDD;
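/* The default command built above (HDR [+ EXT][+ MEM] + SG) is intended to
 * be copied per packet on the Tx hot path, which then only patches the
 * per-packet fields (lengths, addresses, offload words).
 */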
1174 otx2_nix_tx_queue_release(void *_txq)
1176 struct otx2_eth_txq *txq = _txq;
1177 struct rte_eth_dev *eth_dev;
1182 eth_dev = txq->dev->eth_dev;
1184 otx2_nix_dbg("Releasing txq %u", txq->sq);
1186 /* Flush and disable tm */
1187 otx2_nix_sq_flush_pre(txq, eth_dev->data->dev_started);
1189 /* Free SQBs and disable the SQ */
1192 if (txq->sqb_pool) {
1193 rte_mempool_free(txq->sqb_pool);
1194 txq->sqb_pool = NULL;
1196 otx2_nix_sq_flush_post(txq);
1202 otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
1203 uint16_t nb_desc, unsigned int socket_id,
1204 const struct rte_eth_txconf *tx_conf)
1206 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1207 const struct rte_memzone *fc;
1208 struct otx2_eth_txq *txq;
1214 /* Compile-time check to make sure all fast-path elements are in a cache line (CL) */
1215 RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_txq, slow_path_start) >= 128);
1217 if (tx_conf->tx_deferred_start) {
1218 otx2_err("Tx deferred start is not supported");
1222 /* Free memory prior to re-allocation if needed. */
1223 if (eth_dev->data->tx_queues[sq] != NULL) {
1224 otx2_nix_dbg("Freeing memory prior to re-allocation %d", sq);
1225 otx2_nix_tx_queue_release(eth_dev->data->tx_queues[sq]);
1226 eth_dev->data->tx_queues[sq] = NULL;
1229 /* Find the expected offloads for this queue */
1230 offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
1232 /* Allocating tx queue data structure */
1233 txq = rte_zmalloc_socket("otx2_ethdev TX queue", sizeof(*txq),
1234 OTX2_ALIGN, socket_id);
1236 otx2_err("Failed to alloc txq=%d", sq);
1242 txq->sqb_pool = NULL;
1243 txq->offloads = offloads;
1244 dev->tx_offloads |= offloads;
1247 * Allocate memory for flow control updates from HW.
1248 * Allocate one cache line so that it fits all FC_STYPE modes.
1250 fc = rte_eth_dma_zone_reserve(eth_dev, "fcmem", sq,
1251 OTX2_ALIGN + sizeof(struct npa_aura_s),
1252 OTX2_ALIGN, dev->node);
1254 otx2_err("Failed to allocate mem for fcmem");
1258 txq->fc_iova = fc->iova;
1259 txq->fc_mem = fc->addr;
1261 /* Initialize the aura sqb pool */
1262 rc = nix_alloc_sqb_pool(eth_dev->data->port_id, txq, nb_desc);
1264 otx2_err("Failed to alloc sqe pool rc=%d", rc);
1268 /* Initialize the SQ */
1269 rc = nix_sq_init(txq);
1271 otx2_err("Failed to init sq=%d context", sq);
1275 txq->fc_cache_pkts = 0;
1276 txq->io_addr = dev->base + NIX_LF_OP_SENDX(0);
1277 /* Evenly distribute LMT slots across SQs */
1278 txq->lmt_addr = (void *)(dev->lmt_addr + ((sq & LMT_SLOT_MASK) << 12));
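/* Each SQ gets its own LMT region: (sq & LMT_SLOT_MASK) selects a slot and
 * the << 12 scales it to a 4 KB window, so LMTST submissions from different
 * queues presumably do not contend on the same lines.
 */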
1280 txq->qconf.socket_id = socket_id;
1281 txq->qconf.nb_desc = nb_desc;
1282 memcpy(&txq->qconf.conf.tx, tx_conf, sizeof(struct rte_eth_txconf));
1284 otx2_nix_form_default_desc(txq);
1286 otx2_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " sqb=0x%" PRIx64 ""
1287 " lmt_addr=%p nb_sqb_bufs=%d sqes_per_sqb_log2=%d", sq,
1288 fc->addr, offloads, txq->sqb_pool->pool_id, txq->lmt_addr,
1289 txq->nb_sqb_bufs, txq->sqes_per_sqb_log2);
1290 eth_dev->data->tx_queues[sq] = txq;
1291 eth_dev->data->tx_queue_state[sq] = RTE_ETH_QUEUE_STATE_STOPPED;
1295 otx2_nix_tx_queue_release(txq);
1301 nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
1303 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1304 struct otx2_eth_qconf *tx_qconf = NULL;
1305 struct otx2_eth_qconf *rx_qconf = NULL;
1306 struct otx2_eth_txq **txq;
1307 struct otx2_eth_rxq **rxq;
1308 int i, nb_rxq, nb_txq;
1310 nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
1311 nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
1313 tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
1314 if (tx_qconf == NULL) {
1315 otx2_err("Failed to allocate memory for tx_qconf");
1319 rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
1320 if (rx_qconf == NULL) {
1321 otx2_err("Failed to allocate memory for rx_qconf");
1325 txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
1326 for (i = 0; i < nb_txq; i++) {
1327 if (txq[i] == NULL) {
1328 tx_qconf[i].valid = false;
1329 otx2_info("txq[%d] is already released", i);
1332 memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf));
1333 tx_qconf[i].valid = true;
1334 otx2_nix_tx_queue_release(txq[i]);
1335 eth_dev->data->tx_queues[i] = NULL;
1338 rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
1339 for (i = 0; i < nb_rxq; i++) {
1340 if (rxq[i] == NULL) {
1341 rx_qconf[i].valid = false;
1342 otx2_info("rxq[%d] is already released", i);
1345 memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf));
1346 rx_qconf[i].valid = true;
1347 otx2_nix_rx_queue_release(rxq[i]);
1348 eth_dev->data->rx_queues[i] = NULL;
1351 dev->tx_qconf = tx_qconf;
1352 dev->rx_qconf = rx_qconf;
1365 nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
1367 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1368 struct otx2_eth_qconf *tx_qconf = dev->tx_qconf;
1369 struct otx2_eth_qconf *rx_qconf = dev->rx_qconf;
1370 struct otx2_eth_txq **txq;
1371 struct otx2_eth_rxq **rxq;
1372 int rc, i, nb_rxq, nb_txq;
1374 nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
1375 nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
1378 /* Set up the Tx and Rx queues with their previous configuration so that
1379 * the queues remain functional in cases where ports are started without
1380 * reconfiguring the queues.
1382 *
1383 * The usual reconfiguration sequence is port_configure() followed by
1388 * queue_configure() for each queue.
1395 * In some applications' control paths, queue_configure() is NOT invoked
1396 * for the Tx/Rx queues after port_configure(); in such cases the queues
1397 * remain functional after start because they are restored here as part
1398 * of port_configure().
1400 for (i = 0; i < nb_txq; i++) {
1401 if (!tx_qconf[i].valid)
1403 rc = otx2_nix_tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc,
1404 tx_qconf[i].socket_id,
1405 &tx_qconf[i].conf.tx);
1407 otx2_err("Failed to setup tx queue rc=%d", rc);
1408 txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
1409 for (i -= 1; i >= 0; i--)
1410 otx2_nix_tx_queue_release(txq[i]);
1415 free(tx_qconf); tx_qconf = NULL;
1417 for (i = 0; i < nb_rxq; i++) {
1418 if (!rx_qconf[i].valid)
1420 rc = otx2_nix_rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc,
1421 rx_qconf[i].socket_id,
1422 &rx_qconf[i].conf.rx,
1423 rx_qconf[i].mempool);
1425 otx2_err("Failed to setup rx queue rc=%d", rc);
1426 rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
1427 for (i -= 1; i >= 0; i--)
1428 otx2_nix_rx_queue_release(rxq[i]);
1429 goto release_tx_queues;
1433 free(rx_qconf); rx_qconf = NULL;
1438 txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
1439 for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1440 otx2_nix_tx_queue_release(txq[i]);
1451 nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
1453 RTE_SET_USED(queue);
1454 RTE_SET_USED(mbufs);
1461 nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
1463 /* These dummy functions are required to support applications that
1464 * reconfigure queues without stopping the Tx and Rx burst threads
1465 * (e.g. the KNI app).
1466 * When the queue context is saved, the txqs/rxqs are released, which
1467 * would crash the application since rx/tx burst may still be running
1468 * on different lcores.
1470 eth_dev->tx_pkt_burst = nix_eth_nop_burst;
1471 eth_dev->rx_pkt_burst = nix_eth_nop_burst;
1476 nix_lso_tcp(struct nix_lso_format_cfg *req, bool v4)
1478 volatile struct nix_lso_format *field;
1480 /* This format works only for TCP packets marked with OL3/OL4 */
1481 field = (volatile struct nix_lso_format *)&req->fields[0];
1482 req->field_mask = NIX_LSO_FIELD_MASK;
1483 /* Outer IPv4/IPv6 */
1484 field->layer = NIX_TXLAYER_OL3;
1485 field->offset = v4 ? 2 : 4;
1486 field->sizem1 = 1; /* 2B */
1487 field->alg = NIX_LSOALG_ADD_PAYLEN;
1491 field->layer = NIX_TXLAYER_OL3;
1494 /* Incremented linearly per segment */
1495 field->alg = NIX_LSOALG_ADD_SEGNUM;
1499 /* TCP sequence number update */
1500 field->layer = NIX_TXLAYER_OL4;
1502 field->sizem1 = 3; /* 4 bytes */
1503 field->alg = NIX_LSOALG_ADD_OFFSET;
1505 /* TCP flags field */
1506 field->layer = NIX_TXLAYER_OL4;
1509 field->alg = NIX_LSOALG_TCP_FLAGS;
1514 nix_lso_udp_tun_tcp(struct nix_lso_format_cfg *req,
1515 bool outer_v4, bool inner_v4)
1517 volatile struct nix_lso_format *field;
1519 field = (volatile struct nix_lso_format *)&req->fields[0];
1520 req->field_mask = NIX_LSO_FIELD_MASK;
1521 /* Outer IPv4/IPv6 len */
1522 field->layer = NIX_TXLAYER_OL3;
1523 field->offset = outer_v4 ? 2 : 4;
1524 field->sizem1 = 1; /* 2B */
1525 field->alg = NIX_LSOALG_ADD_PAYLEN;
1529 field->layer = NIX_TXLAYER_OL3;
1532 /* Incremented linearly per segment */
1533 field->alg = NIX_LSOALG_ADD_SEGNUM;
1537 /* Outer UDP length */
1538 field->layer = NIX_TXLAYER_OL4;
1541 field->alg = NIX_LSOALG_ADD_PAYLEN;
1544 /* Inner IPv4/IPv6 */
1545 field->layer = NIX_TXLAYER_IL3;
1546 field->offset = inner_v4 ? 2 : 4;
1547 field->sizem1 = 1; /* 2B */
1548 field->alg = NIX_LSOALG_ADD_PAYLEN;
1552 field->layer = NIX_TXLAYER_IL3;
1555 /* Incremented linearly per segment */
1556 field->alg = NIX_LSOALG_ADD_SEGNUM;
1560 /* TCP sequence number update */
1561 field->layer = NIX_TXLAYER_IL4;
1563 field->sizem1 = 3; /* 4 bytes */
1564 field->alg = NIX_LSOALG_ADD_OFFSET;
1567 /* TCP flags field */
1568 field->layer = NIX_TXLAYER_IL4;
1571 field->alg = NIX_LSOALG_TCP_FLAGS;
1576 nix_lso_tun_tcp(struct nix_lso_format_cfg *req,
1577 bool outer_v4, bool inner_v4)
1579 volatile struct nix_lso_format *field;
1581 field = (volatile struct nix_lso_format *)&req->fields[0];
1582 req->field_mask = NIX_LSO_FIELD_MASK;
1583 /* Outer IPv4/IPv6 len */
1584 field->layer = NIX_TXLAYER_OL3;
1585 field->offset = outer_v4 ? 2 : 4;
1586 field->sizem1 = 1; /* 2B */
1587 field->alg = NIX_LSOALG_ADD_PAYLEN;
1591 field->layer = NIX_TXLAYER_OL3;
1594 /* Incremented linearly per segment */
1595 field->alg = NIX_LSOALG_ADD_SEGNUM;
1599 /* Inner IPv4/IPv6 */
1600 field->layer = NIX_TXLAYER_IL3;
1601 field->offset = inner_v4 ? 2 : 4;
1602 field->sizem1 = 1; /* 2B */
1603 field->alg = NIX_LSOALG_ADD_PAYLEN;
1607 field->layer = NIX_TXLAYER_IL3;
1610 /* Incremented linearly per segment */
1611 field->alg = NIX_LSOALG_ADD_SEGNUM;
1615 /* TCP sequence number update */
1616 field->layer = NIX_TXLAYER_IL4;
1618 field->sizem1 = 3; /* 4 bytes */
1619 field->alg = NIX_LSOALG_ADD_OFFSET;
1622 /* TCP flags field */
1623 field->layer = NIX_TXLAYER_IL4;
1626 field->alg = NIX_LSOALG_TCP_FLAGS;
1631 nix_setup_lso_formats(struct otx2_eth_dev *dev)
1633 struct otx2_mbox *mbox = dev->mbox;
1634 struct nix_lso_format_cfg_rsp *rsp;
1635 struct nix_lso_format_cfg *req;
1639 /* Skip if TSO was not requested */
1640 if (!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F))
1645 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1646 nix_lso_tcp(req, true);
1647 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1651 base = rsp->lso_format_idx;
1652 if (base != NIX_LSO_FORMAT_IDX_TSOV4)
1654 dev->lso_base_idx = base;
1655 otx2_nix_dbg("tcpv4 lso fmt=%u", base);
1661 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1662 nix_lso_tcp(req, false);
1663 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1667 if (rsp->lso_format_idx != base + 1)
1669 otx2_nix_dbg("tcpv6 lso fmt=%u\n", base + 1);
1672 * IPv4/UDP/TUN HDR/IPv4/TCP LSO
1674 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1675 nix_lso_udp_tun_tcp(req, true, true);
1676 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1680 if (rsp->lso_format_idx != base + 2)
1682 otx2_nix_dbg("udp tun v4v4 fmt=%u\n", base + 2);
1685 * IPv4/UDP/TUN HDR/IPv6/TCP LSO
1687 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1688 nix_lso_udp_tun_tcp(req, true, false);
1689 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1693 if (rsp->lso_format_idx != base + 3)
1695 otx2_nix_dbg("udp tun v4v6 fmt=%u\n", base + 3);
1698 * IPv6/UDP/TUN HDR/IPv4/TCP LSO
1700 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1701 nix_lso_udp_tun_tcp(req, false, true);
1702 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1706 if (rsp->lso_format_idx != base + 4)
1708 otx2_nix_dbg("udp tun v6v4 fmt=%u\n", base + 4);
1711 * IPv6/UDP/TUN HDR/IPv6/TCP LSO
1713 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1714 nix_lso_udp_tun_tcp(req, false, false);
1715 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1718 if (rsp->lso_format_idx != base + 5)
1720 otx2_nix_dbg("udp tun v6v6 fmt=%u\n", base + 5);
1723 * IPv4/TUN HDR/IPv4/TCP LSO
1725 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1726 nix_lso_tun_tcp(req, true, true);
1727 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1731 if (rsp->lso_format_idx != base + 6)
1733 otx2_nix_dbg("tun v4v4 fmt=%u\n", base + 6);
1736 * IPv4/TUN HDR/IPv6/TCP LSO
1738 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1739 nix_lso_tun_tcp(req, true, false);
1740 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1744 if (rsp->lso_format_idx != base + 7)
1746 otx2_nix_dbg("tun v4v6 fmt=%u\n", base + 7);
1749 * IPv6/TUN HDR/IPv4/TCP LSO
1751 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1752 nix_lso_tun_tcp(req, false, true);
1753 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1757 if (rsp->lso_format_idx != base + 8)
1759 otx2_nix_dbg("tun v6v4 fmt=%u\n", base + 8);
1762 * IPv6/TUN HDR/IPv6/TCP LSO
1764 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1765 nix_lso_tun_tcp(req, false, false);
1766 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1769 if (rsp->lso_format_idx != base + 9)
1771 otx2_nix_dbg("tun v6v6 fmt=%u\n", base + 9);
1776 otx2_nix_configure(struct rte_eth_dev *eth_dev)
1778 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1779 struct rte_eth_dev_data *data = eth_dev->data;
1780 struct rte_eth_conf *conf = &data->dev_conf;
1781 struct rte_eth_rxmode *rxmode = &conf->rxmode;
1782 struct rte_eth_txmode *txmode = &conf->txmode;
1783 char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
1784 struct rte_ether_addr *ea;
1785 uint8_t nb_rxq, nb_txq;
1791 if (rte_eal_has_hugepages() == 0) {
1792 otx2_err("Huge page is not configured");
1793 goto fail_configure;
1796 if (conf->dcb_capability_en == 1) {
1797 otx2_err("dcb enable is not supported");
1798 goto fail_configure;
1801 if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1802 otx2_err("Flow director is not supported");
1803 goto fail_configure;
1806 if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
1807 rxmode->mq_mode != ETH_MQ_RX_RSS) {
1808 otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
1809 goto fail_configure;
1812 if (txmode->mq_mode != ETH_MQ_TX_NONE) {
1813 otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
1814 goto fail_configure;
1817 if (otx2_dev_is_Ax(dev) &&
1818 (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
1819 ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
1820 (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
1821 otx2_err("Outer IP and SCTP checksum unsupported");
1822 goto fail_configure;
1825 /* Free the resources allocated from the previous configure */
1826 if (dev->configured == 1) {
1827 otx2_eth_sec_fini(eth_dev);
1828 otx2_nix_rxchan_bpid_cfg(eth_dev, false);
1829 otx2_nix_vlan_fini(eth_dev);
1830 otx2_nix_mc_addr_list_uninstall(eth_dev);
1831 otx2_flow_free_all_resources(dev);
1832 oxt2_nix_unregister_queue_irqs(eth_dev);
1833 if (eth_dev->data->dev_conf.intr_conf.rxq)
1834 oxt2_nix_unregister_cq_irqs(eth_dev);
1835 nix_set_nop_rxtx_function(eth_dev);
1836 rc = nix_store_queue_cfg_and_then_release(eth_dev);
1838 goto fail_configure;
1839 otx2_nix_tm_fini(eth_dev);
1843 dev->rx_offloads = rxmode->offloads;
1844 dev->tx_offloads = txmode->offloads;
1845 dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
1846 dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);
1847 dev->rss_info.rss_grps = NIX_RSS_GRPS;
1849 nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
1850 nb_txq = RTE_MAX(data->nb_tx_queues, 1);
1852 /* Alloc a nix lf */
1853 rc = nix_lf_alloc(dev, nb_rxq, nb_txq);
1855 otx2_err("Failed to init nix_lf rc=%d", rc);
1859 otx2_nix_err_intr_enb_dis(eth_dev, true);
1860 otx2_nix_ras_intr_enb_dis(eth_dev, true);
1863 dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
1864 otx2_err("Both PTP and switch header enabled");
1868 rc = nix_lf_switch_header_type_enable(dev, true);
1870 otx2_err("Failed to enable switch type nix_lf rc=%d", rc);
1874 rc = nix_setup_lso_formats(dev);
1876 otx2_err("failed to setup nix lso format fields, rc=%d", rc);
1881 rc = otx2_nix_rss_config(eth_dev);
1883 otx2_err("Failed to configure rss rc=%d", rc);
1887 /* Init the default TM scheduler hierarchy */
1888 rc = otx2_nix_tm_init_default(eth_dev);
1890 otx2_err("Failed to init traffic manager rc=%d", rc);
1894 rc = otx2_nix_vlan_offload_init(eth_dev);
1896 otx2_err("Failed to init vlan offload rc=%d", rc);
1900 /* Register queue IRQs */
1901 rc = oxt2_nix_register_queue_irqs(eth_dev);
1903 otx2_err("Failed to register queue interrupts rc=%d", rc);
1907 /* Register cq IRQs */
1908 if (eth_dev->data->dev_conf.intr_conf.rxq) {
1909 if (eth_dev->data->nb_rx_queues > dev->cints) {
1910 otx2_err("Rx interrupt cannot be enabled, rxq > %d",
1914 /* The Rx interrupt feature cannot work with vector mode because
1915 * vector mode does not process packets unless at least 4 packets
1916 * are received, while CQ interrupts are generated even for 1 packet
1919 dev->scalar_ena = true;
1921 rc = oxt2_nix_register_cq_irqs(eth_dev);
1923 otx2_err("Failed to register CQ interrupts rc=%d", rc);
1928 /* Configure loop back mode */
1929 rc = cgx_intlbk_enable(dev, eth_dev->data->dev_conf.lpbk_mode);
1931 otx2_err("Failed to configure cgx loop back mode rc=%d", rc);
1935 rc = otx2_nix_rxchan_bpid_cfg(eth_dev, true);
1937 otx2_err("Failed to configure nix rx chan bpid cfg rc=%d", rc);
1941 /* Enable security */
1942 rc = otx2_eth_sec_init(eth_dev);
1946 rc = otx2_nix_flow_ctrl_init(eth_dev);
1948 otx2_err("Failed to init flow ctrl mode %d", rc);
1952 rc = otx2_nix_mc_addr_list_install(eth_dev);
1954 otx2_err("Failed to install mc address list rc=%d", rc);
1959 * Restore the queue configuration when this is a reconfigure and the
1960 * application has not invoked queue setup again.
1962 if (dev->configured == 1) {
1963 rc = nix_restore_queue_cfg(eth_dev);
1965 goto uninstall_mc_list;
1968 /* Update the mac address */
1969 ea = eth_dev->data->mac_addrs;
1970 memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1971 if (rte_is_zero_ether_addr(ea))
1972 rte_eth_random_addr((uint8_t *)ea);
1974 rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
1976 /* Apply new link configurations if changed */
1977 rc = otx2_apply_link_speed(eth_dev);
1979 otx2_err("Failed to set link configuration");
1980 goto uninstall_mc_list;
1983 otx2_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
1984 " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 ""
1985 " rx_flags=0x%x tx_flags=0x%x",
1986 eth_dev->data->port_id, ea_fmt, nb_rxq,
1987 nb_txq, dev->rx_offloads, dev->tx_offloads,
1988 dev->rx_offload_flags, dev->tx_offload_flags);
1991 dev->configured = 1;
1992 dev->configured_nb_rx_qs = data->nb_rx_queues;
1993 dev->configured_nb_tx_qs = data->nb_tx_queues;
1997 otx2_nix_mc_addr_list_uninstall(eth_dev);
1999 otx2_eth_sec_fini(eth_dev);
2001 oxt2_nix_unregister_cq_irqs(eth_dev);
2003 oxt2_nix_unregister_queue_irqs(eth_dev);
2005 otx2_nix_vlan_fini(eth_dev);
2007 otx2_nix_tm_fini(eth_dev);
2011 dev->rx_offload_flags &= ~nix_rx_offload_flags(eth_dev);
2012 dev->tx_offload_flags &= ~nix_tx_offload_flags(eth_dev);
2014 dev->configured = 0;
2019 otx2_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
2021 struct rte_eth_dev_data *data = eth_dev->data;
2022 struct otx2_eth_txq *txq;
2025 txq = eth_dev->data->tx_queues[qidx];
2027 if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
2030 rc = otx2_nix_sq_sqb_aura_fc(txq, true);
2032 otx2_err("Failed to enable sqb aura fc, txq=%u, rc=%d",
2037 data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
2044 otx2_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
2046 struct rte_eth_dev_data *data = eth_dev->data;
2047 struct otx2_eth_txq *txq;
2050 txq = eth_dev->data->tx_queues[qidx];
2052 if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
2055 txq->fc_cache_pkts = 0;
2057 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
2059 otx2_err("Failed to disable sqb aura fc, txq=%u, rc=%d",
2064 data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
2071 otx2_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
2073 struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
2074 struct rte_eth_dev_data *data = eth_dev->data;
2077 if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
2080 rc = nix_rq_enb_dis(rxq->eth_dev, rxq, true);
2082 otx2_err("Failed to enable rxq=%u, rc=%d", qidx, rc);
2086 data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
2093 otx2_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
2095 struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
2096 struct rte_eth_dev_data *data = eth_dev->data;
2099 if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
2102 rc = nix_rq_enb_dis(rxq->eth_dev, rxq, false);
2104 otx2_err("Failed to disable rxq=%u, rc=%d", qidx, rc);
2108 data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
2115 otx2_nix_dev_stop(struct rte_eth_dev *eth_dev)
2117 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2118 struct rte_mbuf *rx_pkts[32];
2119 struct otx2_eth_rxq *rxq;
2120 int count, i, j, rc;
2122 nix_lf_switch_header_type_enable(dev, false);
2123 nix_cgx_stop_link_event(dev);
2124 npc_rx_disable(dev);
2126 /* Stop rx queues and free up pending packets */
2127 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
2128 rc = otx2_nix_rx_queue_stop(eth_dev, i);
2132 rxq = eth_dev->data->rx_queues[i];
2133 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
2135 for (j = 0; j < count; j++)
2136 rte_pktmbuf_free(rx_pkts[j]);
2137 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
2141 /* Stop tx queues */
2142 for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
2143 otx2_nix_tx_queue_stop(eth_dev, i);
2147 otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
2149 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2152 /* MTU recalculation should be avoided here if PTP is enabled by the PF,
2153 * as otx2_nix_recalc_mtu() is invoked during otx2_nix_ptp_enable_vf()
2156 if (eth_dev->data->nb_rx_queues != 0 && !otx2_ethdev_is_ptp_en(dev)) {
2157 rc = otx2_nix_recalc_mtu(eth_dev);
2162 /* Start rx queues */
2163 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
2164 rc = otx2_nix_rx_queue_start(eth_dev, i);
2169 /* Start tx queues */
2170 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2171 rc = otx2_nix_tx_queue_start(eth_dev, i);
2176 rc = otx2_nix_update_flow_ctrl_mode(eth_dev);
2178 otx2_err("Failed to update flow ctrl mode %d", rc);
2182 /* Enable PTP if it was requested by the app or if it is already
2183 * enabled in PF owning this VF
2185 memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
2186 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
2187 otx2_ethdev_is_ptp_en(dev))
2188 otx2_nix_timesync_enable(eth_dev);
2190 otx2_nix_timesync_disable(eth_dev);
2192 /* Inform the VF that the Rx data offset is shifted by 8 bytes if PTP
2193 * is already enabled in the PF owning this VF
2195 if (otx2_ethdev_is_ptp_en(dev) && otx2_dev_is_vf(dev))
2196 otx2_nix_ptp_enable_vf(eth_dev);
2198 rc = npc_rx_enable(dev);
2200 otx2_err("Failed to enable NPC rx %d", rc);
2204 otx2_nix_toggle_flag_link_cfg(dev, true);
2206 rc = nix_cgx_start_link_event(dev);
2208 otx2_err("Failed to start cgx link event %d", rc);
2212 otx2_nix_toggle_flag_link_cfg(dev, false);
2213 otx2_eth_set_tx_function(eth_dev);
2214 otx2_eth_set_rx_function(eth_dev);
2219 npc_rx_disable(dev);
2220 otx2_nix_toggle_flag_link_cfg(dev, false);
2224 static int otx2_nix_dev_reset(struct rte_eth_dev *eth_dev);
2225 static void otx2_nix_dev_close(struct rte_eth_dev *eth_dev);
2227 /* Initialize and register driver with DPDK Application */
2228 static const struct eth_dev_ops otx2_eth_dev_ops = {
2229 .dev_infos_get = otx2_nix_info_get,
2230 .dev_configure = otx2_nix_configure,
2231 .link_update = otx2_nix_link_update,
2232 .tx_queue_setup = otx2_nix_tx_queue_setup,
2233 .tx_queue_release = otx2_nix_tx_queue_release,
2234 .tm_ops_get = otx2_nix_tm_ops_get,
2235 .rx_queue_setup = otx2_nix_rx_queue_setup,
2236 .rx_queue_release = otx2_nix_rx_queue_release,
2237 .dev_start = otx2_nix_dev_start,
2238 .dev_stop = otx2_nix_dev_stop,
2239 .dev_close = otx2_nix_dev_close,
2240 .tx_queue_start = otx2_nix_tx_queue_start,
2241 .tx_queue_stop = otx2_nix_tx_queue_stop,
2242 .rx_queue_start = otx2_nix_rx_queue_start,
2243 .rx_queue_stop = otx2_nix_rx_queue_stop,
2244 .dev_set_link_up = otx2_nix_dev_set_link_up,
2245 .dev_set_link_down = otx2_nix_dev_set_link_down,
2246 .dev_supported_ptypes_get = otx2_nix_supported_ptypes_get,
2247 .dev_ptypes_set = otx2_nix_ptypes_set,
2248 .dev_reset = otx2_nix_dev_reset,
2249 .stats_get = otx2_nix_dev_stats_get,
2250 .stats_reset = otx2_nix_dev_stats_reset,
2251 .get_reg = otx2_nix_dev_get_reg,
2252 .mtu_set = otx2_nix_mtu_set,
2253 .mac_addr_add = otx2_nix_mac_addr_add,
2254 .mac_addr_remove = otx2_nix_mac_addr_del,
2255 .mac_addr_set = otx2_nix_mac_addr_set,
2256 .set_mc_addr_list = otx2_nix_set_mc_addr_list,
2257 .promiscuous_enable = otx2_nix_promisc_enable,
2258 .promiscuous_disable = otx2_nix_promisc_disable,
2259 .allmulticast_enable = otx2_nix_allmulticast_enable,
2260 .allmulticast_disable = otx2_nix_allmulticast_disable,
2261 .queue_stats_mapping_set = otx2_nix_queue_stats_mapping,
2262 .reta_update = otx2_nix_dev_reta_update,
2263 .reta_query = otx2_nix_dev_reta_query,
2264 .rss_hash_update = otx2_nix_rss_hash_update,
2265 .rss_hash_conf_get = otx2_nix_rss_hash_conf_get,
2266 .xstats_get = otx2_nix_xstats_get,
2267 .xstats_get_names = otx2_nix_xstats_get_names,
2268 .xstats_reset = otx2_nix_xstats_reset,
2269 .xstats_get_by_id = otx2_nix_xstats_get_by_id,
2270 .xstats_get_names_by_id = otx2_nix_xstats_get_names_by_id,
2271 .rxq_info_get = otx2_nix_rxq_info_get,
2272 .txq_info_get = otx2_nix_txq_info_get,
2273 .rx_burst_mode_get = otx2_rx_burst_mode_get,
2274 .tx_burst_mode_get = otx2_tx_burst_mode_get,
2275 .tx_done_cleanup = otx2_nix_tx_done_cleanup,
2276 .set_queue_rate_limit = otx2_nix_tm_set_queue_rate_limit,
2277 .pool_ops_supported = otx2_nix_pool_ops_supported,
2278 .filter_ctrl = otx2_nix_dev_filter_ctrl,
2279 .get_module_info = otx2_nix_get_module_info,
2280 .get_module_eeprom = otx2_nix_get_module_eeprom,
2281 .fw_version_get = otx2_nix_fw_version_get,
2282 .flow_ctrl_get = otx2_nix_flow_ctrl_get,
2283 .flow_ctrl_set = otx2_nix_flow_ctrl_set,
2284 .timesync_enable = otx2_nix_timesync_enable,
2285 .timesync_disable = otx2_nix_timesync_disable,
2286 .timesync_read_rx_timestamp = otx2_nix_timesync_read_rx_timestamp,
2287 .timesync_read_tx_timestamp = otx2_nix_timesync_read_tx_timestamp,
2288 .timesync_adjust_time = otx2_nix_timesync_adjust_time,
2289 .timesync_read_time = otx2_nix_timesync_read_time,
2290 .timesync_write_time = otx2_nix_timesync_write_time,
2291 .vlan_offload_set = otx2_nix_vlan_offload_set,
2292 .vlan_filter_set = otx2_nix_vlan_filter_set,
2293 .vlan_strip_queue_set = otx2_nix_vlan_strip_queue_set,
2294 .vlan_tpid_set = otx2_nix_vlan_tpid_set,
2295 .vlan_pvid_set = otx2_nix_vlan_pvid_set,
2296 .rx_queue_intr_enable = otx2_nix_rx_queue_intr_enable,
2297 .rx_queue_intr_disable = otx2_nix_rx_queue_intr_disable,
2298 .read_clock = otx2_nix_read_clock,
2302 nix_lf_attach(struct otx2_eth_dev *dev)
2304 struct otx2_mbox *mbox = dev->mbox;
2305 struct rsrc_attach_req *req;
2307 /* Attach NIX(lf) */
2308 req = otx2_mbox_alloc_msg_attach_resources(mbox);
2312 return otx2_mbox_process(mbox);
2316 nix_lf_get_msix_offset(struct otx2_eth_dev *dev)
2318 struct otx2_mbox *mbox = dev->mbox;
2319 struct msix_offset_rsp *msix_rsp;
2322 /* Get NPA and NIX MSIX vector offsets */
2323 otx2_mbox_alloc_msg_msix_offset(mbox);
2325 rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
2327 dev->nix_msixoff = msix_rsp->nix_msixoff;
2333 otx2_eth_dev_lf_detach(struct otx2_mbox *mbox)
2335 struct rsrc_detach_req *req;
2337 req = otx2_mbox_alloc_msg_detach_resources(mbox);
2339 /* Detach all LFs except the NPA LF */
2340 req->partial = true;
2347 return otx2_mbox_process(mbox);
2351 otx2_eth_dev_is_sdp(struct rte_pci_device *pci_dev)
2353 if (pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_PF ||
2354 pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_VF)
2359 static inline uint64_t
2360 nix_get_blkaddr(struct otx2_eth_dev *dev)
2364 /* Read the discovery register to know which NIX block the LF is attached to. */
2367 reg = otx2_read64(dev->bar2 +
2368 RVU_PF_BLOCK_ADDRX_DISC(RVU_BLOCK_ADDR_NIX0));
2370 return reg & 0x1FFULL ? RVU_BLOCK_ADDR_NIX0 : RVU_BLOCK_ADDR_NIX1;
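/* Assumption per the RVU block discovery register layout: the low 9 bits
 * report the number of NIX0 LFs provisioned to this function, so a non-zero
 * value means the LF lives on NIX0; otherwise fall back to NIX1.
 */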
static int
otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_pci_device *pci_dev;
	int rc, max_entries;

	eth_dev->dev_ops = &otx2_eth_dev_ops;
	eth_dev->rx_descriptor_done = otx2_nix_rx_descriptor_done;
	eth_dev->rx_queue_count = otx2_nix_rx_queue_count;
	eth_dev->rx_descriptor_status = otx2_nix_rx_descriptor_status;
	eth_dev->tx_descriptor_status = otx2_nix_tx_descriptor_status;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* Setup callbacks for secondary process */
		otx2_eth_set_tx_function(eth_dev);
		otx2_eth_set_rx_function(eth_dev);
		return 0;
	}

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	/* Zero out everything after OTX2_DEV to allow proper dev_reset() */
	memset(&dev->otx2_eth_dev_data_start, 0, sizeof(*dev) -
		offsetof(struct otx2_eth_dev, otx2_eth_dev_data_start));

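	/* Devargs are the per-device tuning knobs of this PMD. Illustrative
	 * example only (see the octeontx2 NIC guide for the accepted keys,
	 * e.g. reta_size and switch_header):
	 *   0002:02:00.0,reta_size=256,switch_header=higig2
	 */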
	/* Parse devargs string */
	rc = otx2_ethdev_parse_devargs(eth_dev->device->devargs, dev);
	if (rc) {
		otx2_err("Failed to parse devargs rc=%d", rc);
		goto error;
	}

	if (!dev->mbox_active) {
		/* Initialize the base otx2_dev object, but only if it is
		 * not already initialized (e.g. across dev_reset).
		 */
		rc = otx2_dev_init(pci_dev, dev);
		if (rc) {
			otx2_err("Failed to initialize otx2_dev rc=%d", rc);
			goto error;
		}
	}
	if (otx2_eth_dev_is_sdp(pci_dev))
		dev->sdp_link = true;
	else
		dev->sdp_link = false;
	/* Device generic callbacks */
	dev->ops = &otx2_dev_ops;
	dev->eth_dev = eth_dev;

	/* Grab the NPA LF if required */
	rc = otx2_npa_lf_init(pci_dev, dev);
	if (rc)
		goto otx2_dev_uninit;

	dev->configured = 0;
	dev->drv_inited = true;
	dev->ptype_disable = 0;
	dev->lmt_addr = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);

	/* Attach NIX LF */
	rc = nix_lf_attach(dev);
	if (rc)
		goto otx2_npa_uninit;

	dev->base = dev->bar2 + (nix_get_blkaddr(dev) << 20);

	/* Get NIX MSIX offset */
	rc = nix_lf_get_msix_offset(dev);
	if (rc)
		goto otx2_npa_uninit;

	/* Register LF irq handlers */
	rc = otx2_nix_register_irqs(eth_dev);
	if (rc)
		goto mbox_detach;

	/* Get maximum number of supported MAC entries */
	max_entries = otx2_cgx_mac_max_entries_get(dev);
	if (max_entries < 0) {
		otx2_err("Failed to get max entries for mac addr");
		rc = -ENOTSUP;
		goto unregister_irq;
	}

	/* For VFs, the returned max_entries will be 0. But to keep the
	 * default MAC address, one entry must be allocated, so bump it to 1.
	 */
	if (max_entries == 0)
		max_entries = 1;

	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", max_entries *
					       RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		otx2_err("Failed to allocate memory for mac addr");
		rc = -ENOMEM;
		goto unregister_irq;
	}

	dev->max_mac_entries = max_entries;

	rc = otx2_nix_mac_addr_get(eth_dev, dev->mac_addr);
	if (rc)
		goto free_mac_addrs;

	/* Update the mac address */
	memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);

	/* Also sync the same MAC address to the CGX table */
	otx2_cgx_mac_addr_set(eth_dev, &eth_dev->data->mac_addrs[0]);

	/* Initialize the tm data structures */
	otx2_nix_tm_conf_init(eth_dev);

	dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
	dev->rx_offload_capa = nix_get_rx_offload_capa(dev);

	if (otx2_dev_is_96xx_A0(dev) ||
	    otx2_dev_is_95xx_Ax(dev)) {
		dev->hwcap |= OTX2_FIXUP_F_MIN_4K_Q;
		dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL;
	}

	/* Create security ctx */
	rc = otx2_eth_sec_ctx_create(eth_dev);
	if (rc)
		goto free_mac_addrs;
	dev->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
	dev->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;

	/* Initialize rte-flow */
	rc = otx2_flow_init(dev);
	if (rc)
		goto sec_ctx_destroy;

	otx2_nix_mc_filter_init(dev);

	otx2_nix_dbg("Port=%d pf=%d vf=%d ver=%s msix_off=%d hwcap=0x%" PRIx64
		     " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
		     eth_dev->data->port_id, dev->pf, dev->vf,
		     OTX2_ETH_DEV_PMD_VERSION, dev->nix_msixoff, dev->hwcap,
		     dev->rx_offload_capa, dev->tx_offload_capa);
	return 0;

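	/* Failure path: the labels below unwind in reverse order of the
	 * initialization steps above, each goto releasing only what had
	 * been set up before it.
	 */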
sec_ctx_destroy:
	otx2_eth_sec_ctx_destroy(eth_dev);
free_mac_addrs:
	rte_free(eth_dev->data->mac_addrs);
unregister_irq:
	otx2_nix_unregister_irqs(eth_dev);
mbox_detach:
	otx2_eth_dev_lf_detach(dev->mbox);
otx2_npa_uninit:
	otx2_npa_lf_fini();
otx2_dev_uninit:
	otx2_dev_fini(pci_dev, dev);
error:
	otx2_err("Failed to init nix eth_dev rc=%d", rc);
	return rc;
}

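/* otx2_eth_dev_uninit() is shared by dev_close(), dev_reset() and PCI remove:
 * mbox_close selects whether the underlying otx2_dev/mailbox is torn down as
 * well. dev_reset() passes false so that otx2_eth_dev_init() can be re-run on
 * the still-active mailbox (see the dev->mbox_active check in init).
 */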
static int
otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_pci_device *pci_dev;
	int rc, i;

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Clear the flag since we are closing down */
	dev->configured = 0;

	/* Disable nix bpid config */
	otx2_nix_rxchan_bpid_cfg(eth_dev, false);

	npc_rx_disable(dev);

	/* Disable vlan offloads */
	otx2_nix_vlan_fini(eth_dev);

	/* Disable other rte_flow entries */
	otx2_flow_fini(dev);

	/* Free multicast filter list */
	otx2_nix_mc_filter_fini(dev);

	/* Disable PTP if already enabled */
	if (otx2_ethdev_is_ptp_en(dev))
		otx2_nix_timesync_disable(eth_dev);

	nix_cgx_stop_link_event(dev);

	/* Free up SQs */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		otx2_nix_tx_queue_release(eth_dev->data->tx_queues[i]);
		eth_dev->data->tx_queues[i] = NULL;
	}
	eth_dev->data->nb_tx_queues = 0;

	/* Free up RQs and CQs */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		otx2_nix_rx_queue_release(eth_dev->data->rx_queues[i]);
		eth_dev->data->rx_queues[i] = NULL;
	}
	eth_dev->data->nb_rx_queues = 0;

	/* Free tm resources */
	rc = otx2_nix_tm_fini(eth_dev);
	if (rc)
		otx2_err("Failed to cleanup tm, rc=%d", rc);

	/* Unregister queue irqs */
	oxt2_nix_unregister_queue_irqs(eth_dev);

	/* Unregister cq irqs */
	if (eth_dev->data->dev_conf.intr_conf.rxq)
		oxt2_nix_unregister_cq_irqs(eth_dev);

	rc = nix_lf_free(dev);
	if (rc)
		otx2_err("Failed to free nix lf, rc=%d", rc);

	rc = otx2_npa_lf_fini();
	if (rc)
		otx2_err("Failed to cleanup npa lf, rc=%d", rc);

	/* Disable security */
	otx2_eth_sec_fini(eth_dev);

	/* Destroy security ctx */
	otx2_eth_sec_ctx_destroy(eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
	dev->drv_inited = false;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	otx2_nix_unregister_irqs(eth_dev);

	rc = otx2_eth_dev_lf_detach(dev->mbox);
	if (rc)
		otx2_err("Failed to detach resources, rc=%d", rc);

	/* Check if mbox close is needed */
	if (!mbox_close)
		return 0;

	if (otx2_npa_lf_active(dev) || otx2_dev_active_vfs(dev)) {
		/* Will be freed later by PMD */
		eth_dev->data->dev_private = NULL;
		return 0;
	}

	otx2_dev_fini(pci_dev, dev);
	return 0;
}

static void
otx2_nix_dev_close(struct rte_eth_dev *eth_dev)
{
	otx2_eth_dev_uninit(eth_dev, true);
}

static int
otx2_nix_dev_reset(struct rte_eth_dev *eth_dev)
{
	int rc;

	rc = otx2_eth_dev_uninit(eth_dev, false);
	if (rc)
		return rc;

	return otx2_eth_dev_init(eth_dev);
}

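/* nix_remove() is the rte_pci remove callback: it uninits the ethdev (with
 * mailbox teardown requested) and then, in the primary process, finishes off
 * the shared otx2_dev/mailbox only when no other user (an active NPA LF
 * consumer or VF) still references it.
 */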
static int
nix_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	struct otx2_idev_cfg *idev;
	struct otx2_dev *otx2_dev;
	int rc;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (eth_dev) {
		/* Cleanup eth dev */
		rc = otx2_eth_dev_uninit(eth_dev, true);
		if (rc)
			return rc;

		rte_eth_dev_pci_release(eth_dev);
	}

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Check for common resources */
	idev = otx2_intra_dev_get_cfg();
	if (!idev || !idev->npa_lf || idev->npa_lf->pci_dev != pci_dev)
		return 0;

	otx2_dev = container_of(idev->npa_lf, struct otx2_dev, npalf);

	if (otx2_npa_lf_active(otx2_dev) || otx2_dev_active_vfs(otx2_dev))
		goto exit;

	/* Safe to cleanup mbox as no more users */
	otx2_dev_fini(pci_dev, otx2_dev);
	return 0;

exit:
	otx2_info("%s: common resource in use by other devices", pci_dev->name);
	return -EAGAIN;
}

static int
nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	int rc;

	RTE_SET_USED(pci_drv);

	rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct otx2_eth_dev),
					   otx2_eth_dev_init);

	/* On error on secondary, recheck if the port exists in the primary
	 * or is in the middle of being detached.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
		if (!rte_eth_dev_allocated(pci_dev->device.name))
			return 0;

	return rc;
}

static const struct rte_pci_id pci_nix_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AF_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_SDP_PF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_SDP_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

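/* Driver flags (per the rte_pci driver API): NEED_MAPPING asks EAL to map the
 * device BARs, NEED_IOVA_AS_VA restricts the PMD to IOVA-as-VA mode, and
 * INTR_LSC advertises link status change interrupt support.
 */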
static struct rte_pci_driver pci_nix = {
	.id_table = pci_nix_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
			RTE_PCI_DRV_INTR_LSC,
	.probe = nix_probe,
	.remove = nix_remove,
};

RTE_PMD_REGISTER_PCI(net_octeontx2, pci_nix);
RTE_PMD_REGISTER_PCI_TABLE(net_octeontx2, pci_nix_map);
RTE_PMD_REGISTER_KMOD_DEP(net_octeontx2, "vfio-pci");