1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
7 #include <rte_ethdev_pci.h>
9 #include <rte_malloc.h>
11 #include <rte_mbuf_pool_ops.h>
12 #include <rte_mempool.h>
14 #include "otx2_ethdev.h"
16 static inline uint64_t
17 nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
19 uint64_t capa = NIX_RX_OFFLOAD_CAPA;
21 if (otx2_dev_is_vf(dev))
22 capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
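/* PTP timestamping is managed by the PF that owns the VF (see the PTP
 * handling in otx2_nix_dev_start()), hence a VF does not advertise
 * DEV_RX_OFFLOAD_TIMESTAMP on its own.
 */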
27 static inline uint64_t
28 nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
32 return NIX_TX_OFFLOAD_CAPA;
35 static const struct otx2_dev_ops otx2_dev_ops = {
36 .link_status_update = otx2_eth_dev_link_status_update,
37 .ptp_info_update = otx2_eth_dev_ptp_info_update
41 nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
43 struct otx2_mbox *mbox = dev->mbox;
44 struct nix_lf_alloc_req *req;
45 struct nix_lf_alloc_rsp *rsp;
48 req = otx2_mbox_alloc_msg_nix_lf_alloc(mbox);
52 /* XQE_SZ should be in sync with NIX_CQ_ENTRY_SZ */
53 RTE_BUILD_BUG_ON(NIX_CQ_ENTRY_SZ != 128);
54 req->xqe_sz = NIX_XQESZ_W16;
55 req->rss_sz = dev->rss_info.rss_size;
56 req->rss_grps = NIX_RSS_GRPS;
57 req->npa_func = otx2_npa_pf_func_get();
58 req->sso_func = otx2_sso_pf_func_get();
59 req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
60 if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
61 DEV_RX_OFFLOAD_UDP_CKSUM)) {
62 req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
63 req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
66 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
70 dev->sqb_size = rsp->sqb_size;
71 dev->tx_chan_base = rsp->tx_chan_base;
72 dev->rx_chan_base = rsp->rx_chan_base;
73 dev->rx_chan_cnt = rsp->rx_chan_cnt;
74 dev->tx_chan_cnt = rsp->tx_chan_cnt;
75 dev->lso_tsov4_idx = rsp->lso_tsov4_idx;
76 dev->lso_tsov6_idx = rsp->lso_tsov6_idx;
77 dev->lf_tx_stats = rsp->lf_tx_stats;
78 dev->lf_rx_stats = rsp->lf_rx_stats;
79 dev->cints = rsp->cints;
80 dev->qints = rsp->qints;
81 dev->npc_flow.channel = dev->rx_chan_base;
87 nix_lf_free(struct otx2_eth_dev *dev)
89 struct otx2_mbox *mbox = dev->mbox;
90 struct nix_lf_free_req *req;
91 struct ndc_sync_op *ndc_req;
94 /* Sync NDC-NIX for LF */
95 ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
96 ndc_req->nix_lf_tx_sync = 1;
97 ndc_req->nix_lf_rx_sync = 1;
98 rc = otx2_mbox_process(mbox);
100 otx2_err("Error on NDC-NIX-[TX, RX] LF sync, rc %d", rc);
102 req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
103 /* Let the AF driver free all NPC entries allocated
104 * for this NIX LF via the NPC mailbox.
108 return otx2_mbox_process(mbox);
112 otx2_cgx_rxtx_start(struct otx2_eth_dev *dev)
114 struct otx2_mbox *mbox = dev->mbox;
116 if (otx2_dev_is_vf(dev))
119 otx2_mbox_alloc_msg_cgx_start_rxtx(mbox);
121 return otx2_mbox_process(mbox);
125 otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev)
127 struct otx2_mbox *mbox = dev->mbox;
129 if (otx2_dev_is_vf(dev))
132 otx2_mbox_alloc_msg_cgx_stop_rxtx(mbox);
134 return otx2_mbox_process(mbox);
138 npc_rx_enable(struct otx2_eth_dev *dev)
140 struct otx2_mbox *mbox = dev->mbox;
142 otx2_mbox_alloc_msg_nix_lf_start_rx(mbox);
144 return otx2_mbox_process(mbox);
148 npc_rx_disable(struct otx2_eth_dev *dev)
150 struct otx2_mbox *mbox = dev->mbox;
152 otx2_mbox_alloc_msg_nix_lf_stop_rx(mbox);
154 return otx2_mbox_process(mbox);
158 nix_cgx_start_link_event(struct otx2_eth_dev *dev)
160 struct otx2_mbox *mbox = dev->mbox;
162 if (otx2_dev_is_vf(dev))
165 otx2_mbox_alloc_msg_cgx_start_linkevents(mbox);
167 return otx2_mbox_process(mbox);
171 cgx_intlbk_enable(struct otx2_eth_dev *dev, bool en)
173 struct otx2_mbox *mbox = dev->mbox;
175 if (otx2_dev_is_vf(dev))
179 otx2_mbox_alloc_msg_cgx_intlbk_enable(mbox);
181 otx2_mbox_alloc_msg_cgx_intlbk_disable(mbox);
183 return otx2_mbox_process(mbox);
187 nix_cgx_stop_link_event(struct otx2_eth_dev *dev)
189 struct otx2_mbox *mbox = dev->mbox;
191 if (otx2_dev_is_vf(dev))
194 otx2_mbox_alloc_msg_cgx_stop_linkevents(mbox);
196 return otx2_mbox_process(mbox);
200 nix_rx_queue_reset(struct otx2_eth_rxq *rxq)
206 static inline uint32_t
207 nix_qsize_to_val(enum nix_q_size_e qsize)
209 return (16UL << (qsize * 2));
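/*
 * Rough mapping implied by the formula above (16UL << (qsize * 2)),
 * assuming nix_q_size_e enumerates consecutively from 0:
 *   qsize 0 ->    16 entries
 *   qsize 1 ->    64 entries
 *   qsize 2 ->   256 entries
 *   qsize 3 ->  1024 entries
 *   qsize 4 ->  4096 entries
 *   qsize 5 -> 16384 entries
 *   qsize 6 -> 65536 entries
 */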
212 static inline enum nix_q_size_e
213 nix_qsize_clampup_get(struct otx2_eth_dev *dev, uint32_t val)
217 if (otx2_ethdev_fixup_is_min_4k_q(dev))
222 for (; i < nix_q_size_max; i++)
223 if (val <= nix_qsize_to_val(i))
226 if (i >= nix_q_size_max)
227 i = nix_q_size_max - 1;
233 nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
234 uint16_t qid, struct otx2_eth_rxq *rxq, struct rte_mempool *mp)
236 struct otx2_mbox *mbox = dev->mbox;
237 const struct rte_memzone *rz;
238 uint32_t ring_size, cq_size;
239 struct nix_aq_enq_req *aq;
244 ring_size = cq_size * NIX_CQ_ENTRY_SZ;
245 rz = rte_eth_dma_zone_reserve(eth_dev, "cq", qid, ring_size,
246 NIX_CQ_ALIGN, dev->node);
248 otx2_err("Failed to allocate mem for cq hw ring");
252 memset(rz->addr, 0, rz->len);
253 rxq->desc = (uintptr_t)rz->addr;
254 rxq->qmask = cq_size - 1;
256 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
258 aq->ctype = NIX_AQ_CTYPE_CQ;
259 aq->op = NIX_AQ_INSTOP_INIT;
263 aq->cq.qsize = rxq->qsize;
264 aq->cq.base = rz->iova;
265 aq->cq.avg_level = 0xff;
266 aq->cq.cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
267 aq->cq.cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
269 /* Tx pause frames enable flow control on the Rx side */
270 if (dev->fc_info.tx_pause) {
271 /* Single bpid is allocated for all rx channels for now */
272 aq->cq.bpid = dev->fc_info.bpid[0];
273 aq->cq.bp = NIX_CQ_BP_LEVEL;
277 /* Many to one reduction */
278 aq->cq.qint_idx = qid % dev->qints;
279 /* Map CQ0 [RQ0] to CINT0 and so on, up to a maximum of 64 IRQs */
280 aq->cq.cint_idx = qid;
282 if (otx2_ethdev_fixup_is_limit_cq_full(dev)) {
283 uint16_t min_rx_drop;
284 const float rx_cq_skid = 1024 * 256;
286 min_rx_drop = ceil(rx_cq_skid / (float)cq_size);
287 aq->cq.drop = min_rx_drop;
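/* Worked example with a hypothetical CQ size: for cq_size = 4096
 * entries, min_rx_drop = ceil((1024 * 256) / 4096) = 64.
 */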
291 rc = otx2_mbox_process(mbox);
293 otx2_err("Failed to init cq context");
297 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
299 aq->ctype = NIX_AQ_CTYPE_RQ;
300 aq->op = NIX_AQ_INSTOP_INIT;
303 aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
305 aq->rq.lpb_aura = npa_lf_aura_handle_to_aura(mp->pool_id);
306 first_skip = (sizeof(struct rte_mbuf));
307 first_skip += RTE_PKTMBUF_HEADROOM;
308 first_skip += rte_pktmbuf_priv_size(mp);
309 rxq->data_off = first_skip;
311 first_skip /= 8; /* Expressed in number of dwords */
312 aq->rq.first_skip = first_skip;
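/* Worked example, assuming a 128-byte struct rte_mbuf, 128-byte
 * RTE_PKTMBUF_HEADROOM and no mempool private area:
 * first_skip = 128 + 128 + 0 = 256 bytes, so rxq->data_off is 256 and
 * aq->rq.first_skip is 256 / 8 = 32 (in 8-byte units).
 */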
313 aq->rq.later_skip = (sizeof(struct rte_mbuf) / 8);
314 aq->rq.flow_tagw = 32; /* 32-bits */
315 aq->rq.lpb_sizem1 = rte_pktmbuf_data_room_size(mp);
316 aq->rq.lpb_sizem1 += rte_pktmbuf_priv_size(mp);
317 aq->rq.lpb_sizem1 += sizeof(struct rte_mbuf);
318 aq->rq.lpb_sizem1 /= 8;
319 aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
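/* Worked example, assuming a default 2176-byte data room
 * (RTE_MBUF_DEFAULT_BUF_SIZE), no private area and a 128-byte mbuf header:
 * lpb_sizem1 = (2176 + 0 + 128) / 8 - 1 = 288 - 1 = 287.
 */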
321 aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
322 aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
323 aq->rq.rq_int_ena = 0;
324 /* Many to one reduction */
325 aq->rq.qint_idx = qid % dev->qints;
327 if (otx2_ethdev_fixup_is_limit_cq_full(dev))
328 aq->rq.xqe_drop_ena = 1;
330 rc = otx2_mbox_process(mbox);
332 otx2_err("Failed to init rq context");
342 nix_rq_enb_dis(struct rte_eth_dev *eth_dev,
343 struct otx2_eth_rxq *rxq, const bool enb)
345 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
346 struct otx2_mbox *mbox = dev->mbox;
347 struct nix_aq_enq_req *aq;
349 /* Pkts will be dropped silently if RQ is disabled */
350 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
352 aq->ctype = NIX_AQ_CTYPE_RQ;
353 aq->op = NIX_AQ_INSTOP_WRITE;
356 aq->rq_mask.ena = ~(aq->rq_mask.ena);
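/* The AQ WRITE op only updates fields whose mask bits are set; assuming
 * the mbox request is zero-initialized on allocation, ~0 here selects
 * just the ena field. The same mask pattern is used for the CQ and SQ
 * contexts elsewhere in this file.
 */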
358 return otx2_mbox_process(mbox);
362 nix_cq_rq_uninit(struct rte_eth_dev *eth_dev, struct otx2_eth_rxq *rxq)
364 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
365 struct otx2_mbox *mbox = dev->mbox;
366 struct nix_aq_enq_req *aq;
369 /* RQ is already disabled */
371 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
373 aq->ctype = NIX_AQ_CTYPE_CQ;
374 aq->op = NIX_AQ_INSTOP_WRITE;
377 aq->cq_mask.ena = ~(aq->cq_mask.ena);
379 rc = otx2_mbox_process(mbox);
381 otx2_err("Failed to disable cq context");
389 nix_get_data_off(struct otx2_eth_dev *dev)
391 return otx2_ethdev_is_ptp_en(dev) ? NIX_TIMESYNC_RX_OFFSET : 0;
395 otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id)
397 struct rte_mbuf mb_def;
400 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
401 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
402 offsetof(struct rte_mbuf, data_off) != 2);
403 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
404 offsetof(struct rte_mbuf, data_off) != 4);
405 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
406 offsetof(struct rte_mbuf, data_off) != 6);
408 mb_def.data_off = RTE_PKTMBUF_HEADROOM + nix_get_data_off(dev);
409 mb_def.port = port_id;
410 rte_mbuf_refcnt_set(&mb_def, 1);
412 /* Prevent compiler reordering: rearm_data covers previous fields */
413 rte_compiler_barrier();
414 tmp = (uint64_t *)&mb_def.rearm_data;
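/* rearm_data aliases the 8 bytes holding data_off, refcnt, nb_segs and
 * port (2 bytes each, as asserted above), so the value built here is
 * returned as rxq->mbuf_initializer and lets the Rx path seed those four
 * fields with a single 64-bit store per mbuf.
 */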
420 otx2_nix_rx_queue_release(void *rx_queue)
422 struct otx2_eth_rxq *rxq = rx_queue;
427 otx2_nix_dbg("Releasing rxq %u", rxq->rq);
428 nix_cq_rq_uninit(rxq->eth_dev, rxq);
433 otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
434 uint16_t nb_desc, unsigned int socket,
435 const struct rte_eth_rxconf *rx_conf,
436 struct rte_mempool *mp)
438 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
439 struct rte_mempool_ops *ops;
440 struct otx2_eth_rxq *rxq;
441 const char *platform_ops;
442 enum nix_q_size_e qsize;
448 /* Compile-time check to make sure all fast path elements fit in a cache line (CL) */
449 RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_rxq, slow_path_start) >= 128);
452 if (rx_conf->rx_deferred_start == 1) {
453 otx2_err("Deferred Rx start is not supported");
457 platform_ops = rte_mbuf_platform_mempool_ops();
458 /* This driver needs octeontx2_npa mempool ops to work */
459 ops = rte_mempool_get_ops(mp->ops_index);
460 if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
461 otx2_err("mempool ops should be of octeontx2_npa type");
465 if (mp->pool_id == 0) {
466 otx2_err("Invalid pool_id");
470 /* Free memory prior to re-allocation if needed */
471 if (eth_dev->data->rx_queues[rq] != NULL) {
472 otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq);
473 otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]);
474 eth_dev->data->rx_queues[rq] = NULL;
477 offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
478 dev->rx_offloads |= offloads;
480 /* Find the CQ queue size */
481 qsize = nix_qsize_clampup_get(dev, nb_desc);
482 /* Allocate rxq memory */
483 rxq = rte_zmalloc_socket("otx2 rxq", sizeof(*rxq), OTX2_ALIGN, socket);
485 otx2_err("Failed to allocate rq=%d", rq);
490 rxq->eth_dev = eth_dev;
492 rxq->cq_door = dev->base + NIX_LF_CQ_OP_DOOR;
493 rxq->cq_status = (int64_t *)(dev->base + NIX_LF_CQ_OP_STATUS);
494 rxq->wdata = (uint64_t)rq << 32;
495 rxq->aura = npa_lf_aura_handle_to_aura(mp->pool_id);
496 rxq->mbuf_initializer = otx2_nix_rxq_mbuf_setup(dev,
497 eth_dev->data->port_id);
498 rxq->offloads = offloads;
500 rxq->qlen = nix_qsize_to_val(qsize);
502 rxq->lookup_mem = otx2_nix_fastpath_lookup_mem_get();
503 rxq->tstamp = &dev->tstamp;
505 /* Alloc completion queue */
506 rc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp);
508 otx2_err("Failed to allocate rxq=%u", rq);
512 rxq->qconf.socket_id = socket;
513 rxq->qconf.nb_desc = nb_desc;
514 rxq->qconf.mempool = mp;
515 memcpy(&rxq->qconf.conf.rx, rx_conf, sizeof(struct rte_eth_rxconf));
517 nix_rx_queue_reset(rxq);
518 otx2_nix_dbg("rq=%d pool=%s qsize=%d nb_desc=%d->%d",
519 rq, mp->name, qsize, nb_desc, rxq->qlen);
521 eth_dev->data->rx_queues[rq] = rxq;
522 eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;
524 /* Calculate the delta and frequency multiplier between the PTP HI clock
525 * and the TSC. These are needed to derive the raw clock value from the
526 * TSC counter; the read_clock eth op returns the raw clock value.
528 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
529 otx2_ethdev_is_ptp_en(dev)) {
530 rc = otx2_nix_raw_clock_tsc_conv(dev);
532 otx2_err("Failed to calculate delta and freq mult");
540 otx2_nix_rx_queue_release(rxq);
545 static inline uint8_t
546 nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
549 * A maximum of three segments can be supported with W8; choose
550 * NIX_MAXSQESZ_W16 for multi-segment offload.
552 if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
553 return NIX_MAXSQESZ_W16;
555 return NIX_MAXSQESZ_W8;
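/*
 * For reference: the W8/W16 suffixes are in units of 64-bit words, i.e.
 * a W8 SQE is 64 bytes and a W16 SQE is 128 bytes; the per-SQB SQE math
 * in nix_alloc_sqb_pool() below relies on the same sizes.
 */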
559 nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
561 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
562 struct rte_eth_dev_data *data = eth_dev->data;
563 struct rte_eth_conf *conf = &data->dev_conf;
564 struct rte_eth_rxmode *rxmode = &conf->rxmode;
567 if (rxmode->mq_mode == ETH_MQ_RX_RSS)
568 flags |= NIX_RX_OFFLOAD_RSS_F;
570 if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
571 DEV_RX_OFFLOAD_UDP_CKSUM))
572 flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
574 if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
575 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
576 flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
578 if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
579 flags |= NIX_RX_MULTI_SEG_F;
581 if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
582 DEV_RX_OFFLOAD_QINQ_STRIP))
583 flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
585 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
586 flags |= NIX_RX_OFFLOAD_TSTAMP_F;
592 nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
594 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
595 uint64_t conf = dev->tx_offloads;
598 /* Fastpath is dependent on these enums */
599 RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
600 RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
601 RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
602 RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
603 RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
604 RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
605 RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
606 RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
607 RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
608 RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
609 RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
610 RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
611 RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
612 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
613 offsetof(struct rte_mbuf, buf_iova) + 8);
614 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
615 offsetof(struct rte_mbuf, buf_iova) + 16);
616 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
617 offsetof(struct rte_mbuf, ol_flags) + 12);
618 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
619 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
621 if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
622 conf & DEV_TX_OFFLOAD_QINQ_INSERT)
623 flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
625 if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
626 conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
627 flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
629 if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
630 conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
631 conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
632 conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
633 flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
635 if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
636 flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
638 if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
639 flags |= NIX_TX_MULTI_SEG_F;
641 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
642 flags |= NIX_TX_OFFLOAD_TSTAMP_F;
648 nix_sq_init(struct otx2_eth_txq *txq)
650 struct otx2_eth_dev *dev = txq->dev;
651 struct otx2_mbox *mbox = dev->mbox;
652 struct nix_aq_enq_req *sq;
657 if (txq->sqb_pool->pool_id == 0)
660 rc = otx2_nix_tm_get_leaf_data(dev, txq->sq, &rr_quantum, &smq);
662 otx2_err("Failed to get sq->smq(leaf node), rc=%d", rc);
666 sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
668 sq->ctype = NIX_AQ_CTYPE_SQ;
669 sq->op = NIX_AQ_INSTOP_INIT;
670 sq->sq.max_sqe_size = nix_sq_max_sqe_sz(txq);
673 sq->sq.smq_rr_quantum = rr_quantum;
674 sq->sq.default_chan = dev->tx_chan_base;
675 sq->sq.sqe_stype = NIX_STYPE_STF;
677 if (sq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
678 sq->sq.sqe_stype = NIX_STYPE_STP;
680 npa_lf_aura_handle_to_aura(txq->sqb_pool->pool_id);
681 sq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
682 sq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
683 sq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
684 sq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
686 /* Many to one reduction */
687 sq->sq.qint_idx = txq->sq % dev->qints;
689 return otx2_mbox_process(mbox);
693 nix_sq_uninit(struct otx2_eth_txq *txq)
695 struct otx2_eth_dev *dev = txq->dev;
696 struct otx2_mbox *mbox = dev->mbox;
697 struct ndc_sync_op *ndc_req;
698 struct nix_aq_enq_rsp *rsp;
699 struct nix_aq_enq_req *aq;
700 uint16_t sqes_per_sqb;
704 otx2_nix_dbg("Cleaning up sq %u", txq->sq);
706 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
708 aq->ctype = NIX_AQ_CTYPE_SQ;
709 aq->op = NIX_AQ_INSTOP_READ;
711 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
715 /* Check if sq is already cleaned up */
720 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
722 aq->ctype = NIX_AQ_CTYPE_SQ;
723 aq->op = NIX_AQ_INSTOP_WRITE;
725 aq->sq_mask.ena = ~aq->sq_mask.ena;
728 rc = otx2_mbox_process(mbox);
732 /* Read SQ and free its SQBs */
733 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
735 aq->ctype = NIX_AQ_CTYPE_SQ;
736 aq->op = NIX_AQ_INSTOP_READ;
738 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
743 otx2_err("SQ has pending sqe's");
745 count = aq->sq.sqb_count;
746 sqes_per_sqb = 1 << txq->sqes_per_sqb_log2;
747 /* Free the SQBs that are in use */
748 sqb_buf = (void *)rsp->sq.head_sqb;
752 next_sqb = *(void **)((uintptr_t)sqb_buf + ((sqes_per_sqb - 1) *
753 nix_sq_max_sqe_sz(txq)));
754 npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
760 /* Free the next-to-use SQB */
761 if (rsp->sq.next_sqb)
762 npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
765 /* Sync NDC-NIX-TX for LF */
766 ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
767 ndc_req->nix_lf_tx_sync = 1;
768 rc = otx2_mbox_process(mbox);
770 otx2_err("Error on NDC-NIX-TX LF sync, rc %d", rc);
776 nix_sqb_aura_limit_cfg(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
778 struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
779 struct npa_aq_enq_req *aura_req;
781 aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
782 aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
783 aura_req->ctype = NPA_AQ_CTYPE_AURA;
784 aura_req->op = NPA_AQ_INSTOP_WRITE;
786 aura_req->aura.limit = nb_sqb_bufs;
787 aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);
789 return otx2_mbox_process(npa_lf->mbox);
793 nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
795 struct otx2_eth_dev *dev = txq->dev;
796 uint16_t sqes_per_sqb, nb_sqb_bufs;
797 char name[RTE_MEMPOOL_NAMESIZE];
798 struct rte_mempool_objsz sz;
799 struct npa_aura_s *aura;
800 uint32_t tmp, blk_sz;
802 aura = (struct npa_aura_s *)((uintptr_t)txq->fc_mem + OTX2_ALIGN);
803 snprintf(name, sizeof(name), "otx2_sqb_pool_%d_%d", port, txq->sq);
804 blk_sz = dev->sqb_size;
806 if (nix_sq_max_sqe_sz(txq) == NIX_MAXSQESZ_W16)
807 sqes_per_sqb = (dev->sqb_size / 8) / 16;
809 sqes_per_sqb = (dev->sqb_size / 8) / 8;
811 nb_sqb_bufs = nb_desc / sqes_per_sqb;
812 /* Clamp up to the SQB count passed via devargs */
813 nb_sqb_bufs = RTE_MIN(dev->max_sqb_count, RTE_MAX(NIX_MIN_SQB,
814 nb_sqb_bufs + NIX_SQB_LIST_SPACE));
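/* Worked example with hypothetical numbers: if dev->sqb_size were 4096
 * bytes and the SQ used W8 SQEs, then sqes_per_sqb = (4096 / 8) / 8 = 64,
 * so 1024 descriptors would need 1024 / 64 = 16 SQBs before the
 * NIX_MIN_SQB / NIX_SQB_LIST_SPACE / max_sqb_count clamping above.
 */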
816 txq->sqb_pool = rte_mempool_create_empty(name, NIX_MAX_SQB, blk_sz,
818 MEMPOOL_F_NO_SPREAD);
819 txq->nb_sqb_bufs = nb_sqb_bufs;
820 txq->sqes_per_sqb_log2 = (uint16_t)rte_log2_u32(sqes_per_sqb);
821 txq->nb_sqb_bufs_adj = nb_sqb_bufs -
822 RTE_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb;
823 txq->nb_sqb_bufs_adj =
824 (NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
826 if (txq->sqb_pool == NULL) {
827 otx2_err("Failed to allocate sqe mempool");
831 memset(aura, 0, sizeof(*aura));
833 aura->fc_addr = txq->fc_iova;
834 aura->fc_hyst_bits = 0; /* Store count on all updates */
835 if (rte_mempool_set_ops_byname(txq->sqb_pool, "octeontx2_npa", aura)) {
836 otx2_err("Failed to set ops for sqe mempool");
839 if (rte_mempool_populate_default(txq->sqb_pool) < 0) {
840 otx2_err("Failed to populate sqe mempool");
844 tmp = rte_mempool_calc_obj_size(blk_sz, MEMPOOL_F_NO_SPREAD, &sz);
845 if (dev->sqb_size != sz.elt_size) {
846 otx2_err("sqe pool block size is not expected %d != %d",
851 nix_sqb_aura_limit_cfg(txq->sqb_pool, txq->nb_sqb_bufs);
859 otx2_nix_form_default_desc(struct otx2_eth_txq *txq)
861 struct nix_send_ext_s *send_hdr_ext;
862 struct nix_send_hdr_s *send_hdr;
863 struct nix_send_mem_s *send_mem;
864 union nix_send_sg_s *sg;
866 /* Initialize the fields based on a basic single-segment packet */
867 memset(&txq->cmd, 0, sizeof(txq->cmd));
869 if (txq->dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
870 send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
871 /* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
872 send_hdr->w0.sizem1 = 2;
874 send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2];
875 send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
876 if (txq->dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
877 /* Default: one seg packet would have:
878 * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
881 send_hdr->w0.sizem1 = 3;
882 send_hdr_ext->w0.tstmp = 1;
884 /* The offset of send_mem (in 64-bit words) is
885 * send_hdr->w0.sizem1 * 2
887 send_mem = (struct nix_send_mem_s *)(txq->cmd +
888 (send_hdr->w0.sizem1 << 1));
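/* e.g. with sizem1 = 3, the MEM sub-descriptor starts at txq->cmd[6],
 * right after HDR(2) + EXT(2) + SG(1) + IOVA(1) 64-bit words.
 */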
889 send_mem->subdc = NIX_SUBDC_MEM;
890 send_mem->alg = NIX_SENDMEMALG_SETTSTMP;
891 send_mem->addr = txq->dev->tstamp.tx_tstamp_iova;
893 sg = (union nix_send_sg_s *)&txq->cmd[4];
895 send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
896 /* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
897 send_hdr->w0.sizem1 = 1;
898 sg = (union nix_send_sg_s *)&txq->cmd[2];
901 send_hdr->w0.sq = txq->sq;
902 sg->subdc = NIX_SUBDC_SG;
904 sg->ld_type = NIX_SENDLDTYPE_LDD;
910 otx2_nix_tx_queue_release(void *_txq)
912 struct otx2_eth_txq *txq = _txq;
913 struct rte_eth_dev *eth_dev;
918 eth_dev = txq->dev->eth_dev;
920 otx2_nix_dbg("Releasing txq %u", txq->sq);
922 /* Flush and disable tm */
923 otx2_nix_tm_sw_xoff(txq, eth_dev->data->dev_started);
925 /* Free SQBs and disable the SQ */
929 rte_mempool_free(txq->sqb_pool);
930 txq->sqb_pool = NULL;
937 otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
938 uint16_t nb_desc, unsigned int socket_id,
939 const struct rte_eth_txconf *tx_conf)
941 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
942 const struct rte_memzone *fc;
943 struct otx2_eth_txq *txq;
949 /* Compile-time check to make sure all fast path elements fit in a cache line (CL) */
950 RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_txq, slow_path_start) >= 128);
952 if (tx_conf->tx_deferred_start) {
953 otx2_err("Tx deferred start is not supported");
957 /* Free memory prior to re-allocation if needed. */
958 if (eth_dev->data->tx_queues[sq] != NULL) {
959 otx2_nix_dbg("Freeing memory prior to re-allocation %d", sq);
960 otx2_nix_tx_queue_release(eth_dev->data->tx_queues[sq]);
961 eth_dev->data->tx_queues[sq] = NULL;
964 /* Find the expected offloads for this queue */
965 offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
967 /* Allocating tx queue data structure */
968 txq = rte_zmalloc_socket("otx2_ethdev TX queue", sizeof(*txq),
969 OTX2_ALIGN, socket_id);
971 otx2_err("Failed to alloc txq=%d", sq);
977 txq->sqb_pool = NULL;
978 txq->offloads = offloads;
979 dev->tx_offloads |= offloads;
982 * Allocate memory for flow control updates from HW.
983 * Allocate one cache line so that it fits all FC_STYPE modes.
985 fc = rte_eth_dma_zone_reserve(eth_dev, "fcmem", sq,
986 OTX2_ALIGN + sizeof(struct npa_aura_s),
987 OTX2_ALIGN, dev->node);
989 otx2_err("Failed to allocate mem for fcmem");
993 txq->fc_iova = fc->iova;
994 txq->fc_mem = fc->addr;
996 /* Initialize the aura sqb pool */
997 rc = nix_alloc_sqb_pool(eth_dev->data->port_id, txq, nb_desc);
999 otx2_err("Failed to alloc sqe pool rc=%d", rc);
1003 /* Initialize the SQ */
1004 rc = nix_sq_init(txq);
1006 otx2_err("Failed to init sq=%d context", sq);
1010 txq->fc_cache_pkts = 0;
1011 txq->io_addr = dev->base + NIX_LF_OP_SENDX(0);
1012 /* Evenly distribute LMT slots across SQs */
1013 txq->lmt_addr = (void *)(dev->lmt_addr + ((sq & LMT_SLOT_MASK) << 12));
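/* The << 12 gives each SQ a 4 KiB LMT region; LMT_SLOT_MASK wraps the SQ
 * index, so SQs beyond the number of available slots share LMT regions.
 */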
1015 txq->qconf.socket_id = socket_id;
1016 txq->qconf.nb_desc = nb_desc;
1017 memcpy(&txq->qconf.conf.tx, tx_conf, sizeof(struct rte_eth_txconf));
1019 otx2_nix_form_default_desc(txq);
1021 otx2_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " sqb=0x%" PRIx64 ""
1022 " lmt_addr=%p nb_sqb_bufs=%d sqes_per_sqb_log2=%d", sq,
1023 fc->addr, offloads, txq->sqb_pool->pool_id, txq->lmt_addr,
1024 txq->nb_sqb_bufs, txq->sqes_per_sqb_log2);
1025 eth_dev->data->tx_queues[sq] = txq;
1026 eth_dev->data->tx_queue_state[sq] = RTE_ETH_QUEUE_STATE_STOPPED;
1030 otx2_nix_tx_queue_release(txq);
1036 nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
1038 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1039 struct otx2_eth_qconf *tx_qconf = NULL;
1040 struct otx2_eth_qconf *rx_qconf = NULL;
1041 struct otx2_eth_txq **txq;
1042 struct otx2_eth_rxq **rxq;
1043 int i, nb_rxq, nb_txq;
1045 nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
1046 nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
1048 tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
1049 if (tx_qconf == NULL) {
1050 otx2_err("Failed to allocate memory for tx_qconf");
1054 rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
1055 if (rx_qconf == NULL) {
1056 otx2_err("Failed to allocate memory for rx_qconf");
1060 txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
1061 for (i = 0; i < nb_txq; i++) {
1062 if (txq[i] == NULL) {
1063 otx2_err("txq[%d] is already released", i);
1066 memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf));
1067 otx2_nix_tx_queue_release(txq[i]);
1068 eth_dev->data->tx_queues[i] = NULL;
1071 rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
1072 for (i = 0; i < nb_rxq; i++) {
1073 if (rxq[i] == NULL) {
1074 otx2_err("rxq[%d] is already released", i);
1077 memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf));
1078 otx2_nix_rx_queue_release(rxq[i]);
1079 eth_dev->data->rx_queues[i] = NULL;
1082 dev->tx_qconf = tx_qconf;
1083 dev->rx_qconf = rx_qconf;
1096 nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
1098 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1099 struct otx2_eth_qconf *tx_qconf = dev->tx_qconf;
1100 struct otx2_eth_qconf *rx_qconf = dev->rx_qconf;
1101 struct otx2_eth_txq **txq;
1102 struct otx2_eth_rxq **rxq;
1103 int rc, i, nb_rxq, nb_txq;
1105 nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
1106 nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
1109 /* Set up Tx & Rx queues with the previous configuration so
1110 * that the queues can remain functional in cases where ports
1111 * are started without reconfiguring the queues.
1113 * The usual reconfig sequence is as below:
1114 * port_configure() {
1119 * queue_configure() {
1126 * In some applications' control paths, queue_configure() would
1127 * NOT be invoked for TXQs/RXQs after port_configure().
1128 * In such cases, the queues can still be functional after start, as
1129 * they were already set up in port_configure().
1131 for (i = 0; i < nb_txq; i++) {
1132 rc = otx2_nix_tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc,
1133 tx_qconf[i].socket_id,
1134 &tx_qconf[i].conf.tx);
1136 otx2_err("Failed to setup tx queue rc=%d", rc);
1137 txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
1138 for (i -= 1; i >= 0; i--)
1139 otx2_nix_tx_queue_release(txq[i]);
1144 free(tx_qconf); tx_qconf = NULL;
1146 for (i = 0; i < nb_rxq; i++) {
1147 rc = otx2_nix_rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc,
1148 rx_qconf[i].socket_id,
1149 &rx_qconf[i].conf.rx,
1150 rx_qconf[i].mempool);
1152 otx2_err("Failed to setup rx queue rc=%d", rc);
1153 rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
1154 for (i -= 1; i >= 0; i--)
1155 otx2_nix_rx_queue_release(rxq[i]);
1156 goto release_tx_queues;
1160 free(rx_qconf); rx_qconf = NULL;
1165 txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
1166 for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1167 otx2_nix_tx_queue_release(txq[i]);
1178 nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
1180 RTE_SET_USED(queue);
1181 RTE_SET_USED(mbufs);
1188 nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
1190 /* These dummy functions are required to support
1191 * applications which reconfigure queues without
1192 * stopping the Tx and Rx burst threads (e.g. the KNI app).
1193 * When the queue context is saved, txqs/rxqs are released,
1194 * which would crash the app since rx/tx burst is still
1195 * running on different lcores.
1197 eth_dev->tx_pkt_burst = nix_eth_nop_burst;
1198 eth_dev->rx_pkt_burst = nix_eth_nop_burst;
1203 otx2_nix_configure(struct rte_eth_dev *eth_dev)
1205 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1206 struct rte_eth_dev_data *data = eth_dev->data;
1207 struct rte_eth_conf *conf = &data->dev_conf;
1208 struct rte_eth_rxmode *rxmode = &conf->rxmode;
1209 struct rte_eth_txmode *txmode = &conf->txmode;
1210 char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
1211 struct rte_ether_addr *ea;
1212 uint8_t nb_rxq, nb_txq;
1218 if (rte_eal_has_hugepages() == 0) {
1219 otx2_err("Huge page is not configured");
1220 goto fail_configure;
1223 if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
1224 otx2_err("Setting link speed/duplex not supported");
1225 goto fail_configure;
1228 if (conf->dcb_capability_en == 1) {
1229 otx2_err("dcb enable is not supported");
1230 goto fail_configure;
1233 if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1234 otx2_err("Flow director is not supported");
1235 goto fail_configure;
1238 if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
1239 rxmode->mq_mode != ETH_MQ_RX_RSS) {
1240 otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
1241 goto fail_configure;
1244 if (txmode->mq_mode != ETH_MQ_TX_NONE) {
1245 otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
1246 goto fail_configure;
1249 if (otx2_dev_is_Ax(dev) &&
1250 (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
1251 ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
1252 (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
1253 otx2_err("Outer IP and SCTP checksum unsupported");
1254 goto fail_configure;
1257 /* Free the resources allocated from the previous configure */
1258 if (dev->configured == 1) {
1259 otx2_nix_rxchan_bpid_cfg(eth_dev, false);
1260 otx2_nix_vlan_fini(eth_dev);
1261 otx2_flow_free_all_resources(dev);
1262 oxt2_nix_unregister_queue_irqs(eth_dev);
1263 if (eth_dev->data->dev_conf.intr_conf.rxq)
1264 oxt2_nix_unregister_cq_irqs(eth_dev);
1265 nix_set_nop_rxtx_function(eth_dev);
1266 rc = nix_store_queue_cfg_and_then_release(eth_dev);
1268 goto fail_configure;
1269 otx2_nix_tm_fini(eth_dev);
1273 dev->rx_offloads = rxmode->offloads;
1274 dev->tx_offloads = txmode->offloads;
1275 dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
1276 dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);
1277 dev->rss_info.rss_grps = NIX_RSS_GRPS;
1279 nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
1280 nb_txq = RTE_MAX(data->nb_tx_queues, 1);
1282 /* Alloc a nix lf */
1283 rc = nix_lf_alloc(dev, nb_rxq, nb_txq);
1285 otx2_err("Failed to init nix_lf rc=%d", rc);
1290 rc = otx2_nix_rss_config(eth_dev);
1292 otx2_err("Failed to configure rss rc=%d", rc);
1296 /* Init the default TM scheduler hierarchy */
1297 rc = otx2_nix_tm_init_default(eth_dev);
1299 otx2_err("Failed to init traffic manager rc=%d", rc);
1303 rc = otx2_nix_vlan_offload_init(eth_dev);
1305 otx2_err("Failed to init vlan offload rc=%d", rc);
1309 /* Register queue IRQs */
1310 rc = oxt2_nix_register_queue_irqs(eth_dev);
1312 otx2_err("Failed to register queue interrupts rc=%d", rc);
1316 /* Register cq IRQs */
1317 if (eth_dev->data->dev_conf.intr_conf.rxq) {
1318 if (eth_dev->data->nb_rx_queues > dev->cints) {
1319 otx2_err("Rx interrupt cannot be enabled, rxq > %d",
1323 /* The Rx interrupt feature cannot work with vector mode because
1324 * vector mode does not process packets unless at least 4 pkts are
1325 * received, while CQ interrupts are generated even for 1 pkt
1328 dev->scalar_ena = true;
1330 rc = oxt2_nix_register_cq_irqs(eth_dev);
1332 otx2_err("Failed to register CQ interrupts rc=%d", rc);
1337 /* Configure loopback mode */
1338 rc = cgx_intlbk_enable(dev, eth_dev->data->dev_conf.lpbk_mode);
1340 otx2_err("Failed to configure cgx loop back mode rc=%d", rc);
1344 rc = otx2_nix_rxchan_bpid_cfg(eth_dev, true);
1346 otx2_err("Failed to configure nix rx chan bpid cfg rc=%d", rc);
1351 * Restore the queue config when a reconfigure follows a previous
1352 * reconfigure and the application did not invoke queue configure in between.
1354 if (dev->configured == 1) {
1355 rc = nix_restore_queue_cfg(eth_dev);
1360 /* Update the mac address */
1361 ea = eth_dev->data->mac_addrs;
1362 memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1363 if (rte_is_zero_ether_addr(ea))
1364 rte_eth_random_addr((uint8_t *)ea);
1366 rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
1368 otx2_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
1369 " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 ""
1370 " rx_flags=0x%x tx_flags=0x%x",
1371 eth_dev->data->port_id, ea_fmt, nb_rxq,
1372 nb_txq, dev->rx_offloads, dev->tx_offloads,
1373 dev->rx_offload_flags, dev->tx_offload_flags);
1376 dev->configured = 1;
1377 dev->configured_nb_rx_qs = data->nb_rx_queues;
1378 dev->configured_nb_tx_qs = data->nb_tx_queues;
1382 oxt2_nix_unregister_cq_irqs(eth_dev);
1384 oxt2_nix_unregister_queue_irqs(eth_dev);
1386 otx2_nix_vlan_fini(eth_dev);
1388 otx2_nix_tm_fini(eth_dev);
1392 dev->rx_offload_flags &= ~nix_rx_offload_flags(eth_dev);
1393 dev->tx_offload_flags &= ~nix_tx_offload_flags(eth_dev);
1395 dev->configured = 0;
1400 otx2_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
1402 struct rte_eth_dev_data *data = eth_dev->data;
1403 struct otx2_eth_txq *txq;
1406 txq = eth_dev->data->tx_queues[qidx];
1408 if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
1411 rc = otx2_nix_sq_sqb_aura_fc(txq, true);
1413 otx2_err("Failed to enable sqb aura fc, txq=%u, rc=%d",
1418 data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
1425 otx2_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
1427 struct rte_eth_dev_data *data = eth_dev->data;
1428 struct otx2_eth_txq *txq;
1431 txq = eth_dev->data->tx_queues[qidx];
1433 if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
1436 txq->fc_cache_pkts = 0;
1438 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
1440 otx2_err("Failed to disable sqb aura fc, txq=%u, rc=%d",
1445 data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
1452 otx2_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
1454 struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
1455 struct rte_eth_dev_data *data = eth_dev->data;
1458 if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
1461 rc = nix_rq_enb_dis(rxq->eth_dev, rxq, true);
1463 otx2_err("Failed to enable rxq=%u, rc=%d", qidx, rc);
1467 data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
1474 otx2_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
1476 struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
1477 struct rte_eth_dev_data *data = eth_dev->data;
1480 if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
1483 rc = nix_rq_enb_dis(rxq->eth_dev, rxq, false);
1485 otx2_err("Failed to disable rxq=%u, rc=%d", qidx, rc);
1489 data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
1496 otx2_nix_dev_stop(struct rte_eth_dev *eth_dev)
1498 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1499 struct rte_mbuf *rx_pkts[32];
1500 struct otx2_eth_rxq *rxq;
1501 int count, i, j, rc;
1503 nix_cgx_stop_link_event(dev);
1504 npc_rx_disable(dev);
1506 /* Stop rx queues and free up pkts pending */
1507 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1508 rc = otx2_nix_rx_queue_stop(eth_dev, i);
1512 rxq = eth_dev->data->rx_queues[i];
1513 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1515 for (j = 0; j < count; j++)
1516 rte_pktmbuf_free(rx_pkts[j]);
1517 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1521 /* Stop tx queues */
1522 for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1523 otx2_nix_tx_queue_stop(eth_dev, i);
1527 otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
1529 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1532 if (eth_dev->data->nb_rx_queues != 0) {
1533 rc = otx2_nix_recalc_mtu(eth_dev);
1538 /* Start rx queues */
1539 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1540 rc = otx2_nix_rx_queue_start(eth_dev, i);
1545 /* Start tx queues */
1546 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1547 rc = otx2_nix_tx_queue_start(eth_dev, i);
1552 rc = otx2_nix_update_flow_ctrl_mode(eth_dev);
1554 otx2_err("Failed to update flow ctrl mode %d", rc);
1558 /* Enable PTP if it was requested by the app or if it is already
1559 * enabled in the PF owning this VF
1561 memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
1562 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
1563 otx2_ethdev_is_ptp_en(dev))
1564 otx2_nix_timesync_enable(eth_dev);
1566 otx2_nix_timesync_disable(eth_dev);
1568 rc = npc_rx_enable(dev);
1570 otx2_err("Failed to enable NPC rx %d", rc);
1574 otx2_nix_toggle_flag_link_cfg(dev, true);
1576 rc = nix_cgx_start_link_event(dev);
1578 otx2_err("Failed to start cgx link event %d", rc);
1582 otx2_nix_toggle_flag_link_cfg(dev, false);
1583 otx2_eth_set_tx_function(eth_dev);
1584 otx2_eth_set_rx_function(eth_dev);
1589 npc_rx_disable(dev);
1590 otx2_nix_toggle_flag_link_cfg(dev, false);
1594 static int otx2_nix_dev_reset(struct rte_eth_dev *eth_dev);
1595 static void otx2_nix_dev_close(struct rte_eth_dev *eth_dev);
1597 /* Initialize and register driver with DPDK Application */
1598 static const struct eth_dev_ops otx2_eth_dev_ops = {
1599 .dev_infos_get = otx2_nix_info_get,
1600 .dev_configure = otx2_nix_configure,
1601 .link_update = otx2_nix_link_update,
1602 .tx_queue_setup = otx2_nix_tx_queue_setup,
1603 .tx_queue_release = otx2_nix_tx_queue_release,
1604 .rx_queue_setup = otx2_nix_rx_queue_setup,
1605 .rx_queue_release = otx2_nix_rx_queue_release,
1606 .dev_start = otx2_nix_dev_start,
1607 .dev_stop = otx2_nix_dev_stop,
1608 .dev_close = otx2_nix_dev_close,
1609 .tx_queue_start = otx2_nix_tx_queue_start,
1610 .tx_queue_stop = otx2_nix_tx_queue_stop,
1611 .rx_queue_start = otx2_nix_rx_queue_start,
1612 .rx_queue_stop = otx2_nix_rx_queue_stop,
1613 .dev_set_link_up = otx2_nix_dev_set_link_up,
1614 .dev_set_link_down = otx2_nix_dev_set_link_down,
1615 .dev_supported_ptypes_get = otx2_nix_supported_ptypes_get,
1616 .dev_reset = otx2_nix_dev_reset,
1617 .stats_get = otx2_nix_dev_stats_get,
1618 .stats_reset = otx2_nix_dev_stats_reset,
1619 .get_reg = otx2_nix_dev_get_reg,
1620 .mtu_set = otx2_nix_mtu_set,
1621 .mac_addr_add = otx2_nix_mac_addr_add,
1622 .mac_addr_remove = otx2_nix_mac_addr_del,
1623 .mac_addr_set = otx2_nix_mac_addr_set,
1624 .promiscuous_enable = otx2_nix_promisc_enable,
1625 .promiscuous_disable = otx2_nix_promisc_disable,
1626 .allmulticast_enable = otx2_nix_allmulticast_enable,
1627 .allmulticast_disable = otx2_nix_allmulticast_disable,
1628 .queue_stats_mapping_set = otx2_nix_queue_stats_mapping,
1629 .reta_update = otx2_nix_dev_reta_update,
1630 .reta_query = otx2_nix_dev_reta_query,
1631 .rss_hash_update = otx2_nix_rss_hash_update,
1632 .rss_hash_conf_get = otx2_nix_rss_hash_conf_get,
1633 .xstats_get = otx2_nix_xstats_get,
1634 .xstats_get_names = otx2_nix_xstats_get_names,
1635 .xstats_reset = otx2_nix_xstats_reset,
1636 .xstats_get_by_id = otx2_nix_xstats_get_by_id,
1637 .xstats_get_names_by_id = otx2_nix_xstats_get_names_by_id,
1638 .rxq_info_get = otx2_nix_rxq_info_get,
1639 .txq_info_get = otx2_nix_txq_info_get,
1640 .rx_queue_count = otx2_nix_rx_queue_count,
1641 .rx_descriptor_done = otx2_nix_rx_descriptor_done,
1642 .rx_descriptor_status = otx2_nix_rx_descriptor_status,
1643 .tx_done_cleanup = otx2_nix_tx_done_cleanup,
1644 .pool_ops_supported = otx2_nix_pool_ops_supported,
1645 .filter_ctrl = otx2_nix_dev_filter_ctrl,
1646 .get_module_info = otx2_nix_get_module_info,
1647 .get_module_eeprom = otx2_nix_get_module_eeprom,
1648 .fw_version_get = otx2_nix_fw_version_get,
1649 .flow_ctrl_get = otx2_nix_flow_ctrl_get,
1650 .flow_ctrl_set = otx2_nix_flow_ctrl_set,
1651 .timesync_enable = otx2_nix_timesync_enable,
1652 .timesync_disable = otx2_nix_timesync_disable,
1653 .timesync_read_rx_timestamp = otx2_nix_timesync_read_rx_timestamp,
1654 .timesync_read_tx_timestamp = otx2_nix_timesync_read_tx_timestamp,
1655 .timesync_adjust_time = otx2_nix_timesync_adjust_time,
1656 .timesync_read_time = otx2_nix_timesync_read_time,
1657 .timesync_write_time = otx2_nix_timesync_write_time,
1658 .vlan_offload_set = otx2_nix_vlan_offload_set,
1659 .vlan_filter_set = otx2_nix_vlan_filter_set,
1660 .vlan_strip_queue_set = otx2_nix_vlan_strip_queue_set,
1661 .vlan_tpid_set = otx2_nix_vlan_tpid_set,
1662 .vlan_pvid_set = otx2_nix_vlan_pvid_set,
1663 .rx_queue_intr_enable = otx2_nix_rx_queue_intr_enable,
1664 .rx_queue_intr_disable = otx2_nix_rx_queue_intr_disable,
1665 .read_clock = otx2_nix_read_clock,
1669 nix_lf_attach(struct otx2_eth_dev *dev)
1671 struct otx2_mbox *mbox = dev->mbox;
1672 struct rsrc_attach_req *req;
1674 /* Attach NIX(lf) */
1675 req = otx2_mbox_alloc_msg_attach_resources(mbox);
1679 return otx2_mbox_process(mbox);
1683 nix_lf_get_msix_offset(struct otx2_eth_dev *dev)
1685 struct otx2_mbox *mbox = dev->mbox;
1686 struct msix_offset_rsp *msix_rsp;
1689 /* Get NPA and NIX MSIX vector offsets */
1690 otx2_mbox_alloc_msg_msix_offset(mbox);
1692 rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
1694 dev->nix_msixoff = msix_rsp->nix_msixoff;
1700 otx2_eth_dev_lf_detach(struct otx2_mbox *mbox)
1702 struct rsrc_detach_req *req;
1704 req = otx2_mbox_alloc_msg_detach_resources(mbox);
1706 /* Detach all except npa lf */
1707 req->partial = true;
1714 return otx2_mbox_process(mbox);
1718 otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
1720 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1721 struct rte_pci_device *pci_dev;
1722 int rc, max_entries;
1724 eth_dev->dev_ops = &otx2_eth_dev_ops;
1726 /* For secondary processes, the primary has done all the work */
1727 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1728 /* Setup callbacks for secondary process */
1729 otx2_eth_set_tx_function(eth_dev);
1730 otx2_eth_set_rx_function(eth_dev);
1734 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1736 rte_eth_copy_pci_info(eth_dev, pci_dev);
1737 eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
1739 /* Zero out everything after OTX2_DEV to allow proper dev_reset() */
1740 memset(&dev->otx2_eth_dev_data_start, 0, sizeof(*dev) -
1741 offsetof(struct otx2_eth_dev, otx2_eth_dev_data_start));
1743 /* Parse devargs string */
1744 rc = otx2_ethdev_parse_devargs(eth_dev->device->devargs, dev);
1746 otx2_err("Failed to parse devargs rc=%d", rc);
1750 if (!dev->mbox_active) {
1751 /* Initialize the base otx2_dev object
1752 * only if it is not already initialized
1754 rc = otx2_dev_init(pci_dev, dev);
1756 otx2_err("Failed to initialize otx2_dev rc=%d", rc);
1760 /* Device generic callbacks */
1761 dev->ops = &otx2_dev_ops;
1762 dev->eth_dev = eth_dev;
1764 /* Grab the NPA LF if required */
1765 rc = otx2_npa_lf_init(pci_dev, dev);
1767 goto otx2_dev_uninit;
1769 dev->configured = 0;
1770 dev->drv_inited = true;
1771 dev->base = dev->bar2 + (RVU_BLOCK_ADDR_NIX0 << 20);
1772 dev->lmt_addr = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);
1775 rc = nix_lf_attach(dev);
1777 goto otx2_npa_uninit;
1779 /* Get NIX MSIX offset */
1780 rc = nix_lf_get_msix_offset(dev);
1782 goto otx2_npa_uninit;
1784 /* Register LF irq handlers */
1785 rc = otx2_nix_register_irqs(eth_dev);
1789 /* Get maximum number of supported MAC entries */
1790 max_entries = otx2_cgx_mac_max_entries_get(dev);
1791 if (max_entries < 0) {
1792 otx2_err("Failed to get max entries for mac addr");
1794 goto unregister_irq;
1797 /* For VFs, the returned max_entries will be 0. But to keep the default
1798 * MAC address, one entry must be allocated, so set it to 1.
1800 if (max_entries == 0)
1803 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", max_entries *
1804 RTE_ETHER_ADDR_LEN, 0);
1805 if (eth_dev->data->mac_addrs == NULL) {
1806 otx2_err("Failed to allocate memory for mac addr");
1808 goto unregister_irq;
1811 dev->max_mac_entries = max_entries;
1813 rc = otx2_nix_mac_addr_get(eth_dev, dev->mac_addr);
1815 goto free_mac_addrs;
1817 /* Update the mac address */
1818 memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1820 /* Also sync same MAC address to CGX table */
1821 otx2_cgx_mac_addr_set(eth_dev, ð_dev->data->mac_addrs[0]);
1823 /* Initialize the tm data structures */
1824 otx2_nix_tm_conf_init(eth_dev);
1826 dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
1827 dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
1829 if (otx2_dev_is_Ax(dev)) {
1830 dev->hwcap |= OTX2_FIXUP_F_MIN_4K_Q;
1831 dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL;
1834 /* Initialize rte-flow */
1835 rc = otx2_flow_init(dev);
1837 goto free_mac_addrs;
1839 otx2_nix_dbg("Port=%d pf=%d vf=%d ver=%s msix_off=%d hwcap=0x%" PRIx64
1840 " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
1841 eth_dev->data->port_id, dev->pf, dev->vf,
1842 OTX2_ETH_DEV_PMD_VERSION, dev->nix_msixoff, dev->hwcap,
1843 dev->rx_offload_capa, dev->tx_offload_capa);
1847 rte_free(eth_dev->data->mac_addrs);
1849 otx2_nix_unregister_irqs(eth_dev);
1851 otx2_eth_dev_lf_detach(dev->mbox);
1855 otx2_dev_fini(pci_dev, dev);
1857 otx2_err("Failed to init nix eth_dev rc=%d", rc);
1862 otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
1864 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1865 struct rte_pci_device *pci_dev;
1868 /* Nothing to be done for secondary processes */
1869 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1872 /* Clear the flag since we are closing down */
1873 dev->configured = 0;
1875 /* Disable nix bpid config */
1876 otx2_nix_rxchan_bpid_cfg(eth_dev, false);
1878 npc_rx_disable(dev);
1880 /* Disable vlan offloads */
1881 otx2_nix_vlan_fini(eth_dev);
1883 /* Disable other rte_flow entries */
1884 otx2_flow_fini(dev);
1886 /* Disable PTP if already enabled */
1887 if (otx2_ethdev_is_ptp_en(dev))
1888 otx2_nix_timesync_disable(eth_dev);
1890 nix_cgx_stop_link_event(dev);
1893 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1894 otx2_nix_tx_queue_release(eth_dev->data->tx_queues[i]);
1895 eth_dev->data->tx_queues[i] = NULL;
1897 eth_dev->data->nb_tx_queues = 0;
1899 /* Free up RQs and CQs */
1900 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1901 otx2_nix_rx_queue_release(eth_dev->data->rx_queues[i]);
1902 eth_dev->data->rx_queues[i] = NULL;
1904 eth_dev->data->nb_rx_queues = 0;
1906 /* Free tm resources */
1907 rc = otx2_nix_tm_fini(eth_dev);
1909 otx2_err("Failed to cleanup tm, rc=%d", rc);
1911 /* Unregister queue irqs */
1912 oxt2_nix_unregister_queue_irqs(eth_dev);
1914 /* Unregister cq irqs */
1915 if (eth_dev->data->dev_conf.intr_conf.rxq)
1916 oxt2_nix_unregister_cq_irqs(eth_dev);
1918 rc = nix_lf_free(dev);
1920 otx2_err("Failed to free nix lf, rc=%d", rc);
1922 rc = otx2_npa_lf_fini();
1924 otx2_err("Failed to cleanup npa lf, rc=%d", rc);
1926 rte_free(eth_dev->data->mac_addrs);
1927 eth_dev->data->mac_addrs = NULL;
1928 dev->drv_inited = false;
1930 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1931 otx2_nix_unregister_irqs(eth_dev);
1933 rc = otx2_eth_dev_lf_detach(dev->mbox);
1935 otx2_err("Failed to detach resources, rc=%d", rc);
1937 /* Check if mbox close is needed */
1941 if (otx2_npa_lf_active(dev) || otx2_dev_active_vfs(dev)) {
1942 /* Will be freed later by PMD */
1943 eth_dev->data->dev_private = NULL;
1947 otx2_dev_fini(pci_dev, dev);
1952 otx2_nix_dev_close(struct rte_eth_dev *eth_dev)
1954 otx2_eth_dev_uninit(eth_dev, true);
1958 otx2_nix_dev_reset(struct rte_eth_dev *eth_dev)
1962 rc = otx2_eth_dev_uninit(eth_dev, false);
1966 return otx2_eth_dev_init(eth_dev);
1970 nix_remove(struct rte_pci_device *pci_dev)
1972 struct rte_eth_dev *eth_dev;
1973 struct otx2_idev_cfg *idev;
1974 struct otx2_dev *otx2_dev;
1977 eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
1979 /* Cleanup eth dev */
1980 rc = otx2_eth_dev_uninit(eth_dev, true);
1984 rte_eth_dev_pci_release(eth_dev);
1987 /* Nothing to be done for secondary processes */
1988 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1991 /* Check for common resources */
1992 idev = otx2_intra_dev_get_cfg();
1993 if (!idev || !idev->npa_lf || idev->npa_lf->pci_dev != pci_dev)
1996 otx2_dev = container_of(idev->npa_lf, struct otx2_dev, npalf);
1998 if (otx2_npa_lf_active(otx2_dev) || otx2_dev_active_vfs(otx2_dev))
2001 /* Safe to clean up the mbox as there are no more users */
2002 otx2_dev_fini(pci_dev, otx2_dev);
2007 otx2_info("%s: common resource in use by other devices", pci_dev->name);
2012 nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
2016 RTE_SET_USED(pci_drv);
2018 rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct otx2_eth_dev),
2021 /* On error in a secondary process, recheck whether the port exists in
2022 * the primary process or is in the middle of being detached.
2024 if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
2025 if (!rte_eth_dev_allocated(pci_dev->device.name))
2030 static const struct rte_pci_id pci_nix_map[] = {
2032 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF)
2035 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF)
2038 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
2039 PCI_DEVID_OCTEONTX2_RVU_AF_VF)
2046 static struct rte_pci_driver pci_nix = {
2047 .id_table = pci_nix_map,
2048 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
2049 RTE_PCI_DRV_INTR_LSC,
2051 .remove = nix_remove,
2054 RTE_PMD_REGISTER_PCI(net_octeontx2, pci_nix);
2055 RTE_PMD_REGISTER_PCI_TABLE(net_octeontx2, pci_nix_map);
2056 RTE_PMD_REGISTER_KMOD_DEP(net_octeontx2, "vfio-pci");