/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <inttypes.h>
#include <math.h>

#include <rte_ethdev_pci.h>
#include <rte_io.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>

#include "otx2_ethdev.h"

static inline uint64_t
nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
{
	uint64_t capa = NIX_RX_OFFLOAD_CAPA;

	/* Rx timestamp offload is not advertised on VFs */
	if (otx2_dev_is_vf(dev))
		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;

	return capa;
}

static inline uint64_t
nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
{
	uint64_t capa = NIX_TX_OFFLOAD_CAPA;

	/* TSO not supported for earlier chip revisions */
	if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
		capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
			  DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
			  DEV_TX_OFFLOAD_GENEVE_TNL_TSO);

	return capa;
}

static const struct otx2_dev_ops otx2_dev_ops = {
	.link_status_update = otx2_eth_dev_link_status_update,
	.ptp_info_update = otx2_eth_dev_ptp_info_update
};

static int
nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_lf_alloc_req *req;
	struct nix_lf_alloc_rsp *rsp;
	int rc;

	req = otx2_mbox_alloc_msg_nix_lf_alloc(mbox);
	req->rq_cnt = nb_rxq;
	req->sq_cnt = nb_txq;
	req->cq_cnt = nb_rxq;
	/* XQE_SZ should be in Sync with NIX_CQ_ENTRY_SZ */
	RTE_BUILD_BUG_ON(NIX_CQ_ENTRY_SZ != 128);
	req->xqe_sz = NIX_XQESZ_W16;
	req->rss_sz = dev->rss_info.rss_size;
	req->rss_grps = NIX_RSS_GRPS;
	req->npa_func = otx2_npa_pf_func_get();
	req->sso_func = otx2_sso_pf_func_get();
	req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
	if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
			 DEV_RX_OFFLOAD_UDP_CKSUM)) {
		req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
		req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
	}
	req->rx_cfg |= BIT_ULL(32 /* DROP_RE */);

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	dev->sqb_size = rsp->sqb_size;
	dev->tx_chan_base = rsp->tx_chan_base;
	dev->rx_chan_base = rsp->rx_chan_base;
	dev->rx_chan_cnt = rsp->rx_chan_cnt;
	dev->tx_chan_cnt = rsp->tx_chan_cnt;
	dev->lso_tsov4_idx = rsp->lso_tsov4_idx;
	dev->lso_tsov6_idx = rsp->lso_tsov6_idx;
	dev->lf_tx_stats = rsp->lf_tx_stats;
	dev->lf_rx_stats = rsp->lf_rx_stats;
	dev->cints = rsp->cints;
	dev->qints = rsp->qints;
	dev->npc_flow.channel = dev->rx_chan_base;

	return 0;
}
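
/* A sketch of how the rx_cfg bits programmed in nix_lf_alloc() map to Rx
 * behaviour. The bit positions come from the inline comments above; the
 * descriptions are assumptions based on the usual NIX register naming:
 *
 *   bit 32 DROP_RE  - drop packets that arrive with receive errors
 *   bit 35 DIS_APAD - disable alignment padding before packet data
 *   bit 36 CSUM_IL4 - have hardware verify the inner L4 checksum
 *   bit 37 CSUM_OL4 - have hardware verify the outer L4 checksum
 */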

static int
nix_lf_free(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_lf_free_req *req;
	struct ndc_sync_op *ndc_req;
	int rc;

	/* Sync NDC-NIX for LF */
	ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
	ndc_req->nix_lf_tx_sync = 1;
	ndc_req->nix_lf_rx_sync = 1;
	rc = otx2_mbox_process(mbox);
	if (rc)
		otx2_err("Error on NDC-NIX-[TX, RX] LF sync, rc %d", rc);

	req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
	/* Let the AF driver free all NPC entries that this NIX LF
	 * allocated through the NPC mailbox.
	 */
	req->flags = 0;

	return otx2_mbox_process(mbox);
}

int
otx2_cgx_rxtx_start(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;

	if (otx2_dev_is_vf(dev))
		return 0;

	otx2_mbox_alloc_msg_cgx_start_rxtx(mbox);

	return otx2_mbox_process(mbox);
}

int
otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;

	if (otx2_dev_is_vf(dev))
		return 0;

	otx2_mbox_alloc_msg_cgx_stop_rxtx(mbox);

	return otx2_mbox_process(mbox);
}

static int
npc_rx_enable(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;

	otx2_mbox_alloc_msg_nix_lf_start_rx(mbox);

	return otx2_mbox_process(mbox);
}

static int
npc_rx_disable(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;

	otx2_mbox_alloc_msg_nix_lf_stop_rx(mbox);

	return otx2_mbox_process(mbox);
}

static int
nix_cgx_start_link_event(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;

	if (otx2_dev_is_vf(dev))
		return 0;

	otx2_mbox_alloc_msg_cgx_start_linkevents(mbox);

	return otx2_mbox_process(mbox);
}

static int
cgx_intlbk_enable(struct otx2_eth_dev *dev, bool en)
{
	struct otx2_mbox *mbox = dev->mbox;

	if (otx2_dev_is_vf(dev))
		return 0;

	if (en)
		otx2_mbox_alloc_msg_cgx_intlbk_enable(mbox);
	else
		otx2_mbox_alloc_msg_cgx_intlbk_disable(mbox);

	return otx2_mbox_process(mbox);
}

static int
nix_cgx_stop_link_event(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;

	if (otx2_dev_is_vf(dev))
		return 0;

	otx2_mbox_alloc_msg_cgx_stop_linkevents(mbox);

	return otx2_mbox_process(mbox);
}

static inline void
nix_rx_queue_reset(struct otx2_eth_rxq *rxq)
{
	rxq->head = 0;
	rxq->available = 0;
}

static inline uint32_t
nix_qsize_to_val(enum nix_q_size_e qsize)
{
	return (16UL << (qsize * 2));
}

static inline enum nix_q_size_e
nix_qsize_clampup_get(struct otx2_eth_dev *dev, uint32_t val)
{
	int i = nix_q_size_16;

	if (otx2_ethdev_fixup_is_min_4k_q(dev))
		i = nix_q_size_4K;

	for (; i < nix_q_size_max; i++)
		if (val <= nix_qsize_to_val(i))
			break;

	if (i >= nix_q_size_max)
		i = nix_q_size_max - 1;

	return i;
}
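
/* Worked example for the two helpers above, assuming nix_q_size_e
 * enumerates 16, 64, 256, 1K, 4K, 16K, 64K, 1M (each step quadruples,
 * hence the "16UL << (qsize * 2)"):
 *
 *   nix_qsize_to_val(nix_q_size_256) = 16 << 4 = 256
 *
 * nix_qsize_clampup_get(dev, 1000) walks the enum until 1000 <= 1024 and
 * returns nix_q_size_1K; requests beyond the largest size are clamped to
 * nix_q_size_max - 1.
 */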

static int
nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
	       uint16_t qid, struct otx2_eth_rxq *rxq, struct rte_mempool *mp)
{
	struct otx2_mbox *mbox = dev->mbox;
	const struct rte_memzone *rz;
	uint32_t ring_size, cq_size;
	struct nix_aq_enq_req *aq;
	uint16_t first_skip;
	int rc;

	cq_size = rxq->qlen;
	ring_size = cq_size * NIX_CQ_ENTRY_SZ;
	rz = rte_eth_dma_zone_reserve(eth_dev, "cq", qid, ring_size,
				      NIX_CQ_ALIGN, dev->node);
	if (rz == NULL) {
		otx2_err("Failed to allocate mem for cq hw ring");
		rc = -ENOMEM;
		goto fail;
	}
	memset(rz->addr, 0, rz->len);
	rxq->desc = (uintptr_t)rz->addr;
	rxq->qmask = cq_size - 1;

	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = qid;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	aq->cq.ena = 1;
	aq->cq.caching = 1;
	aq->cq.qsize = rxq->qsize;
	aq->cq.base = rz->iova;
	aq->cq.avg_level = 0xff;
	aq->cq.cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
	aq->cq.cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);

	/* Many to one reduction */
	aq->cq.qint_idx = qid % dev->qints;
	/* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
	aq->cq.cint_idx = qid;

	if (otx2_ethdev_fixup_is_limit_cq_full(dev)) {
		const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
		uint16_t min_rx_drop;

		min_rx_drop = ceil(rx_cq_skid / (float)cq_size);
		aq->cq.drop = min_rx_drop;
		aq->cq.drop_ena = 1;
		rxq->cq_drop = min_rx_drop;
	} else {
		rxq->cq_drop = NIX_CQ_THRESH_LEVEL;
		aq->cq.drop = rxq->cq_drop;
		aq->cq.drop_ena = 1;
	}

	/* TX pause frames enable flowctrl on RX side */
	if (dev->fc_info.tx_pause) {
		/* Single bpid is allocated for all rx channels for now */
		aq->cq.bpid = dev->fc_info.bpid[0];
		aq->cq.bp = rxq->cq_drop;
		aq->cq.bp_ena = 1;
	}

	rc = otx2_mbox_process(mbox);
	if (rc) {
		otx2_err("Failed to init cq context");
		goto fail;
	}

	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	aq->rq.sso_ena = 0;
	aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
	aq->rq.spb_ena = 0;
	aq->rq.lpb_aura = npa_lf_aura_handle_to_aura(mp->pool_id);
	first_skip = (sizeof(struct rte_mbuf));
	first_skip += RTE_PKTMBUF_HEADROOM;
	first_skip += rte_pktmbuf_priv_size(mp);
	rxq->data_off = first_skip;

	first_skip /= 8; /* Expressed in number of dwords */
	aq->rq.first_skip = first_skip;
	aq->rq.later_skip = (sizeof(struct rte_mbuf) / 8);
	aq->rq.flow_tagw = 32; /* 32-bits */
	aq->rq.lpb_sizem1 = rte_pktmbuf_data_room_size(mp);
	aq->rq.lpb_sizem1 += rte_pktmbuf_priv_size(mp);
	aq->rq.lpb_sizem1 += sizeof(struct rte_mbuf);
	aq->rq.lpb_sizem1 /= 8;
	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
	aq->rq.ena = 1;
	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
	aq->rq.rq_int_ena = 0;
	/* Many to one reduction */
	aq->rq.qint_idx = qid % dev->qints;

	aq->rq.xqe_drop_ena = 1;

	rc = otx2_mbox_process(mbox);
	if (rc) {
		otx2_err("Failed to init rq context");
		goto fail;
	}

	return 0;
fail:
	return rc;
}

static int
nix_rq_enb_dis(struct rte_eth_dev *eth_dev,
	       struct otx2_eth_rxq *rxq, const bool enb)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *aq;

	/* Pkts will be dropped silently if RQ is disabled */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = rxq->rq;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	aq->rq.ena = enb;
	aq->rq_mask.ena = ~(aq->rq_mask.ena);

	return otx2_mbox_process(mbox);
}

static int
nix_cq_rq_uninit(struct rte_eth_dev *eth_dev, struct otx2_eth_rxq *rxq)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *aq;
	int rc;

	/* RQ is already disabled */
	/* Disable CQ */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = rxq->rq;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	aq->cq.ena = 0;
	aq->cq_mask.ena = ~(aq->cq_mask.ena);

	rc = otx2_mbox_process(mbox);
	if (rc < 0) {
		otx2_err("Failed to disable cq context");
		return rc;
	}

	return 0;
}

static inline int
nix_get_data_off(struct otx2_eth_dev *dev)
{
	return otx2_ethdev_is_ptp_en(dev) ? NIX_TIMESYNC_RX_OFFSET : 0;
}

uint64_t
otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id)
{
	struct rte_mbuf mb_def;
	uint64_t *tmp;

	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
			 offsetof(struct rte_mbuf, data_off) != 2);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
			 offsetof(struct rte_mbuf, data_off) != 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
			 offsetof(struct rte_mbuf, data_off) != 6);
	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM + nix_get_data_off(dev);
	mb_def.port = port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* Prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	tmp = (uint64_t *)&mb_def.rearm_data;

	return *tmp;
}
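
/* The helper above relies on rearm_data overlaying data_off (2B), refcnt
 * (2B), nb_segs (2B) and port (2B) in one 8-byte word; the
 * RTE_BUILD_BUG_ON()s pin those offsets at compile time. The Rx fast path
 * can then re-initialize a received mbuf with a single 64-bit store of the
 * returned value instead of four separate field writes.
 */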

static void
otx2_nix_rx_queue_release(void *rx_queue)
{
	struct otx2_eth_rxq *rxq = rx_queue;

	if (!rxq)
		return;

	otx2_nix_dbg("Releasing rxq %u", rxq->rq);

	/* Free rxq memory and its CQ/RQ contexts */
	nix_cq_rq_uninit(rxq->eth_dev, rxq);
	rte_free(rxq);
}

static int
otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
			uint16_t nb_desc, unsigned int socket,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_mempool_ops *ops;
	struct otx2_eth_rxq *rxq;
	const char *platform_ops;
	enum nix_q_size_e qsize;
	uint64_t offloads;
	int rc;

	rc = -EINVAL;

	/* Compile time check to make sure all fast path elements in a CL */
	RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_rxq, slow_path_start) >= 128);

	/* Sanity checks */
	if (rx_conf->rx_deferred_start == 1) {
		otx2_err("Deferred Rx start is not supported");
		goto fail;
	}

	platform_ops = rte_mbuf_platform_mempool_ops();
	/* This driver needs octeontx2_npa mempool ops to work */
	ops = rte_mempool_get_ops(mp->ops_index);
	if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
		otx2_err("mempool ops should be of octeontx2_npa type");
		goto fail;
	}

	if (mp->pool_id == 0) {
		otx2_err("Invalid pool_id");
		goto fail;
	}

	/* Free memory prior to re-allocation if needed */
	if (eth_dev->data->rx_queues[rq] != NULL) {
		otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq);
		otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]);
		eth_dev->data->rx_queues[rq] = NULL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	dev->rx_offloads |= offloads;

	/* Find the CQ queue size */
	qsize = nix_qsize_clampup_get(dev, nb_desc);
	/* Allocate rxq memory */
	rxq = rte_zmalloc_socket("otx2 rxq", sizeof(*rxq), OTX2_ALIGN, socket);
	if (rxq == NULL) {
		otx2_err("Failed to allocate rq=%d", rq);
		rc = -ENOMEM;
		goto fail;
	}

	rxq->eth_dev = eth_dev;
	rxq->rq = rq;
	rxq->cq_door = dev->base + NIX_LF_CQ_OP_DOOR;
	rxq->cq_status = (int64_t *)(dev->base + NIX_LF_CQ_OP_STATUS);
	rxq->wdata = (uint64_t)rq << 32;
	rxq->aura = npa_lf_aura_handle_to_aura(mp->pool_id);
	rxq->mbuf_initializer = otx2_nix_rxq_mbuf_setup(dev,
							eth_dev->data->port_id);
	rxq->offloads = offloads;
	rxq->pool = mp;
	rxq->qlen = nix_qsize_to_val(qsize);
	rxq->qsize = qsize;
	rxq->lookup_mem = otx2_nix_fastpath_lookup_mem_get();
	rxq->tstamp = &dev->tstamp;

	/* Alloc completion queue */
	rc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp);
	if (rc) {
		otx2_err("Failed to allocate rxq=%u", rq);
		goto free_rxq;
	}

	rxq->qconf.socket_id = socket;
	rxq->qconf.nb_desc = nb_desc;
	rxq->qconf.mempool = mp;
	memcpy(&rxq->qconf.conf.rx, rx_conf, sizeof(struct rte_eth_rxconf));

	nix_rx_queue_reset(rxq);
	otx2_nix_dbg("rq=%d pool=%s qsize=%d nb_desc=%d->%d",
		     rq, mp->name, qsize, nb_desc, rxq->qlen);

	eth_dev->data->rx_queues[rq] = rxq;
	eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;

	/* Calculating delta and freq mult between PTP HI clock and tsc.
	 * These are needed in deriving raw clock value from tsc counter.
	 * read_clock eth op returns raw clock value.
	 */
	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
	    otx2_ethdev_is_ptp_en(dev)) {
		rc = otx2_nix_raw_clock_tsc_conv(dev);
		if (rc) {
			otx2_err("Failed to calculate delta and freq mult");
			goto fail;
		}
	}

	return 0;

free_rxq:
	otx2_nix_rx_queue_release(rxq);
fail:
	return rc;
}

static inline uint8_t
nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
{
	/*
	 * At most three gather segments fit in a W8 SQE; choose
	 * NIX_MAXSQESZ_W16 when multi-segment offload is enabled.
	 */
	if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
		return NIX_MAXSQESZ_W16;
	else
		return NIX_MAXSQESZ_W8;
}
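
/* Per the sqes_per_sqb arithmetic in nix_alloc_sqb_pool() below, a W8 SQE
 * occupies 8 dwords (64 bytes) and a W16 SQE 16 dwords (128 bytes); the
 * larger form buys the extra gather descriptors needed for multi-segment
 * transmits at the cost of half the SQEs per SQB.
 */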

static uint16_t
nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	uint16_t flags = 0;

	if (rxmode->mq_mode == ETH_MQ_RX_RSS)
		flags |= NIX_RX_OFFLOAD_RSS_F;

	if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
			 DEV_RX_OFFLOAD_UDP_CKSUM))
		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

	if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
				DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
		flags |= NIX_RX_OFFLOAD_CHECKSUM_F;

	if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
		flags |= NIX_RX_MULTI_SEG_F;

	if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
				DEV_RX_OFFLOAD_QINQ_STRIP))
		flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;

	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
		flags |= NIX_RX_OFFLOAD_TSTAMP_F;

	return flags;
}

static uint16_t
nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	uint64_t conf = dev->tx_offloads;
	uint16_t flags = 0;

	/* Fastpath is dependent on these enums */
	RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
	RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
	RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
	RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
	RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
	RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
	RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
	RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
	RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
	RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
			 offsetof(struct rte_mbuf, buf_iova) + 8);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			 offsetof(struct rte_mbuf, buf_iova) + 16);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_mbuf, ol_flags) + 12);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
			 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));

	if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
	    conf & DEV_TX_OFFLOAD_QINQ_INSERT)
		flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;

	if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
	    conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
		flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;

	if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
	    conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
	    conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
	    conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
		flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;

	if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
		flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;

	if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
		flags |= NIX_TX_MULTI_SEG_F;

	/* Enable Inner checksum for TSO */
	if (conf & DEV_TX_OFFLOAD_TCP_TSO)
		flags |= (NIX_TX_OFFLOAD_TSO_F |
			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);

	/* Enable Inner and Outer checksum for Tunnel TSO */
	if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		    DEV_TX_OFFLOAD_GENEVE_TNL_TSO))
		flags |= (NIX_TX_OFFLOAD_TSO_F |
			  NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
			  NIX_TX_OFFLOAD_L3_L4_CSUM_F);

	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
		flags |= NIX_TX_OFFLOAD_TSTAMP_F;

	return flags;
}

static int
nix_sq_init(struct otx2_eth_txq *txq)
{
	struct otx2_eth_dev *dev = txq->dev;
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *sq;
	uint32_t rr_quantum;
	uint16_t smq;
	int rc;

	if (txq->sqb_pool->pool_id == 0)
		return -EINVAL;

	rc = otx2_nix_tm_get_leaf_data(dev, txq->sq, &rr_quantum, &smq);
	if (rc) {
		otx2_err("Failed to get sq->smq(leaf node), rc=%d", rc);
		return rc;
	}

	sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	sq->qidx = txq->sq;
	sq->ctype = NIX_AQ_CTYPE_SQ;
	sq->op = NIX_AQ_INSTOP_INIT;
	sq->sq.max_sqe_size = nix_sq_max_sqe_sz(txq);

	sq->sq.smq = smq;
	sq->sq.smq_rr_quantum = rr_quantum;
	sq->sq.default_chan = dev->tx_chan_base;
	sq->sq.sqe_stype = NIX_STYPE_STF;
	sq->sq.ena = 1;
	if (sq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
		sq->sq.sqe_stype = NIX_STYPE_STP;
	sq->sq.sqb_aura =
		npa_lf_aura_handle_to_aura(txq->sqb_pool->pool_id);
	sq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
	sq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
	sq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
	sq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);

	/* Many to one reduction */
	sq->sq.qint_idx = txq->sq % dev->qints;

	return otx2_mbox_process(mbox);
}

static int
nix_sq_uninit(struct otx2_eth_txq *txq)
{
	struct otx2_eth_dev *dev = txq->dev;
	struct otx2_mbox *mbox = dev->mbox;
	struct ndc_sync_op *ndc_req;
	struct nix_aq_enq_rsp *rsp;
	struct nix_aq_enq_req *aq;
	uint16_t sqes_per_sqb;
	void *sqb_buf;
	int rc, count;

	otx2_nix_dbg("Cleaning up sq %u", txq->sq);

	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = txq->sq;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* Check if sq is already cleaned up */
	if (!rsp->sq.ena)
		return 0;

	/* Disable sq */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = txq->sq;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	aq->sq_mask.ena = ~aq->sq_mask.ena;
	aq->sq.ena = 0;
	rc = otx2_mbox_process(mbox);
	if (rc)
		return rc;

	/* Read SQ and free sqb's */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = txq->sq;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_READ;

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	if (aq->sq.smq_pend)
		otx2_err("SQ has pending sqe's");

	count = aq->sq.sqb_count;
	sqes_per_sqb = 1 << txq->sqes_per_sqb_log2;
	/* Free SQB's that are used */
	sqb_buf = (void *)rsp->sq.head_sqb;
	while (count) {
		void *next_sqb;

		next_sqb = *(void **)((uintptr_t)sqb_buf + (uint32_t)
				      ((sqes_per_sqb - 1) *
				       nix_sq_max_sqe_sz(txq)));
		npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
				    (uint64_t)sqb_buf);
		sqb_buf = next_sqb;
		count--;
	}

	/* Free next to use sqb */
	if (rsp->sq.next_sqb)
		npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
				    rsp->sq.next_sqb);

	/* Sync NDC-NIX-TX for LF */
	ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
	ndc_req->nix_lf_tx_sync = 1;
	rc = otx2_mbox_process(mbox);
	if (rc)
		otx2_err("Error on NDC-NIX-TX LF sync, rc %d", rc);

	return rc;
}

static int
nix_sqb_aura_limit_cfg(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
{
	struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
	struct npa_aq_enq_req *aura_req;

	aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
	aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
	aura_req->ctype = NPA_AQ_CTYPE_AURA;
	aura_req->op = NPA_AQ_INSTOP_WRITE;

	aura_req->aura.limit = nb_sqb_bufs;
	aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);

	return otx2_mbox_process(npa_lf->mbox);
}

static int
nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
{
	struct otx2_eth_dev *dev = txq->dev;
	uint16_t sqes_per_sqb, nb_sqb_bufs;
	char name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool_objsz sz;
	struct npa_aura_s *aura;
	uint32_t tmp, blk_sz;

	aura = (struct npa_aura_s *)((uintptr_t)txq->fc_mem + OTX2_ALIGN);
	snprintf(name, sizeof(name), "otx2_sqb_pool_%d_%d", port, txq->sq);
	blk_sz = dev->sqb_size;

	if (nix_sq_max_sqe_sz(txq) == NIX_MAXSQESZ_W16)
		sqes_per_sqb = (dev->sqb_size / 8) / 16;
	else
		sqes_per_sqb = (dev->sqb_size / 8) / 8;

	nb_sqb_bufs = nb_desc / sqes_per_sqb;
	/* Clamp up to devarg passed SQB count */
	nb_sqb_bufs = RTE_MIN(dev->max_sqb_count, RTE_MAX(NIX_DEF_SQB,
			      nb_sqb_bufs + NIX_SQB_LIST_SPACE));

	txq->sqb_pool = rte_mempool_create_empty(name, NIX_MAX_SQB, blk_sz,
						 0, 0, dev->node,
						 MEMPOOL_F_NO_SPREAD);
	txq->nb_sqb_bufs = nb_sqb_bufs;
	txq->sqes_per_sqb_log2 = (uint16_t)rte_log2_u32(sqes_per_sqb);
	txq->nb_sqb_bufs_adj = nb_sqb_bufs -
		RTE_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb;
	txq->nb_sqb_bufs_adj =
		(NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;

	if (txq->sqb_pool == NULL) {
		otx2_err("Failed to allocate sqe mempool");
		goto fail;
	}

	memset(aura, 0, sizeof(*aura));
	aura->fc_ena = 1;
	aura->fc_addr = txq->fc_iova;
	aura->fc_hyst_bits = 0; /* Store count on all updates */
	if (rte_mempool_set_ops_byname(txq->sqb_pool, "octeontx2_npa", aura)) {
		otx2_err("Failed to set ops for sqe mempool");
		goto fail;
	}
	if (rte_mempool_populate_default(txq->sqb_pool) < 0) {
		otx2_err("Failed to populate sqe mempool");
		goto fail;
	}

	tmp = rte_mempool_calc_obj_size(blk_sz, MEMPOOL_F_NO_SPREAD, &sz);
	if (dev->sqb_size != sz.elt_size) {
		otx2_err("sqe pool block size is not expected %d != %d",
			 dev->sqb_size, tmp);
		goto fail;
	}

	nix_sqb_aura_limit_cfg(txq->sqb_pool, txq->nb_sqb_bufs);

	return 0;
fail:
	return -ENOMEM;
}
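
/* Worked sizing example for nix_alloc_sqb_pool(), assuming a 4 KB SQB
 * (dev->sqb_size = 4096) and W16 (128 B) SQEs:
 *
 *   sqes_per_sqb = (4096 / 8) / 16 = 32 SQEs per buffer
 *   nb_desc = 1024  =>  nb_sqb_bufs = 1024 / 32 = 32 SQBs
 *
 * which is then raised to at least NIX_DEF_SQB plus list space and capped
 * by the max_sqb_count devarg. nb_sqb_bufs_adj roughly discounts the one
 * SQE per SQB consumed by the next-SQB pointer and applies
 * NIX_SQB_LOWER_THRESH, so the flow-control count under-reports capacity
 * rather than risking an SQB overrun.
 */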

void
otx2_nix_form_default_desc(struct otx2_eth_txq *txq)
{
	struct nix_send_ext_s *send_hdr_ext;
	struct nix_send_hdr_s *send_hdr;
	struct nix_send_mem_s *send_mem;
	union nix_send_sg_s *sg;

	/* Initialize the fields based on basic single segment packet */
	memset(&txq->cmd, 0, sizeof(txq->cmd));

	if (txq->dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
		send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
		/* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
		send_hdr->w0.sizem1 = 2;

		send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2];
		send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
		if (txq->dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
			/* Default: one seg packet would have:
			 * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
			 * => 8/2 - 1 = 3
			 */
			send_hdr->w0.sizem1 = 3;
			send_hdr_ext->w0.tstmp = 1;

			/* To calculate the offset for send_mem,
			 * send_hdr->w0.sizem1 * 2
			 */
			send_mem = (struct nix_send_mem_s *)(txq->cmd +
						(send_hdr->w0.sizem1 << 1));
			send_mem->subdc = NIX_SUBDC_MEM;
			send_mem->alg = NIX_SENDMEMALG_SETTSTMP;
			send_mem->addr = txq->dev->tstamp.tx_tstamp_iova;
		}
		sg = (union nix_send_sg_s *)&txq->cmd[4];
	} else {
		send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
		/* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
		send_hdr->w0.sizem1 = 1;
		sg = (union nix_send_sg_s *)&txq->cmd[2];
	}

	send_hdr->w0.sq = txq->sq;
	sg->subdc = NIX_SUBDC_SG;
	sg->segs = 1;
	sg->ld_type = NIX_SENDLDTYPE_LDD;

	rte_smp_wmb();
}
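
/* Resulting txq->cmd layout (in 8-byte words) for the three cases handled
 * above; the SG and IOVA words are filled per packet in the fast path:
 *
 *   no ext hdr:       [HDR HDR] [SG IOVA]                       sizem1 = 1
 *   ext hdr:          [HDR HDR] [EXT EXT] [SG IOVA]             sizem1 = 2
 *   ext hdr + tstamp: [HDR HDR] [EXT EXT] [SG IOVA] [MEM MEM]   sizem1 = 3
 *
 * sizem1 is the command size in 16-byte units minus one, hence the
 * "total words / 2 - 1" comments in the function body.
 */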

static void
otx2_nix_tx_queue_release(void *_txq)
{
	struct otx2_eth_txq *txq = _txq;
	struct rte_eth_dev *eth_dev;

	if (!txq)
		return;

	eth_dev = txq->dev->eth_dev;

	otx2_nix_dbg("Releasing txq %u", txq->sq);

	/* Flush and disable tm */
	otx2_nix_tm_sw_xoff(txq, eth_dev->data->dev_started);

	/* Free sqb's and disable sq */
	nix_sq_uninit(txq);

	if (txq->sqb_pool) {
		rte_mempool_free(txq->sqb_pool);
		txq->sqb_pool = NULL;
	}
	rte_free(txq);
}

static int
otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
			uint16_t nb_desc, unsigned int socket_id,
			const struct rte_eth_txconf *tx_conf)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	const struct rte_memzone *fc;
	struct otx2_eth_txq *txq;
	uint64_t offloads;
	int rc;

	rc = -EINVAL;

	/* Compile time check to make sure all fast path elements in a CL */
	RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_txq, slow_path_start) >= 128);

	if (tx_conf->tx_deferred_start) {
		otx2_err("Tx deferred start is not supported");
		goto fail;
	}

	/* Free memory prior to re-allocation if needed. */
	if (eth_dev->data->tx_queues[sq] != NULL) {
		otx2_nix_dbg("Freeing memory prior to re-allocation %d", sq);
		otx2_nix_tx_queue_release(eth_dev->data->tx_queues[sq]);
		eth_dev->data->tx_queues[sq] = NULL;
	}

	/* Find the expected offloads for this queue */
	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;

	/* Allocating tx queue data structure */
	txq = rte_zmalloc_socket("otx2_ethdev TX queue", sizeof(*txq),
				 OTX2_ALIGN, socket_id);
	if (txq == NULL) {
		otx2_err("Failed to alloc txq=%d", sq);
		rc = -ENOMEM;
		goto fail;
	}
	txq->sq = sq;
	txq->dev = dev;
	txq->sqb_pool = NULL;
	txq->offloads = offloads;
	dev->tx_offloads |= offloads;

	/*
	 * Allocate memory for flow control updates from HW.
	 * Alloc one cache line, so that it fits all FC_STYPE modes.
	 */
	fc = rte_eth_dma_zone_reserve(eth_dev, "fcmem", sq,
				      OTX2_ALIGN + sizeof(struct npa_aura_s),
				      OTX2_ALIGN, dev->node);
	if (fc == NULL) {
		otx2_err("Failed to allocate mem for fcmem");
		rc = -ENOMEM;
		goto free_txq;
	}
	txq->fc_iova = fc->iova;
	txq->fc_mem = fc->addr;

	/* Initialize the aura sqb pool */
	rc = nix_alloc_sqb_pool(eth_dev->data->port_id, txq, nb_desc);
	if (rc) {
		otx2_err("Failed to alloc sqe pool rc=%d", rc);
		goto free_txq;
	}

	/* Initialize the SQ */
	rc = nix_sq_init(txq);
	if (rc) {
		otx2_err("Failed to init sq=%d context", sq);
		goto free_txq;
	}

	txq->fc_cache_pkts = 0;
	txq->io_addr = dev->base + NIX_LF_OP_SENDX(0);
	/* Evenly distribute LMT slot for each sq */
	txq->lmt_addr = (void *)(dev->lmt_addr + ((sq & LMT_SLOT_MASK) << 12));

	txq->qconf.socket_id = socket_id;
	txq->qconf.nb_desc = nb_desc;
	memcpy(&txq->qconf.conf.tx, tx_conf, sizeof(struct rte_eth_txconf));

	otx2_nix_form_default_desc(txq);

	otx2_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " sqb=0x%" PRIx64 ""
		     " lmt_addr=%p nb_sqb_bufs=%d sqes_per_sqb_log2=%d", sq,
		     fc->addr, offloads, txq->sqb_pool->pool_id, txq->lmt_addr,
		     txq->nb_sqb_bufs, txq->sqes_per_sqb_log2);
	eth_dev->data->tx_queues[sq] = txq;
	eth_dev->data->tx_queue_state[sq] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;

free_txq:
	otx2_nix_tx_queue_release(txq);
fail:
	return rc;
}
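
/* LMT note for the setup above: txq->lmt_addr is derived as
 * dev->lmt_addr + ((sq & LMT_SLOT_MASK) << 12), i.e. one 4 KB LMT slot per
 * SQ index, wrapping once the index exceeds LMT_SLOT_MASK. Queues that
 * alias onto the same slot stay safe under the usual DPDK model of one
 * lcore owning a given txq (an assumption, not enforced here).
 */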

static int
nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_eth_qconf *tx_qconf = NULL;
	struct otx2_eth_qconf *rx_qconf = NULL;
	struct otx2_eth_txq **txq;
	struct otx2_eth_rxq **rxq;
	int i, nb_rxq, nb_txq;

	nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
	nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);

	tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
	if (tx_qconf == NULL) {
		otx2_err("Failed to allocate memory for tx_qconf");
		goto fail;
	}

	rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
	if (rx_qconf == NULL) {
		otx2_err("Failed to allocate memory for rx_qconf");
		goto fail;
	}

	txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
	for (i = 0; i < nb_txq; i++) {
		if (txq[i] == NULL) {
			otx2_err("txq[%d] is already released", i);
			goto fail;
		}
		memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf));
		otx2_nix_tx_queue_release(txq[i]);
		eth_dev->data->tx_queues[i] = NULL;
	}

	rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
	for (i = 0; i < nb_rxq; i++) {
		if (rxq[i] == NULL) {
			otx2_err("rxq[%d] is already released", i);
			goto fail;
		}
		memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf));
		otx2_nix_rx_queue_release(rxq[i]);
		eth_dev->data->rx_queues[i] = NULL;
	}

	dev->tx_qconf = tx_qconf;
	dev->rx_qconf = rx_qconf;
	return 0;

fail:
	if (tx_qconf)
		free(tx_qconf);
	if (rx_qconf)
		free(rx_qconf);

	return -ENOMEM;
}

static int
nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_eth_qconf *tx_qconf = dev->tx_qconf;
	struct otx2_eth_qconf *rx_qconf = dev->rx_qconf;
	struct otx2_eth_txq **txq;
	struct otx2_eth_rxq **rxq;
	int rc, i, nb_rxq, nb_txq;

	nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
	nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);

	rc = -ENOMEM;
	/* Set up tx & rx queues with the previous configuration so that
	 * the queues remain functional when a port is started without
	 * reconfiguring its queues.
	 *
	 * The usual reconfig sequence looks like:
	 * port_configure() {
	 *      if (reconfigure) {
	 *              queue_release()
	 *              queue_setup()
	 *      }
	 *      queue_configure() {
	 *              queue_release()
	 *              queue_setup()
	 *      }
	 * }
	 * port_start()
	 *
	 * In some applications' control path, queue_configure() is NOT
	 * invoked for TXQs/RXQs after port_configure(). In such cases the
	 * queues can still be functional after start, as they were already
	 * set up in port_configure().
	 */
	for (i = 0; i < nb_txq; i++) {
		rc = otx2_nix_tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc,
					     tx_qconf[i].socket_id,
					     &tx_qconf[i].conf.tx);
		if (rc) {
			otx2_err("Failed to setup tx queue rc=%d", rc);
			txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
			for (i -= 1; i >= 0; i--)
				otx2_nix_tx_queue_release(txq[i]);
			goto fail;
		}
	}

	free(tx_qconf); tx_qconf = NULL;

	for (i = 0; i < nb_rxq; i++) {
		rc = otx2_nix_rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc,
					     rx_qconf[i].socket_id,
					     &rx_qconf[i].conf.rx,
					     rx_qconf[i].mempool);
		if (rc) {
			otx2_err("Failed to setup rx queue rc=%d", rc);
			rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
			for (i -= 1; i >= 0; i--)
				otx2_nix_rx_queue_release(rxq[i]);
			goto release_tx_queues;
		}
	}

	free(rx_qconf); rx_qconf = NULL;

	return 0;

release_tx_queues:
	txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		otx2_nix_tx_queue_release(txq[i]);
fail:
	if (tx_qconf)
		free(tx_qconf);
	if (rx_qconf)
		free(rx_qconf);

	return rc;
}

static uint16_t
nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
{
	RTE_SET_USED(queue);
	RTE_SET_USED(mbufs);
	RTE_SET_USED(pkts);

	return 0;
}

static void
nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
{
	/* These dummy functions are needed to support applications that
	 * reconfigure queues without stopping the Rx/Tx burst threads
	 * (e.g. the KNI app). When the queue context is saved, the
	 * txqs/rxqs are released, which would crash such an app since
	 * rx/tx burst may still be running on other lcores.
	 */
	eth_dev->tx_pkt_burst = nix_eth_nop_burst;
	eth_dev->rx_pkt_burst = nix_eth_nop_burst;
	rte_mb();
}

static void
nix_lso_tcp(struct nix_lso_format_cfg *req, bool v4)
{
	volatile struct nix_lso_format *field;

	/* Format works only with TCP packet marked by OL3/OL4 */
	field = (volatile struct nix_lso_format *)&req->fields[0];
	req->field_mask = NIX_LSO_FIELD_MASK;
	/* Outer IPv4/IPv6 */
	field->layer = NIX_TXLAYER_OL3;
	field->offset = v4 ? 2 : 4;
	field->sizem1 = 1; /* 2B */
	field->alg = NIX_LSOALG_ADD_PAYLEN;
	field++;
	if (v4) {
		/* IPID field */
		field->layer = NIX_TXLAYER_OL3;
		field->offset = 4;
		field->sizem1 = 1;
		/* Incremented linearly per segment */
		field->alg = NIX_LSOALG_ADD_SEGNUM;
		field++;
	}

	/* TCP sequence number update */
	field->layer = NIX_TXLAYER_OL4;
	field->offset = 4;
	field->sizem1 = 3; /* 4 bytes */
	field->alg = NIX_LSOALG_ADD_OFFSET;
	field++;
	/* TCP flags field */
	field->layer = NIX_TXLAYER_OL4;
	field->offset = 12;
	field->sizem1 = 1;
	field->alg = NIX_LSOALG_TCP_FLAGS;
	field++;
}
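
/* The offsets above index into the L3/L4 headers as the hardware sees
 * them: IPv4 total-length at byte 2, IPv6 payload-length at byte 4, IPv4
 * ID at byte 4, TCP sequence number at byte 4, and the 2-byte word at
 * byte 12 that holds data-offset/flags. Per segment the hardware then
 * rewrites the length by the segment payload, bumps the IPv4 ID, advances
 * the sequence number and adjusts the TCP flags (the usual TSO
 * first/middle/last handling, per our reading of the LSOALG names).
 */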

static void
nix_lso_udp_tun_tcp(struct nix_lso_format_cfg *req,
		    bool outer_v4, bool inner_v4)
{
	volatile struct nix_lso_format *field;

	field = (volatile struct nix_lso_format *)&req->fields[0];
	req->field_mask = NIX_LSO_FIELD_MASK;
	/* Outer IPv4/IPv6 len */
	field->layer = NIX_TXLAYER_OL3;
	field->offset = outer_v4 ? 2 : 4;
	field->sizem1 = 1; /* 2B */
	field->alg = NIX_LSOALG_ADD_PAYLEN;
	field++;
	if (outer_v4) {
		/* IPID */
		field->layer = NIX_TXLAYER_OL3;
		field->offset = 4;
		field->sizem1 = 1;
		/* Incremented linearly per segment */
		field->alg = NIX_LSOALG_ADD_SEGNUM;
		field++;
	}

	/* Outer UDP length */
	field->layer = NIX_TXLAYER_OL4;
	field->offset = 4;
	field->sizem1 = 1;
	field->alg = NIX_LSOALG_ADD_PAYLEN;
	field++;

	/* Inner IPv4/IPv6 */
	field->layer = NIX_TXLAYER_IL3;
	field->offset = inner_v4 ? 2 : 4;
	field->sizem1 = 1; /* 2B */
	field->alg = NIX_LSOALG_ADD_PAYLEN;
	field++;
	if (inner_v4) {
		/* IPID field */
		field->layer = NIX_TXLAYER_IL3;
		field->offset = 4;
		field->sizem1 = 1;
		/* Incremented linearly per segment */
		field->alg = NIX_LSOALG_ADD_SEGNUM;
		field++;
	}

	/* TCP sequence number update */
	field->layer = NIX_TXLAYER_IL4;
	field->offset = 4;
	field->sizem1 = 3; /* 4 bytes */
	field->alg = NIX_LSOALG_ADD_OFFSET;
	field++;

	/* TCP flags field */
	field->layer = NIX_TXLAYER_IL4;
	field->offset = 12;
	field->sizem1 = 1;
	field->alg = NIX_LSOALG_TCP_FLAGS;
	field++;
}

static int
nix_setup_lso_formats(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_lso_format_cfg_rsp *rsp;
	struct nix_lso_format_cfg *req;
	uint8_t base;
	int rc;

	/* Skip if TSO was not requested */
	if (!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F))
		return 0;

	/*
	 * IPv4/TCP LSO
	 */
	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
	nix_lso_tcp(req, true);
	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	base = rsp->lso_format_idx;
	if (base != NIX_LSO_FORMAT_IDX_TSOV4)
		return -EFAULT;
	dev->lso_base_idx = base;
	otx2_nix_dbg("tcpv4 lso fmt=%u", base);

	/*
	 * IPv6/TCP LSO
	 */
	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
	nix_lso_tcp(req, false);
	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	if (rsp->lso_format_idx != base + 1)
		return -EFAULT;
	otx2_nix_dbg("tcpv6 lso fmt=%u\n", base + 1);

	/*
	 * IPv4/UDP/TUN HDR/IPv4/TCP LSO
	 */
	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
	nix_lso_udp_tun_tcp(req, true, true);
	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	if (rsp->lso_format_idx != base + 2)
		return -EFAULT;
	otx2_nix_dbg("udp tun v4v4 fmt=%u\n", base + 2);

	/*
	 * IPv4/UDP/TUN HDR/IPv6/TCP LSO
	 */
	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
	nix_lso_udp_tun_tcp(req, true, false);
	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	if (rsp->lso_format_idx != base + 3)
		return -EFAULT;
	otx2_nix_dbg("udp tun v4v6 fmt=%u\n", base + 3);

	/*
	 * IPv6/UDP/TUN HDR/IPv4/TCP LSO
	 */
	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
	nix_lso_udp_tun_tcp(req, false, true);
	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	if (rsp->lso_format_idx != base + 4)
		return -EFAULT;
	otx2_nix_dbg("udp tun v6v4 fmt=%u\n", base + 4);

	/*
	 * IPv6/UDP/TUN HDR/IPv6/TCP LSO
	 */
	req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
	nix_lso_udp_tun_tcp(req, false, false);
	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	if (rsp->lso_format_idx != base + 5)
		return -EFAULT;
	otx2_nix_dbg("udp tun v6v6 fmt=%u\n", base + 5);

	return 0;
}

static int
otx2_nix_configure(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	struct rte_eth_txmode *txmode = &conf->txmode;
	char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *ea;
	uint8_t nb_rxq, nb_txq;
	int rc;

	rc = -EINVAL;

	/* Sanity checks */
	if (rte_eal_has_hugepages() == 0) {
		otx2_err("Huge page is not configured");
		goto fail_configure;
	}

	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
		otx2_err("Setting link speed/duplex not supported");
		goto fail_configure;
	}

	if (conf->dcb_capability_en == 1) {
		otx2_err("dcb enable is not supported");
		goto fail_configure;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		otx2_err("Flow director is not supported");
		goto fail_configure;
	}

	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
		otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
		goto fail_configure;
	}

	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
		otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
		goto fail_configure;
	}

	if (otx2_dev_is_Ax(dev) &&
	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
	    (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
		otx2_err("Outer IP and SCTP checksum unsupported");
		goto fail_configure;
	}

	/* Free the resources allocated from the previous configure */
	if (dev->configured == 1) {
		otx2_nix_rxchan_bpid_cfg(eth_dev, false);
		otx2_nix_vlan_fini(eth_dev);
		otx2_flow_free_all_resources(dev);
		oxt2_nix_unregister_queue_irqs(eth_dev);
		if (eth_dev->data->dev_conf.intr_conf.rxq)
			oxt2_nix_unregister_cq_irqs(eth_dev);
		nix_set_nop_rxtx_function(eth_dev);
		rc = nix_store_queue_cfg_and_then_release(eth_dev);
		if (rc)
			goto fail_configure;
		otx2_nix_tm_fini(eth_dev);
		nix_lf_free(dev);
	}

	dev->rx_offloads = rxmode->offloads;
	dev->tx_offloads = txmode->offloads;
	dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
	dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);
	dev->rss_info.rss_grps = NIX_RSS_GRPS;

	nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
	nb_txq = RTE_MAX(data->nb_tx_queues, 1);

	/* Alloc a nix lf */
	rc = nix_lf_alloc(dev, nb_rxq, nb_txq);
	if (rc) {
		otx2_err("Failed to init nix_lf rc=%d", rc);
		goto fail_offloads;
	}

	rc = nix_setup_lso_formats(dev);
	if (rc) {
		otx2_err("failed to setup nix lso format fields, rc=%d", rc);
		goto free_nix_lf;
	}

	/* Configure RSS */
	rc = otx2_nix_rss_config(eth_dev);
	if (rc) {
		otx2_err("Failed to configure rss rc=%d", rc);
		goto free_nix_lf;
	}

	/* Init the default TM scheduler hierarchy */
	rc = otx2_nix_tm_init_default(eth_dev);
	if (rc) {
		otx2_err("Failed to init traffic manager rc=%d", rc);
		goto free_nix_lf;
	}

	rc = otx2_nix_vlan_offload_init(eth_dev);
	if (rc) {
		otx2_err("Failed to init vlan offload rc=%d", rc);
		goto tm_fini;
	}

	/* Register queue IRQs */
	rc = oxt2_nix_register_queue_irqs(eth_dev);
	if (rc) {
		otx2_err("Failed to register queue interrupts rc=%d", rc);
		goto vlan_fini;
	}

	/* Register cq IRQs */
	if (eth_dev->data->dev_conf.intr_conf.rxq) {
		if (eth_dev->data->nb_rx_queues > dev->cints) {
			otx2_err("Rx interrupt cannot be enabled, rxq > %d",
				 dev->cints);
			goto q_irq_fini;
		}
		/* Rx interrupt feature cannot work with vector mode because,
		 * vector mode doesn't process packets unless min 4 pkts are
		 * received, while cq interrupts are generated even for 1 pkt
		 * in the CQ.
		 */
		dev->scalar_ena = true;

		rc = oxt2_nix_register_cq_irqs(eth_dev);
		if (rc) {
			otx2_err("Failed to register CQ interrupts rc=%d", rc);
			goto q_irq_fini;
		}
	}

	/* Configure loop back mode */
	rc = cgx_intlbk_enable(dev, eth_dev->data->dev_conf.lpbk_mode);
	if (rc) {
		otx2_err("Failed to configure cgx loop back mode rc=%d", rc);
		goto q_irq_fini;
	}

	rc = otx2_nix_rxchan_bpid_cfg(eth_dev, true);
	if (rc) {
		otx2_err("Failed to configure nix rx chan bpid cfg rc=%d", rc);
		goto q_irq_fini;
	}

	/*
	 * Restore queue config when a reconfigure follows a previous
	 * configure and the application did not invoke queue setup again.
	 */
	if (dev->configured == 1) {
		rc = nix_restore_queue_cfg(eth_dev);
		if (rc)
			goto cq_fini;
	}

	/* Update the mac address */
	ea = eth_dev->data->mac_addrs;
	memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
	if (rte_is_zero_ether_addr(ea))
		rte_eth_random_addr((uint8_t *)ea);

	rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);

	otx2_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
		     " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 ""
		     " rx_flags=0x%x tx_flags=0x%x",
		     eth_dev->data->port_id, ea_fmt, nb_rxq,
		     nb_txq, dev->rx_offloads, dev->tx_offloads,
		     dev->rx_offload_flags, dev->tx_offload_flags);

	/* All good */
	dev->configured = 1;
	dev->configured_nb_rx_qs = data->nb_rx_queues;
	dev->configured_nb_tx_qs = data->nb_tx_queues;
	return 0;

cq_fini:
	oxt2_nix_unregister_cq_irqs(eth_dev);
q_irq_fini:
	oxt2_nix_unregister_queue_irqs(eth_dev);
vlan_fini:
	otx2_nix_vlan_fini(eth_dev);
tm_fini:
	otx2_nix_tm_fini(eth_dev);
free_nix_lf:
	nix_lf_free(dev);
fail_offloads:
	dev->rx_offload_flags &= ~nix_rx_offload_flags(eth_dev);
	dev->tx_offload_flags &= ~nix_tx_offload_flags(eth_dev);
fail_configure:
	dev->configured = 0;
	return rc;
}

int
otx2_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
{
	struct rte_eth_dev_data *data = eth_dev->data;
	struct otx2_eth_txq *txq;
	int rc = -EINVAL;

	txq = eth_dev->data->tx_queues[qidx];

	if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	rc = otx2_nix_sq_sqb_aura_fc(txq, true);
	if (rc) {
		otx2_err("Failed to enable sqb aura fc, txq=%u, rc=%d",
			 qidx, rc);
		goto done;
	}

	data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;

done:
	return rc;
}

int
otx2_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
{
	struct rte_eth_dev_data *data = eth_dev->data;
	struct otx2_eth_txq *txq;
	int rc;

	txq = eth_dev->data->tx_queues[qidx];

	if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	txq->fc_cache_pkts = 0;

	rc = otx2_nix_sq_sqb_aura_fc(txq, false);
	if (rc) {
		otx2_err("Failed to disable sqb aura fc, txq=%u, rc=%d",
			 qidx, rc);
		goto done;
	}

	data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;

done:
	return rc;
}

int
otx2_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
{
	struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
	struct rte_eth_dev_data *data = eth_dev->data;
	int rc;

	if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
		return 0;

	rc = nix_rq_enb_dis(rxq->eth_dev, rxq, true);
	if (rc) {
		otx2_err("Failed to enable rxq=%u, rc=%d", qidx, rc);
		goto done;
	}

	data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;

done:
	return rc;
}

int
otx2_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
{
	struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
	struct rte_eth_dev_data *data = eth_dev->data;
	int rc;

	if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
		return 0;

	rc = nix_rq_enb_dis(rxq->eth_dev, rxq, false);
	if (rc) {
		otx2_err("Failed to disable rxq=%u, rc=%d", qidx, rc);
		goto done;
	}

	data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;

done:
	return rc;
}

static void
otx2_nix_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_mbuf *rx_pkts[32];
	struct otx2_eth_rxq *rxq;
	int count, i, j, rc;

	nix_cgx_stop_link_event(dev);
	npc_rx_disable(dev);

	/* Stop rx queues and free up pkts pending */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rc = otx2_nix_rx_queue_stop(eth_dev, i);
		if (rc)
			continue;

		rxq = eth_dev->data->rx_queues[i];
		count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
		while (count) {
			for (j = 0; j < count; j++)
				rte_pktmbuf_free(rx_pkts[j]);
			count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
		}
	}

	/* Stop tx queues */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
		otx2_nix_tx_queue_stop(eth_dev, i);
}

static int
otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	int rc, i;

	if (eth_dev->data->nb_rx_queues != 0) {
		rc = otx2_nix_recalc_mtu(eth_dev);
		if (rc)
			return rc;
	}

	/* Start rx queues */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rc = otx2_nix_rx_queue_start(eth_dev, i);
		if (rc)
			return rc;
	}

	/* Start tx queues */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		rc = otx2_nix_tx_queue_start(eth_dev, i);
		if (rc)
			return rc;
	}

	rc = otx2_nix_update_flow_ctrl_mode(eth_dev);
	if (rc) {
		otx2_err("Failed to update flow ctrl mode %d", rc);
		return rc;
	}

	/* Enable PTP if it was requested by the app or if it is already
	 * enabled in the PF owning this VF
	 */
	memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
	if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
	    otx2_ethdev_is_ptp_en(dev))
		otx2_nix_timesync_enable(eth_dev);
	else
		otx2_nix_timesync_disable(eth_dev);

	rc = npc_rx_enable(dev);
	if (rc) {
		otx2_err("Failed to enable NPC rx %d", rc);
		return rc;
	}

	otx2_nix_toggle_flag_link_cfg(dev, true);

	rc = nix_cgx_start_link_event(dev);
	if (rc) {
		otx2_err("Failed to start cgx link event %d", rc);
		goto rx_disable;
	}

	otx2_nix_toggle_flag_link_cfg(dev, false);
	otx2_eth_set_tx_function(eth_dev);
	otx2_eth_set_rx_function(eth_dev);

	return 0;

rx_disable:
	npc_rx_disable(dev);
	otx2_nix_toggle_flag_link_cfg(dev, false);
	return rc;
}

static int otx2_nix_dev_reset(struct rte_eth_dev *eth_dev);
static void otx2_nix_dev_close(struct rte_eth_dev *eth_dev);

/* Initialize and register driver with DPDK Application */
static const struct eth_dev_ops otx2_eth_dev_ops = {
	.dev_infos_get            = otx2_nix_info_get,
	.dev_configure            = otx2_nix_configure,
	.link_update              = otx2_nix_link_update,
	.tx_queue_setup           = otx2_nix_tx_queue_setup,
	.tx_queue_release         = otx2_nix_tx_queue_release,
	.rx_queue_setup           = otx2_nix_rx_queue_setup,
	.rx_queue_release         = otx2_nix_rx_queue_release,
	.dev_start                = otx2_nix_dev_start,
	.dev_stop                 = otx2_nix_dev_stop,
	.dev_close                = otx2_nix_dev_close,
	.tx_queue_start           = otx2_nix_tx_queue_start,
	.tx_queue_stop            = otx2_nix_tx_queue_stop,
	.rx_queue_start           = otx2_nix_rx_queue_start,
	.rx_queue_stop            = otx2_nix_rx_queue_stop,
	.dev_set_link_up          = otx2_nix_dev_set_link_up,
	.dev_set_link_down        = otx2_nix_dev_set_link_down,
	.dev_supported_ptypes_get = otx2_nix_supported_ptypes_get,
	.dev_reset                = otx2_nix_dev_reset,
	.stats_get                = otx2_nix_dev_stats_get,
	.stats_reset              = otx2_nix_dev_stats_reset,
	.get_reg                  = otx2_nix_dev_get_reg,
	.mtu_set                  = otx2_nix_mtu_set,
	.mac_addr_add             = otx2_nix_mac_addr_add,
	.mac_addr_remove          = otx2_nix_mac_addr_del,
	.mac_addr_set             = otx2_nix_mac_addr_set,
	.promiscuous_enable       = otx2_nix_promisc_enable,
	.promiscuous_disable      = otx2_nix_promisc_disable,
	.allmulticast_enable      = otx2_nix_allmulticast_enable,
	.allmulticast_disable     = otx2_nix_allmulticast_disable,
	.queue_stats_mapping_set  = otx2_nix_queue_stats_mapping,
	.reta_update              = otx2_nix_dev_reta_update,
	.reta_query               = otx2_nix_dev_reta_query,
	.rss_hash_update          = otx2_nix_rss_hash_update,
	.rss_hash_conf_get        = otx2_nix_rss_hash_conf_get,
	.xstats_get               = otx2_nix_xstats_get,
	.xstats_get_names         = otx2_nix_xstats_get_names,
	.xstats_reset             = otx2_nix_xstats_reset,
	.xstats_get_by_id         = otx2_nix_xstats_get_by_id,
	.xstats_get_names_by_id   = otx2_nix_xstats_get_names_by_id,
	.rxq_info_get             = otx2_nix_rxq_info_get,
	.txq_info_get             = otx2_nix_txq_info_get,
	.rx_queue_count           = otx2_nix_rx_queue_count,
	.rx_descriptor_done       = otx2_nix_rx_descriptor_done,
	.rx_descriptor_status     = otx2_nix_rx_descriptor_status,
	.tx_descriptor_status     = otx2_nix_tx_descriptor_status,
	.tx_done_cleanup          = otx2_nix_tx_done_cleanup,
	.pool_ops_supported       = otx2_nix_pool_ops_supported,
	.filter_ctrl              = otx2_nix_dev_filter_ctrl,
	.get_module_info          = otx2_nix_get_module_info,
	.get_module_eeprom        = otx2_nix_get_module_eeprom,
	.fw_version_get           = otx2_nix_fw_version_get,
	.flow_ctrl_get            = otx2_nix_flow_ctrl_get,
	.flow_ctrl_set            = otx2_nix_flow_ctrl_set,
	.timesync_enable          = otx2_nix_timesync_enable,
	.timesync_disable         = otx2_nix_timesync_disable,
	.timesync_read_rx_timestamp = otx2_nix_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp = otx2_nix_timesync_read_tx_timestamp,
	.timesync_adjust_time     = otx2_nix_timesync_adjust_time,
	.timesync_read_time       = otx2_nix_timesync_read_time,
	.timesync_write_time      = otx2_nix_timesync_write_time,
	.vlan_offload_set         = otx2_nix_vlan_offload_set,
	.vlan_filter_set          = otx2_nix_vlan_filter_set,
	.vlan_strip_queue_set     = otx2_nix_vlan_strip_queue_set,
	.vlan_tpid_set            = otx2_nix_vlan_tpid_set,
	.vlan_pvid_set            = otx2_nix_vlan_pvid_set,
	.rx_queue_intr_enable     = otx2_nix_rx_queue_intr_enable,
	.rx_queue_intr_disable    = otx2_nix_rx_queue_intr_disable,
	.read_clock               = otx2_nix_read_clock,
};

static int
nix_lf_attach(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct rsrc_attach_req *req;

	/* Attach NIX(lf) */
	req = otx2_mbox_alloc_msg_attach_resources(mbox);
	req->modify = true;
	req->nixlf = true;

	return otx2_mbox_process(mbox);
}

static int
nix_lf_get_msix_offset(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct msix_offset_rsp *msix_rsp;
	int rc;

	/* Get NPA and NIX MSIX vector offsets */
	otx2_mbox_alloc_msg_msix_offset(mbox);

	rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);

	dev->nix_msixoff = msix_rsp->nix_msixoff;

	return rc;
}

static inline int
otx2_eth_dev_lf_detach(struct otx2_mbox *mbox)
{
	struct rsrc_detach_req *req;

	req = otx2_mbox_alloc_msg_detach_resources(mbox);

	/* Detach all except npa lf */
	req->partial = true;
	req->nixlf = true;
	req->sso = true;
	req->ssow = true;
	req->timlfs = true;
	req->cptlfs = true;

	return otx2_mbox_process(mbox);
}

static int
otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_pci_device *pci_dev;
	int rc, max_entries;

	eth_dev->dev_ops = &otx2_eth_dev_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* Setup callbacks for secondary process */
		otx2_eth_set_tx_function(eth_dev);
		otx2_eth_set_rx_function(eth_dev);
		return 0;
	}

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	/* Zero out everything after OTX2_DEV to allow proper dev_reset() */
	memset(&dev->otx2_eth_dev_data_start, 0, sizeof(*dev) -
		offsetof(struct otx2_eth_dev, otx2_eth_dev_data_start));

	/* Parse devargs string */
	rc = otx2_ethdev_parse_devargs(eth_dev->device->devargs, dev);
	if (rc) {
		otx2_err("Failed to parse devargs rc=%d", rc);
		goto error;
	}

	if (!dev->mbox_active) {
		/* Initialize the base otx2_dev object only if it is not
		 * already initialized (mbox inactive).
		 */
		rc = otx2_dev_init(pci_dev, dev);
		if (rc) {
			otx2_err("Failed to initialize otx2_dev rc=%d", rc);
			goto error;
		}
	}
	/* Device generic callbacks */
	dev->ops = &otx2_dev_ops;
	dev->eth_dev = eth_dev;

	/* Grab the NPA LF if required */
	rc = otx2_npa_lf_init(pci_dev, dev);
	if (rc)
		goto otx2_dev_uninit;

	dev->configured = 0;
	dev->drv_inited = true;
	dev->base = dev->bar2 + (RVU_BLOCK_ADDR_NIX0 << 20);
	dev->lmt_addr = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);

	/* Attach NIX LF */
	rc = nix_lf_attach(dev);
	if (rc)
		goto otx2_npa_uninit;

	/* Get NIX MSIX offset */
	rc = nix_lf_get_msix_offset(dev);
	if (rc)
		goto otx2_npa_uninit;

	/* Register LF irq handlers */
	rc = otx2_nix_register_irqs(eth_dev);
	if (rc)
		goto mbox_detach;

	/* Get maximum number of supported MAC entries */
	max_entries = otx2_cgx_mac_max_entries_get(dev);
	if (max_entries < 0) {
		otx2_err("Failed to get max entries for mac addr");
		rc = -ENOTSUP;
		goto unregister_irq;
	}

	/* For VFs, returned max_entries will be 0. But to keep default MAC
	 * address, one entry must be allocated. So setting up to 1.
	 */
	if (max_entries == 0)
		max_entries = 1;

	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", max_entries *
					       RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		otx2_err("Failed to allocate memory for mac addr");
		rc = -ENOMEM;
		goto unregister_irq;
	}

	dev->max_mac_entries = max_entries;

	rc = otx2_nix_mac_addr_get(eth_dev, dev->mac_addr);
	if (rc)
		goto free_mac_addrs;

	/* Update the mac address */
	memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);

	/* Also sync same MAC address to CGX table */
	otx2_cgx_mac_addr_set(eth_dev, &eth_dev->data->mac_addrs[0]);

	/* Initialize the tm data structures */
	otx2_nix_tm_conf_init(eth_dev);

	dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
	dev->rx_offload_capa = nix_get_rx_offload_capa(dev);

	if (otx2_dev_is_96xx_A0(dev) ||
	    otx2_dev_is_95xx_Ax(dev)) {
		dev->hwcap |= OTX2_FIXUP_F_MIN_4K_Q;
		dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL;
	}

	/* Initialize rte-flow */
	rc = otx2_flow_init(dev);
	if (rc)
		goto free_mac_addrs;

	otx2_nix_dbg("Port=%d pf=%d vf=%d ver=%s msix_off=%d hwcap=0x%" PRIx64
		     " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
		     eth_dev->data->port_id, dev->pf, dev->vf,
		     OTX2_ETH_DEV_PMD_VERSION, dev->nix_msixoff, dev->hwcap,
		     dev->rx_offload_capa, dev->tx_offload_capa);

	return 0;

free_mac_addrs:
	rte_free(eth_dev->data->mac_addrs);
unregister_irq:
	otx2_nix_unregister_irqs(eth_dev);
mbox_detach:
	otx2_eth_dev_lf_detach(dev->mbox);
otx2_npa_uninit:
	otx2_npa_lf_fini();
otx2_dev_uninit:
	otx2_dev_fini(pci_dev, dev);
error:
	otx2_err("Failed to init nix eth_dev rc=%d", rc);
	return rc;
}

static int
otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_pci_device *pci_dev;
	int rc, i;

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Clear the flag since we are closing down */
	dev->configured = 0;

	/* Disable nix bpid config */
	otx2_nix_rxchan_bpid_cfg(eth_dev, false);

	npc_rx_disable(dev);

	/* Disable vlan offloads */
	otx2_nix_vlan_fini(eth_dev);

	/* Disable other rte_flow entries */
	otx2_flow_fini(dev);

	/* Disable PTP if already enabled */
	if (otx2_ethdev_is_ptp_en(dev))
		otx2_nix_timesync_disable(eth_dev);

	nix_cgx_stop_link_event(dev);

	/* Free up SQs */
	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		otx2_nix_tx_queue_release(eth_dev->data->tx_queues[i]);
		eth_dev->data->tx_queues[i] = NULL;
	}
	eth_dev->data->nb_tx_queues = 0;

	/* Free up RQ's and CQ's */
	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		otx2_nix_rx_queue_release(eth_dev->data->rx_queues[i]);
		eth_dev->data->rx_queues[i] = NULL;
	}
	eth_dev->data->nb_rx_queues = 0;

	/* Free tm resources */
	rc = otx2_nix_tm_fini(eth_dev);
	if (rc)
		otx2_err("Failed to cleanup tm, rc=%d", rc);

	/* Unregister queue irqs */
	oxt2_nix_unregister_queue_irqs(eth_dev);

	/* Unregister cq irqs */
	if (eth_dev->data->dev_conf.intr_conf.rxq)
		oxt2_nix_unregister_cq_irqs(eth_dev);

	rc = nix_lf_free(dev);
	if (rc)
		otx2_err("Failed to free nix lf, rc=%d", rc);

	rc = otx2_npa_lf_fini();
	if (rc)
		otx2_err("Failed to cleanup npa lf, rc=%d", rc);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
	dev->drv_inited = false;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	otx2_nix_unregister_irqs(eth_dev);

	rc = otx2_eth_dev_lf_detach(dev->mbox);
	if (rc)
		otx2_err("Failed to detach resources, rc=%d", rc);

	/* Check if mbox close is needed */
	if (!mbox_close)
		return 0;

	if (otx2_npa_lf_active(dev) || otx2_dev_active_vfs(dev)) {
		/* Will be freed later by PMD */
		eth_dev->data->dev_private = NULL;
		return 0;
	}

	otx2_dev_fini(pci_dev, dev);
	return 0;
}

static void
otx2_nix_dev_close(struct rte_eth_dev *eth_dev)
{
	otx2_eth_dev_uninit(eth_dev, true);
}

static int
otx2_nix_dev_reset(struct rte_eth_dev *eth_dev)
{
	int rc;

	rc = otx2_eth_dev_uninit(eth_dev, false);
	if (rc)
		return rc;

	return otx2_eth_dev_init(eth_dev);
}

static int
nix_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	struct otx2_idev_cfg *idev;
	struct otx2_dev *otx2_dev;
	int rc;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (eth_dev) {
		/* Cleanup eth dev */
		rc = otx2_eth_dev_uninit(eth_dev, true);
		if (rc)
			return rc;

		rte_eth_dev_pci_release(eth_dev);
	}

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Check for common resources */
	idev = otx2_intra_dev_get_cfg();
	if (!idev || !idev->npa_lf || idev->npa_lf->pci_dev != pci_dev)
		return 0;

	otx2_dev = container_of(idev->npa_lf, struct otx2_dev, npalf);

	if (otx2_npa_lf_active(otx2_dev) || otx2_dev_active_vfs(otx2_dev))
		goto exit;

	/* Safe to cleanup mbox as no more users */
	otx2_dev_fini(pci_dev, otx2_dev);
	return 0;

exit:
	otx2_info("%s: common resource in use by other devices", pci_dev->name);
	return -EAGAIN;
}

static int
nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	int rc;

	RTE_SET_USED(pci_drv);

	rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct otx2_eth_dev),
					   otx2_eth_dev_init);

	/* On error on secondary, recheck if port exists in primary or
	 * in mid of detach state.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
		if (!rte_eth_dev_allocated(pci_dev->device.name))
			return 0;

	return rc;
}

static const struct rte_pci_id pci_nix_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			       PCI_DEVID_OCTEONTX2_RVU_AF_VF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver pci_nix = {
	.id_table = pci_nix_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
			RTE_PCI_DRV_INTR_LSC,
	.probe = nix_probe,
	.remove = nix_remove,
};

RTE_PMD_REGISTER_PCI(net_octeontx2, pci_nix);
RTE_PMD_REGISTER_PCI_TABLE(net_octeontx2, pci_nix_map);
RTE_PMD_REGISTER_KMOD_DEP(net_octeontx2, "vfio-pci");