1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
7 #include <rte_ethdev_pci.h>
9 #include <rte_malloc.h>
11 #include <rte_mbuf_pool_ops.h>
12 #include <rte_mempool.h>
14 #include "otx2_ethdev.h"
16 static inline uint64_t
17 nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
19 uint64_t capa = NIX_RX_OFFLOAD_CAPA;
21 if (otx2_dev_is_vf(dev))
22 capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
27 static inline uint64_t
28 nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
30 uint64_t capa = NIX_TX_OFFLOAD_CAPA;
32 /* TSO not supported for earlier chip revisions */
33 if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
34 capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
35 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
36 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
37 DEV_TX_OFFLOAD_GRE_TNL_TSO);
41 static const struct otx2_dev_ops otx2_dev_ops = {
42 .link_status_update = otx2_eth_dev_link_status_update,
43 .ptp_info_update = otx2_eth_dev_ptp_info_update
47 nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
49 struct otx2_mbox *mbox = dev->mbox;
50 struct nix_lf_alloc_req *req;
51 struct nix_lf_alloc_rsp *rsp;
54 req = otx2_mbox_alloc_msg_nix_lf_alloc(mbox);
58 /* XQE_SZ should be in sync with NIX_CQ_ENTRY_SZ */
59 RTE_BUILD_BUG_ON(NIX_CQ_ENTRY_SZ != 128);
60 req->xqe_sz = NIX_XQESZ_W16;
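/* Note (assumption): NIX_XQESZ_W16 presumably selects a 16-dword
 * (16 * 8 B = 128 B) completion queue entry, which is why the build-time
 * assert above pins NIX_CQ_ENTRY_SZ to 128.
 */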
61 req->rss_sz = dev->rss_info.rss_size;
62 req->rss_grps = NIX_RSS_GRPS;
63 req->npa_func = otx2_npa_pf_func_get();
64 req->sso_func = otx2_sso_pf_func_get();
65 req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
66 if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
67 DEV_RX_OFFLOAD_UDP_CKSUM)) {
68 req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
69 req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
71 req->rx_cfg |= BIT_ULL(32 /* DROP_RE */);
73 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
77 dev->sqb_size = rsp->sqb_size;
78 dev->tx_chan_base = rsp->tx_chan_base;
79 dev->rx_chan_base = rsp->rx_chan_base;
80 dev->rx_chan_cnt = rsp->rx_chan_cnt;
81 dev->tx_chan_cnt = rsp->tx_chan_cnt;
82 dev->lso_tsov4_idx = rsp->lso_tsov4_idx;
83 dev->lso_tsov6_idx = rsp->lso_tsov6_idx;
84 dev->lf_tx_stats = rsp->lf_tx_stats;
85 dev->lf_rx_stats = rsp->lf_rx_stats;
86 dev->cints = rsp->cints;
87 dev->qints = rsp->qints;
88 dev->npc_flow.channel = dev->rx_chan_base;
94 nix_lf_free(struct otx2_eth_dev *dev)
96 struct otx2_mbox *mbox = dev->mbox;
97 struct nix_lf_free_req *req;
98 struct ndc_sync_op *ndc_req;
101 /* Sync NDC-NIX for LF */
102 ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
103 ndc_req->nix_lf_tx_sync = 1;
104 ndc_req->nix_lf_rx_sync = 1;
105 rc = otx2_mbox_process(mbox);
107 otx2_err("Error on NDC-NIX-[TX, RX] LF sync, rc %d", rc);
109 req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
110 /* Let the AF driver free all of this NIX LF's
111 * NPC entries allocated using the NPC mailbox.
115 return otx2_mbox_process(mbox);
119 otx2_cgx_rxtx_start(struct otx2_eth_dev *dev)
121 struct otx2_mbox *mbox = dev->mbox;
123 if (otx2_dev_is_vf(dev))
126 otx2_mbox_alloc_msg_cgx_start_rxtx(mbox);
128 return otx2_mbox_process(mbox);
132 otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev)
134 struct otx2_mbox *mbox = dev->mbox;
136 if (otx2_dev_is_vf(dev))
139 otx2_mbox_alloc_msg_cgx_stop_rxtx(mbox);
141 return otx2_mbox_process(mbox);
145 npc_rx_enable(struct otx2_eth_dev *dev)
147 struct otx2_mbox *mbox = dev->mbox;
149 otx2_mbox_alloc_msg_nix_lf_start_rx(mbox);
151 return otx2_mbox_process(mbox);
155 npc_rx_disable(struct otx2_eth_dev *dev)
157 struct otx2_mbox *mbox = dev->mbox;
159 otx2_mbox_alloc_msg_nix_lf_stop_rx(mbox);
161 return otx2_mbox_process(mbox);
165 nix_cgx_start_link_event(struct otx2_eth_dev *dev)
167 struct otx2_mbox *mbox = dev->mbox;
169 if (otx2_dev_is_vf(dev))
172 otx2_mbox_alloc_msg_cgx_start_linkevents(mbox);
174 return otx2_mbox_process(mbox);
178 cgx_intlbk_enable(struct otx2_eth_dev *dev, bool en)
180 struct otx2_mbox *mbox = dev->mbox;
182 if (otx2_dev_is_vf(dev))
186 otx2_mbox_alloc_msg_cgx_intlbk_enable(mbox);
188 otx2_mbox_alloc_msg_cgx_intlbk_disable(mbox);
190 return otx2_mbox_process(mbox);
194 nix_cgx_stop_link_event(struct otx2_eth_dev *dev)
196 struct otx2_mbox *mbox = dev->mbox;
198 if (otx2_dev_is_vf(dev))
201 otx2_mbox_alloc_msg_cgx_stop_linkevents(mbox);
203 return otx2_mbox_process(mbox);
207 nix_rx_queue_reset(struct otx2_eth_rxq *rxq)
213 static inline uint32_t
214 nix_qsize_to_val(enum nix_q_size_e qsize)
216 return (16UL << (qsize * 2));
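/* For reference, this maps the nix_q_size_e enum (assumed to start at 0 and
 * increment by one) to ring depths in powers of four:
 * 16, 64, 256, 1K, 4K, 16K, 64K, ... entries, bounded by nix_q_size_max.
 */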
219 static inline enum nix_q_size_e
220 nix_qsize_clampup_get(struct otx2_eth_dev *dev, uint32_t val)
224 if (otx2_ethdev_fixup_is_min_4k_q(dev))
229 for (; i < nix_q_size_max; i++)
230 if (val <= nix_qsize_to_val(i))
233 if (i >= nix_q_size_max)
234 i = nix_q_size_max - 1;
240 nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
241 uint16_t qid, struct otx2_eth_rxq *rxq, struct rte_mempool *mp)
243 struct otx2_mbox *mbox = dev->mbox;
244 const struct rte_memzone *rz;
245 uint32_t ring_size, cq_size;
246 struct nix_aq_enq_req *aq;
251 ring_size = cq_size * NIX_CQ_ENTRY_SZ;
252 rz = rte_eth_dma_zone_reserve(eth_dev, "cq", qid, ring_size,
253 NIX_CQ_ALIGN, dev->node);
255 otx2_err("Failed to allocate mem for cq hw ring");
259 memset(rz->addr, 0, rz->len);
260 rxq->desc = (uintptr_t)rz->addr;
261 rxq->qmask = cq_size - 1;
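/* cq_size is expected to be a power of two (it is derived via
 * nix_qsize_to_val()), so (cq_size - 1) can serve as a wrap-around index
 * mask in the Rx fast path.
 */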
263 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
265 aq->ctype = NIX_AQ_CTYPE_CQ;
266 aq->op = NIX_AQ_INSTOP_INIT;
270 aq->cq.qsize = rxq->qsize;
271 aq->cq.base = rz->iova;
272 aq->cq.avg_level = 0xff;
273 aq->cq.cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
274 aq->cq.cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
276 /* Many to one reduction */
277 aq->cq.qint_idx = qid % dev->qints;
278 /* Map CQ0 [RQ0] to CINT0 and so on, up to a max of 64 irqs */
279 aq->cq.cint_idx = qid;
281 if (otx2_ethdev_fixup_is_limit_cq_full(dev)) {
282 const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
283 uint16_t min_rx_drop;
285 min_rx_drop = ceil(rx_cq_skid / (float)cq_size);
286 aq->cq.drop = min_rx_drop;
288 rxq->cq_drop = min_rx_drop;
290 rxq->cq_drop = NIX_CQ_THRESH_LEVEL;
291 aq->cq.drop = rxq->cq_drop;
295 /* TX pause frames enable flowctrl on RX side */
296 if (dev->fc_info.tx_pause) {
297 /* Single bpid is allocated for all rx channels for now */
298 aq->cq.bpid = dev->fc_info.bpid[0];
299 aq->cq.bp = rxq->cq_drop;
303 rc = otx2_mbox_process(mbox);
305 otx2_err("Failed to init cq context");
309 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
311 aq->ctype = NIX_AQ_CTYPE_RQ;
312 aq->op = NIX_AQ_INSTOP_INIT;
315 aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
317 aq->rq.lpb_aura = npa_lf_aura_handle_to_aura(mp->pool_id);
318 first_skip = (sizeof(struct rte_mbuf));
319 first_skip += RTE_PKTMBUF_HEADROOM;
320 first_skip += rte_pktmbuf_priv_size(mp);
321 rxq->data_off = first_skip;
323 first_skip /= 8; /* Expressed in number of dwords */
324 aq->rq.first_skip = first_skip;
325 aq->rq.later_skip = (sizeof(struct rte_mbuf) / 8);
326 aq->rq.flow_tagw = 32; /* 32-bits */
327 aq->rq.lpb_sizem1 = rte_pktmbuf_data_room_size(mp);
328 aq->rq.lpb_sizem1 += rte_pktmbuf_priv_size(mp);
329 aq->rq.lpb_sizem1 += sizeof(struct rte_mbuf);
330 aq->rq.lpb_sizem1 /= 8;
331 aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
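/* Worked example (illustrative only, assuming common defaults:
 * sizeof(struct rte_mbuf) == 128, RTE_PKTMBUF_HEADROOM == 128,
 * priv_size == 0, data room == 2176 B):
 *   first_skip = 128 + 128 + 0 = 256 B -> 32 dwords
 *   lpb_sizem1 = (2176 + 0 + 128) / 8 - 1 = 287
 */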
333 aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
334 aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
335 aq->rq.rq_int_ena = 0;
336 /* Many to one reduction */
337 aq->rq.qint_idx = qid % dev->qints;
339 aq->rq.xqe_drop_ena = 1;
341 rc = otx2_mbox_process(mbox);
343 otx2_err("Failed to init rq context");
353 nix_rq_enb_dis(struct rte_eth_dev *eth_dev,
354 struct otx2_eth_rxq *rxq, const bool enb)
356 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
357 struct otx2_mbox *mbox = dev->mbox;
358 struct nix_aq_enq_req *aq;
360 /* Pkts will be dropped silently if RQ is disabled */
361 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
363 aq->ctype = NIX_AQ_CTYPE_RQ;
364 aq->op = NIX_AQ_INSTOP_WRITE;
367 aq->rq_mask.ena = ~(aq->rq_mask.ena);
369 return otx2_mbox_process(mbox);
373 nix_cq_rq_uninit(struct rte_eth_dev *eth_dev, struct otx2_eth_rxq *rxq)
375 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
376 struct otx2_mbox *mbox = dev->mbox;
377 struct nix_aq_enq_req *aq;
380 /* RQ is already disabled */
382 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
384 aq->ctype = NIX_AQ_CTYPE_CQ;
385 aq->op = NIX_AQ_INSTOP_WRITE;
388 aq->cq_mask.ena = ~(aq->cq_mask.ena);
390 rc = otx2_mbox_process(mbox);
392 otx2_err("Failed to disable cq context");
400 nix_get_data_off(struct otx2_eth_dev *dev)
402 return otx2_ethdev_is_ptp_en(dev) ? NIX_TIMESYNC_RX_OFFSET : 0;
406 otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id)
408 struct rte_mbuf mb_def;
411 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
412 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
413 offsetof(struct rte_mbuf, data_off) != 2);
414 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
415 offsetof(struct rte_mbuf, data_off) != 4);
416 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
417 offsetof(struct rte_mbuf, data_off) != 6);
419 mb_def.data_off = RTE_PKTMBUF_HEADROOM + nix_get_data_off(dev);
420 mb_def.port = port_id;
421 rte_mbuf_refcnt_set(&mb_def, 1);
423 /* Prevent compiler reordering: rearm_data covers previous fields */
424 rte_compiler_barrier();
425 tmp = (uint64_t *)&mb_def.rearm_data;
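/* The build-time asserts above guarantee that data_off, refcnt, nb_segs
 * and port form one contiguous 64-bit word starting at rearm_data. The
 * value assembled here is stored as rxq->mbuf_initializer so the Rx fast
 * path can presumably re-initialize all four fields with a single store.
 */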
431 otx2_nix_rx_queue_release(void *rx_queue)
433 struct otx2_eth_rxq *rxq = rx_queue;
438 otx2_nix_dbg("Releasing rxq %u", rxq->rq);
439 nix_cq_rq_uninit(rxq->eth_dev, rxq);
444 otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
445 uint16_t nb_desc, unsigned int socket,
446 const struct rte_eth_rxconf *rx_conf,
447 struct rte_mempool *mp)
449 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
450 struct rte_mempool_ops *ops;
451 struct otx2_eth_rxq *rxq;
452 const char *platform_ops;
453 enum nix_q_size_e qsize;
459 /* Compile-time check to make sure all fast path elements fit in a cache line */
460 RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_rxq, slow_path_start) >= 128);
463 if (rx_conf->rx_deferred_start == 1) {
464 otx2_err("Deferred Rx start is not supported");
468 platform_ops = rte_mbuf_platform_mempool_ops();
469 /* This driver needs octeontx2_npa mempool ops to work */
470 ops = rte_mempool_get_ops(mp->ops_index);
471 if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
472 otx2_err("mempool ops should be of octeontx2_npa type");
476 if (mp->pool_id == 0) {
477 otx2_err("Invalid pool_id");
481 /* Free memory prior to re-allocation if needed */
482 if (eth_dev->data->rx_queues[rq] != NULL) {
483 otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq);
484 otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]);
485 eth_dev->data->rx_queues[rq] = NULL;
488 offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
489 dev->rx_offloads |= offloads;
491 /* Find the CQ queue size */
492 qsize = nix_qsize_clampup_get(dev, nb_desc);
493 /* Allocate rxq memory */
494 rxq = rte_zmalloc_socket("otx2 rxq", sizeof(*rxq), OTX2_ALIGN, socket);
496 otx2_err("Failed to allocate rq=%d", rq);
501 rxq->eth_dev = eth_dev;
503 rxq->cq_door = dev->base + NIX_LF_CQ_OP_DOOR;
504 rxq->cq_status = (int64_t *)(dev->base + NIX_LF_CQ_OP_STATUS);
505 rxq->wdata = (uint64_t)rq << 32;
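/* Assumption: the CQ index is carried in the upper 32 bits of the operand
 * used with the CQ_OP_STATUS/DOOR registers in the Rx fast path.
 */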
506 rxq->aura = npa_lf_aura_handle_to_aura(mp->pool_id);
507 rxq->mbuf_initializer = otx2_nix_rxq_mbuf_setup(dev,
508 eth_dev->data->port_id);
509 rxq->offloads = offloads;
511 rxq->qlen = nix_qsize_to_val(qsize);
513 rxq->lookup_mem = otx2_nix_fastpath_lookup_mem_get();
514 rxq->tstamp = &dev->tstamp;
516 /* Alloc completion queue */
517 rc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp);
519 otx2_err("Failed to allocate rxq=%u", rq);
523 rxq->qconf.socket_id = socket;
524 rxq->qconf.nb_desc = nb_desc;
525 rxq->qconf.mempool = mp;
526 memcpy(&rxq->qconf.conf.rx, rx_conf, sizeof(struct rte_eth_rxconf));
528 nix_rx_queue_reset(rxq);
529 otx2_nix_dbg("rq=%d pool=%s qsize=%d nb_desc=%d->%d",
530 rq, mp->name, qsize, nb_desc, rxq->qlen);
532 eth_dev->data->rx_queues[rq] = rxq;
533 eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;
535 /* Calculate delta and freq mult between the PTP HI clock and TSC.
536 * These are needed to derive the raw clock value from the TSC counter;
537 * the read_clock eth op returns the raw clock value.
539 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
540 otx2_ethdev_is_ptp_en(dev)) {
541 rc = otx2_nix_raw_clock_tsc_conv(dev);
543 otx2_err("Failed to calculate delta and freq mult");
551 otx2_nix_rx_queue_release(rxq);
556 static inline uint8_t
557 nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
560 * A maximum of three segments can be supported with W8; choose
561 * NIX_MAXSQESZ_W16 for multi-segment offload.
563 if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
564 return NIX_MAXSQESZ_W16;
566 return NIX_MAXSQESZ_W8;
570 nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
572 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
573 struct rte_eth_dev_data *data = eth_dev->data;
574 struct rte_eth_conf *conf = &data->dev_conf;
575 struct rte_eth_rxmode *rxmode = &conf->rxmode;
578 if (rxmode->mq_mode == ETH_MQ_RX_RSS)
579 flags |= NIX_RX_OFFLOAD_RSS_F;
581 if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
582 DEV_RX_OFFLOAD_UDP_CKSUM))
583 flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
585 if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
586 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
587 flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
589 if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
590 flags |= NIX_RX_MULTI_SEG_F;
592 if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
593 DEV_RX_OFFLOAD_QINQ_STRIP))
594 flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
596 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
597 flags |= NIX_RX_OFFLOAD_TSTAMP_F;
603 nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
605 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
606 uint64_t conf = dev->tx_offloads;
609 /* Fastpath is dependent on these enums */
610 RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
611 RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
612 RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
613 RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
614 RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
615 RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
616 RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
617 RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
618 RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
619 RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
620 RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
621 RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
622 RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
623 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
624 offsetof(struct rte_mbuf, buf_iova) + 8);
625 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
626 offsetof(struct rte_mbuf, buf_iova) + 16);
627 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
628 offsetof(struct rte_mbuf, ol_flags) + 12);
629 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
630 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
632 if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
633 conf & DEV_TX_OFFLOAD_QINQ_INSERT)
634 flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
636 if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
637 conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
638 flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
640 if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
641 conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
642 conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
643 conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
644 flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
646 if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
647 flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
649 if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
650 flags |= NIX_TX_MULTI_SEG_F;
652 /* Enable Inner checksum for TSO */
653 if (conf & DEV_TX_OFFLOAD_TCP_TSO)
654 flags |= (NIX_TX_OFFLOAD_TSO_F |
655 NIX_TX_OFFLOAD_L3_L4_CSUM_F);
657 /* Enable Inner and Outer checksum for Tunnel TSO */
658 if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
659 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
660 DEV_TX_OFFLOAD_GRE_TNL_TSO))
661 flags |= (NIX_TX_OFFLOAD_TSO_F |
662 NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
663 NIX_TX_OFFLOAD_L3_L4_CSUM_F);
665 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
666 flags |= NIX_TX_OFFLOAD_TSTAMP_F;
672 nix_sq_init(struct otx2_eth_txq *txq)
674 struct otx2_eth_dev *dev = txq->dev;
675 struct otx2_mbox *mbox = dev->mbox;
676 struct nix_aq_enq_req *sq;
681 if (txq->sqb_pool->pool_id == 0)
684 rc = otx2_nix_tm_get_leaf_data(dev, txq->sq, &rr_quantum, &smq);
686 otx2_err("Failed to get sq->smq(leaf node), rc=%d", rc);
690 sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
692 sq->ctype = NIX_AQ_CTYPE_SQ;
693 sq->op = NIX_AQ_INSTOP_INIT;
694 sq->sq.max_sqe_size = nix_sq_max_sqe_sz(txq);
697 sq->sq.smq_rr_quantum = rr_quantum;
698 sq->sq.default_chan = dev->tx_chan_base;
699 sq->sq.sqe_stype = NIX_STYPE_STF;
701 if (sq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
702 sq->sq.sqe_stype = NIX_STYPE_STP;
704 npa_lf_aura_handle_to_aura(txq->sqb_pool->pool_id);
705 sq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
706 sq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
707 sq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
708 sq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
710 /* Many to one reduction */
711 sq->sq.qint_idx = txq->sq % dev->qints;
713 return otx2_mbox_process(mbox);
717 nix_sq_uninit(struct otx2_eth_txq *txq)
719 struct otx2_eth_dev *dev = txq->dev;
720 struct otx2_mbox *mbox = dev->mbox;
721 struct ndc_sync_op *ndc_req;
722 struct nix_aq_enq_rsp *rsp;
723 struct nix_aq_enq_req *aq;
724 uint16_t sqes_per_sqb;
728 otx2_nix_dbg("Cleaning up sq %u", txq->sq);
730 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
732 aq->ctype = NIX_AQ_CTYPE_SQ;
733 aq->op = NIX_AQ_INSTOP_READ;
735 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
739 /* Check if sq is already cleaned up */
744 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
746 aq->ctype = NIX_AQ_CTYPE_SQ;
747 aq->op = NIX_AQ_INSTOP_WRITE;
749 aq->sq_mask.ena = ~aq->sq_mask.ena;
752 rc = otx2_mbox_process(mbox);
756 /* Read SQ and free sqb's */
757 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
759 aq->ctype = NIX_AQ_CTYPE_SQ;
760 aq->op = NIX_AQ_INSTOP_READ;
762 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
767 otx2_err("SQ has pending sqe's");
769 count = aq->sq.sqb_count;
770 sqes_per_sqb = 1 << txq->sqes_per_sqb_log2;
771 /* Free SQB's that are used */
772 sqb_buf = (void *)rsp->sq.head_sqb;
776 next_sqb = *(void **)((uintptr_t)sqb_buf + (uint32_t)
777 ((sqes_per_sqb - 1) *
778 nix_sq_max_sqe_sz(txq)));
779 npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
785 /* Free next to use sqb */
786 if (rsp->sq.next_sqb)
787 npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
790 /* Sync NDC-NIX-TX for LF */
791 ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
792 ndc_req->nix_lf_tx_sync = 1;
793 rc = otx2_mbox_process(mbox);
795 otx2_err("Error on NDC-NIX-TX LF sync, rc %d", rc);
801 nix_sqb_aura_limit_cfg(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
803 struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
804 struct npa_aq_enq_req *aura_req;
806 aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
807 aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
808 aura_req->ctype = NPA_AQ_CTYPE_AURA;
809 aura_req->op = NPA_AQ_INSTOP_WRITE;
811 aura_req->aura.limit = nb_sqb_bufs;
812 aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);
814 return otx2_mbox_process(npa_lf->mbox);
818 nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
820 struct otx2_eth_dev *dev = txq->dev;
821 uint16_t sqes_per_sqb, nb_sqb_bufs;
822 char name[RTE_MEMPOOL_NAMESIZE];
823 struct rte_mempool_objsz sz;
824 struct npa_aura_s *aura;
825 uint32_t tmp, blk_sz;
827 aura = (struct npa_aura_s *)((uintptr_t)txq->fc_mem + OTX2_ALIGN);
828 snprintf(name, sizeof(name), "otx2_sqb_pool_%d_%d", port, txq->sq);
829 blk_sz = dev->sqb_size;
831 if (nix_sq_max_sqe_sz(txq) == NIX_MAXSQESZ_W16)
832 sqes_per_sqb = (dev->sqb_size / 8) / 16;
834 sqes_per_sqb = (dev->sqb_size / 8) / 8;
836 nb_sqb_bufs = nb_desc / sqes_per_sqb;
837 /* Clamp up to devarg passed SQB count */
838 nb_sqb_bufs = RTE_MIN(dev->max_sqb_count, RTE_MAX(NIX_DEF_SQB,
839 nb_sqb_bufs + NIX_SQB_LIST_SPACE));
841 txq->sqb_pool = rte_mempool_create_empty(name, NIX_MAX_SQB, blk_sz,
843 MEMPOOL_F_NO_SPREAD);
844 txq->nb_sqb_bufs = nb_sqb_bufs;
845 txq->sqes_per_sqb_log2 = (uint16_t)rte_log2_u32(sqes_per_sqb);
846 txq->nb_sqb_bufs_adj = nb_sqb_bufs -
847 RTE_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb;
848 txq->nb_sqb_bufs_adj =
849 (NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
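/* Sizing sketch (macro values such as NIX_DEF_SQB, NIX_SQB_LIST_SPACE and
 * NIX_SQB_LOWER_THRESH are not shown here): sqb_size / 8 is the number of
 * 8 B dwords per SQB, so a W16 SQE (16 dwords) or W8 SQE (8 dwords) yields
 * sqes_per_sqb entries per buffer. nb_sqb_bufs expresses nb_desc in SQBs,
 * padded for the SQB free list and clamped to the devarg maximum. The
 * adjusted count then appears to reserve roughly one SQE per SQB for the
 * next-SQB link pointer and keeps only NIX_SQB_LOWER_THRESH percent of it
 * as the flow-control threshold.
 */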
851 if (txq->sqb_pool == NULL) {
852 otx2_err("Failed to allocate sqe mempool");
856 memset(aura, 0, sizeof(*aura));
858 aura->fc_addr = txq->fc_iova;
859 aura->fc_hyst_bits = 0; /* Store count on all updates */
860 if (rte_mempool_set_ops_byname(txq->sqb_pool, "octeontx2_npa", aura)) {
861 otx2_err("Failed to set ops for sqe mempool");
864 if (rte_mempool_populate_default(txq->sqb_pool) < 0) {
865 otx2_err("Failed to populate sqe mempool");
869 tmp = rte_mempool_calc_obj_size(blk_sz, MEMPOOL_F_NO_SPREAD, &sz);
870 if (dev->sqb_size != sz.elt_size) {
871 otx2_err("sqe pool block size is not expected %d != %d",
876 nix_sqb_aura_limit_cfg(txq->sqb_pool, txq->nb_sqb_bufs);
884 otx2_nix_form_default_desc(struct otx2_eth_txq *txq)
886 struct nix_send_ext_s *send_hdr_ext;
887 struct nix_send_hdr_s *send_hdr;
888 struct nix_send_mem_s *send_mem;
889 union nix_send_sg_s *sg;
891 /* Initialize the fields based on a basic single-segment packet */
892 memset(&txq->cmd, 0, sizeof(txq->cmd));
894 if (txq->dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
895 send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
896 /* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
897 send_hdr->w0.sizem1 = 2;
899 send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2];
900 send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
901 if (txq->dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
902 /* Default: one seg packet would have:
903 * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
906 send_hdr->w0.sizem1 = 3;
907 send_hdr_ext->w0.tstmp = 1;
909 /* The dword offset of send_mem within the command is given by
910 * send_hdr->w0.sizem1 * 2
912 send_mem = (struct nix_send_mem_s *)(txq->cmd +
913 (send_hdr->w0.sizem1 << 1));
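/* With sizem1 == 3 the command spans (3 + 1) * 2 = 8 dwords and
 * sizem1 << 1 == 6, so the MEM sub-descriptor lands at txq->cmd[6..7],
 * right after HDR (cmd[0..1]), EXT (cmd[2..3]) and SG + IOVA (cmd[4..5]).
 */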
914 send_mem->subdc = NIX_SUBDC_MEM;
915 send_mem->alg = NIX_SENDMEMALG_SETTSTMP;
916 send_mem->addr = txq->dev->tstamp.tx_tstamp_iova;
918 sg = (union nix_send_sg_s *)&txq->cmd[4];
920 send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
921 /* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
922 send_hdr->w0.sizem1 = 1;
923 sg = (union nix_send_sg_s *)&txq->cmd[2];
926 send_hdr->w0.sq = txq->sq;
927 sg->subdc = NIX_SUBDC_SG;
929 sg->ld_type = NIX_SENDLDTYPE_LDD;
935 otx2_nix_tx_queue_release(void *_txq)
937 struct otx2_eth_txq *txq = _txq;
938 struct rte_eth_dev *eth_dev;
943 eth_dev = txq->dev->eth_dev;
945 otx2_nix_dbg("Releasing txq %u", txq->sq);
947 /* Flush and disable tm */
948 otx2_nix_tm_sw_xoff(txq, eth_dev->data->dev_started);
950 /* Free sqb's and disable sq */
954 rte_mempool_free(txq->sqb_pool);
955 txq->sqb_pool = NULL;
962 otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
963 uint16_t nb_desc, unsigned int socket_id,
964 const struct rte_eth_txconf *tx_conf)
966 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
967 const struct rte_memzone *fc;
968 struct otx2_eth_txq *txq;
974 /* Compile-time check to make sure all fast path elements fit in a cache line */
975 RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_txq, slow_path_start) >= 128);
977 if (tx_conf->tx_deferred_start) {
978 otx2_err("Tx deferred start is not supported");
982 /* Free memory prior to re-allocation if needed. */
983 if (eth_dev->data->tx_queues[sq] != NULL) {
984 otx2_nix_dbg("Freeing memory prior to re-allocation %d", sq);
985 otx2_nix_tx_queue_release(eth_dev->data->tx_queues[sq]);
986 eth_dev->data->tx_queues[sq] = NULL;
989 /* Find the expected offloads for this queue */
990 offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
992 /* Allocating tx queue data structure */
993 txq = rte_zmalloc_socket("otx2_ethdev TX queue", sizeof(*txq),
994 OTX2_ALIGN, socket_id);
996 otx2_err("Failed to alloc txq=%d", sq);
1002 txq->sqb_pool = NULL;
1003 txq->offloads = offloads;
1004 dev->tx_offloads |= offloads;
1007 * Allocate memory for flow control updates from HW.
1008 * Allocate one cache line so that it fits all FC_STYPE modes.
1010 fc = rte_eth_dma_zone_reserve(eth_dev, "fcmem", sq,
1011 OTX2_ALIGN + sizeof(struct npa_aura_s),
1012 OTX2_ALIGN, dev->node);
1014 otx2_err("Failed to allocate mem for fcmem");
1018 txq->fc_iova = fc->iova;
1019 txq->fc_mem = fc->addr;
1021 /* Initialize the aura sqb pool */
1022 rc = nix_alloc_sqb_pool(eth_dev->data->port_id, txq, nb_desc);
1024 otx2_err("Failed to alloc sqe pool rc=%d", rc);
1028 /* Initialize the SQ */
1029 rc = nix_sq_init(txq);
1031 otx2_err("Failed to init sq=%d context", sq);
1035 txq->fc_cache_pkts = 0;
1036 txq->io_addr = dev->base + NIX_LF_OP_SENDX(0);
1037 /* Evenly distribute LMT slots across SQs */
1038 txq->lmt_addr = (void *)(dev->lmt_addr + ((sq & LMT_SLOT_MASK) << 12));
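/* Masking the SQ index with LMT_SLOT_MASK picks an LMT slot and the
 * << 12 shift gives each slot a 4 KB stride, so every SQ presumably gets
 * its own LMTST window (slot count depends on LMT_SLOT_MASK, not shown).
 */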
1040 txq->qconf.socket_id = socket_id;
1041 txq->qconf.nb_desc = nb_desc;
1042 memcpy(&txq->qconf.conf.tx, tx_conf, sizeof(struct rte_eth_txconf));
1044 otx2_nix_form_default_desc(txq);
1046 otx2_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " sqb=0x%" PRIx64 ""
1047 " lmt_addr=%p nb_sqb_bufs=%d sqes_per_sqb_log2=%d", sq,
1048 fc->addr, offloads, txq->sqb_pool->pool_id, txq->lmt_addr,
1049 txq->nb_sqb_bufs, txq->sqes_per_sqb_log2);
1050 eth_dev->data->tx_queues[sq] = txq;
1051 eth_dev->data->tx_queue_state[sq] = RTE_ETH_QUEUE_STATE_STOPPED;
1055 otx2_nix_tx_queue_release(txq);
1061 nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
1063 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1064 struct otx2_eth_qconf *tx_qconf = NULL;
1065 struct otx2_eth_qconf *rx_qconf = NULL;
1066 struct otx2_eth_txq **txq;
1067 struct otx2_eth_rxq **rxq;
1068 int i, nb_rxq, nb_txq;
1070 nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
1071 nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
1073 tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
1074 if (tx_qconf == NULL) {
1075 otx2_err("Failed to allocate memory for tx_qconf");
1079 rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
1080 if (rx_qconf == NULL) {
1081 otx2_err("Failed to allocate memory for rx_qconf");
1085 txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
1086 for (i = 0; i < nb_txq; i++) {
1087 if (txq[i] == NULL) {
1088 otx2_err("txq[%d] is already released", i);
1091 memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf));
1092 otx2_nix_tx_queue_release(txq[i]);
1093 eth_dev->data->tx_queues[i] = NULL;
1096 rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
1097 for (i = 0; i < nb_rxq; i++) {
1098 if (rxq[i] == NULL) {
1099 otx2_err("rxq[%d] is already released", i);
1102 memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf));
1103 otx2_nix_rx_queue_release(rxq[i]);
1104 eth_dev->data->rx_queues[i] = NULL;
1107 dev->tx_qconf = tx_qconf;
1108 dev->rx_qconf = rx_qconf;
1121 nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
1123 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1124 struct otx2_eth_qconf *tx_qconf = dev->tx_qconf;
1125 struct otx2_eth_qconf *rx_qconf = dev->rx_qconf;
1126 struct otx2_eth_txq **txq;
1127 struct otx2_eth_rxq **rxq;
1128 int rc, i, nb_rxq, nb_txq;
1130 nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
1131 nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
1134 /* Setup tx & rx queues with previous configuration so
1135 * that the queues can be functional in cases where ports
1136 * are started without reconfiguring queues.
1138 * The usual reconfig sequence is as below:
1139 * port_configure() {
1144 * queue_configure() {
1151 * In some applications' control paths, queue_configure() would
1152 * NOT be invoked for TXQs/RXQs in port_configure().
1153 * In such cases, queues can be functional after start as the
1154 * queues are already setup in port_configure().
1156 for (i = 0; i < nb_txq; i++) {
1157 rc = otx2_nix_tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc,
1158 tx_qconf[i].socket_id,
1159 &tx_qconf[i].conf.tx);
1161 otx2_err("Failed to setup tx queue rc=%d", rc);
1162 txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
1163 for (i -= 1; i >= 0; i--)
1164 otx2_nix_tx_queue_release(txq[i]);
1169 free(tx_qconf); tx_qconf = NULL;
1171 for (i = 0; i < nb_rxq; i++) {
1172 rc = otx2_nix_rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc,
1173 rx_qconf[i].socket_id,
1174 &rx_qconf[i].conf.rx,
1175 rx_qconf[i].mempool);
1177 otx2_err("Failed to setup rx queue rc=%d", rc);
1178 rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
1179 for (i -= 1; i >= 0; i--)
1180 otx2_nix_rx_queue_release(rxq[i]);
1181 goto release_tx_queues;
1185 free(rx_qconf); rx_qconf = NULL;
1190 txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
1191 for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1192 otx2_nix_tx_queue_release(txq[i]);
1203 nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
1205 RTE_SET_USED(queue);
1206 RTE_SET_USED(mbufs);
1213 nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
1215 /* These dummy functions are required to support
1216 * applications which reconfigure queues without
1217 * stopping the tx burst and rx burst threads (e.g. the KNI app).
1218 * When the queue context is saved, txqs/rxqs are released,
1219 * which would crash the app since rx/tx burst is still
1220 * running on different lcores.
1222 eth_dev->tx_pkt_burst = nix_eth_nop_burst;
1223 eth_dev->rx_pkt_burst = nix_eth_nop_burst;
1228 nix_lso_tcp(struct nix_lso_format_cfg *req, bool v4)
1230 volatile struct nix_lso_format *field;
1232 /* Format works only with TCP packet marked by OL3/OL4 */
1233 field = (volatile struct nix_lso_format *)&req->fields[0];
1234 req->field_mask = NIX_LSO_FIELD_MASK;
1235 /* Outer IPv4/IPv6 */
1236 field->layer = NIX_TXLAYER_OL3;
1237 field->offset = v4 ? 2 : 4;
1238 field->sizem1 = 1; /* 2B */
1239 field->alg = NIX_LSOALG_ADD_PAYLEN;
1243 field->layer = NIX_TXLAYER_OL3;
1246 /* Incremented linearly per segment */
1247 field->alg = NIX_LSOALG_ADD_SEGNUM;
1251 /* TCP sequence number update */
1252 field->layer = NIX_TXLAYER_OL4;
1254 field->sizem1 = 3; /* 4 bytes */
1255 field->alg = NIX_LSOALG_ADD_OFFSET;
1257 /* TCP flags field */
1258 field->layer = NIX_TXLAYER_OL4;
1261 field->alg = NIX_LSOALG_TCP_FLAGS;
1266 nix_lso_udp_tun_tcp(struct nix_lso_format_cfg *req,
1267 bool outer_v4, bool inner_v4)
1269 volatile struct nix_lso_format *field;
1271 field = (volatile struct nix_lso_format *)&req->fields[0];
1272 req->field_mask = NIX_LSO_FIELD_MASK;
1273 /* Outer IPv4/IPv6 len */
1274 field->layer = NIX_TXLAYER_OL3;
1275 field->offset = outer_v4 ? 2 : 4;
1276 field->sizem1 = 1; /* 2B */
1277 field->alg = NIX_LSOALG_ADD_PAYLEN;
1281 field->layer = NIX_TXLAYER_OL3;
1284 /* Incremented linearly per segment */
1285 field->alg = NIX_LSOALG_ADD_SEGNUM;
1289 /* Outer UDP length */
1290 field->layer = NIX_TXLAYER_OL4;
1293 field->alg = NIX_LSOALG_ADD_PAYLEN;
1296 /* Inner IPv4/IPv6 */
1297 field->layer = NIX_TXLAYER_IL3;
1298 field->offset = inner_v4 ? 2 : 4;
1299 field->sizem1 = 1; /* 2B */
1300 field->alg = NIX_LSOALG_ADD_PAYLEN;
1304 field->layer = NIX_TXLAYER_IL3;
1307 /* Incremented linearly per segment */
1308 field->alg = NIX_LSOALG_ADD_SEGNUM;
1312 /* TCP sequence number update */
1313 field->layer = NIX_TXLAYER_IL4;
1315 field->sizem1 = 3; /* 4 bytes */
1316 field->alg = NIX_LSOALG_ADD_OFFSET;
1319 /* TCP flags field */
1320 field->layer = NIX_TXLAYER_IL4;
1323 field->alg = NIX_LSOALG_TCP_FLAGS;
1328 nix_lso_tun_tcp(struct nix_lso_format_cfg *req,
1329 bool outer_v4, bool inner_v4)
1331 volatile struct nix_lso_format *field;
1333 field = (volatile struct nix_lso_format *)&req->fields[0];
1334 req->field_mask = NIX_LSO_FIELD_MASK;
1335 /* Outer IPv4/IPv6 len */
1336 field->layer = NIX_TXLAYER_OL3;
1337 field->offset = outer_v4 ? 2 : 4;
1338 field->sizem1 = 1; /* 2B */
1339 field->alg = NIX_LSOALG_ADD_PAYLEN;
1343 field->layer = NIX_TXLAYER_OL3;
1346 /* Incremented linearly per segment */
1347 field->alg = NIX_LSOALG_ADD_SEGNUM;
1351 /* Inner IPv4/IPv6 */
1352 field->layer = NIX_TXLAYER_IL3;
1353 field->offset = inner_v4 ? 2 : 4;
1354 field->sizem1 = 1; /* 2B */
1355 field->alg = NIX_LSOALG_ADD_PAYLEN;
1359 field->layer = NIX_TXLAYER_IL3;
1362 /* Incremented linearly per segment */
1363 field->alg = NIX_LSOALG_ADD_SEGNUM;
1367 /* TCP sequence number update */
1368 field->layer = NIX_TXLAYER_IL4;
1370 field->sizem1 = 3; /* 4 bytes */
1371 field->alg = NIX_LSOALG_ADD_OFFSET;
1374 /* TCP flags field */
1375 field->layer = NIX_TXLAYER_IL4;
1378 field->alg = NIX_LSOALG_TCP_FLAGS;
1383 nix_setup_lso_formats(struct otx2_eth_dev *dev)
1385 struct otx2_mbox *mbox = dev->mbox;
1386 struct nix_lso_format_cfg_rsp *rsp;
1387 struct nix_lso_format_cfg *req;
1391 /* Skip if TSO was not requested */
1392 if (!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F))
1397 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1398 nix_lso_tcp(req, true);
1399 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1403 base = rsp->lso_format_idx;
1404 if (base != NIX_LSO_FORMAT_IDX_TSOV4)
1406 dev->lso_base_idx = base;
1407 otx2_nix_dbg("tcpv4 lso fmt=%u", base);
1413 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1414 nix_lso_tcp(req, false);
1415 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1419 if (rsp->lso_format_idx != base + 1)
1421 otx2_nix_dbg("tcpv6 lso fmt=%u\n", base + 1);
1424 * IPv4/UDP/TUN HDR/IPv4/TCP LSO
1426 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1427 nix_lso_udp_tun_tcp(req, true, true);
1428 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1432 if (rsp->lso_format_idx != base + 2)
1434 otx2_nix_dbg("udp tun v4v4 fmt=%u\n", base + 2);
1437 * IPv4/UDP/TUN HDR/IPv6/TCP LSO
1439 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1440 nix_lso_udp_tun_tcp(req, true, false);
1441 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1445 if (rsp->lso_format_idx != base + 3)
1447 otx2_nix_dbg("udp tun v4v6 fmt=%u\n", base + 3);
1450 * IPv6/UDP/TUN HDR/IPv4/TCP LSO
1452 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1453 nix_lso_udp_tun_tcp(req, false, true);
1454 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1458 if (rsp->lso_format_idx != base + 4)
1460 otx2_nix_dbg("udp tun v6v4 fmt=%u\n", base + 4);
1463 * IPv6/UDP/TUN HDR/IPv6/TCP LSO
1465 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1466 nix_lso_udp_tun_tcp(req, false, false);
1467 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1470 if (rsp->lso_format_idx != base + 5)
1472 otx2_nix_dbg("udp tun v6v6 fmt=%u\n", base + 5);
1475 * IPv4/TUN HDR/IPv4/TCP LSO
1477 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1478 nix_lso_tun_tcp(req, true, true);
1479 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1483 if (rsp->lso_format_idx != base + 6)
1485 otx2_nix_dbg("tun v4v4 fmt=%u\n", base + 6);
1488 * IPv4/TUN HDR/IPv6/TCP LSO
1490 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1491 nix_lso_tun_tcp(req, true, false);
1492 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1496 if (rsp->lso_format_idx != base + 7)
1498 otx2_nix_dbg("tun v4v6 fmt=%u\n", base + 7);
1501 * IPv6/TUN HDR/IPv4/TCP LSO
1503 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1504 nix_lso_tun_tcp(req, false, true);
1505 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1509 if (rsp->lso_format_idx != base + 8)
1511 otx2_nix_dbg("tun v6v4 fmt=%u\n", base + 8);
1514 * IPv6/TUN HDR/IPv6/TCP LSO
1516 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1517 nix_lso_tun_tcp(req, false, false);
1518 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1521 if (rsp->lso_format_idx != base + 9)
1523 otx2_nix_dbg("tun v6v6 fmt=%u\n", base + 9);
1528 otx2_nix_configure(struct rte_eth_dev *eth_dev)
1530 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1531 struct rte_eth_dev_data *data = eth_dev->data;
1532 struct rte_eth_conf *conf = &data->dev_conf;
1533 struct rte_eth_rxmode *rxmode = &conf->rxmode;
1534 struct rte_eth_txmode *txmode = &conf->txmode;
1535 char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
1536 struct rte_ether_addr *ea;
1537 uint8_t nb_rxq, nb_txq;
1543 if (rte_eal_has_hugepages() == 0) {
1544 otx2_err("Huge page is not configured");
1545 goto fail_configure;
1548 if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
1549 otx2_err("Setting link speed/duplex not supported");
1550 goto fail_configure;
1553 if (conf->dcb_capability_en == 1) {
1554 otx2_err("dcb enable is not supported");
1555 goto fail_configure;
1558 if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1559 otx2_err("Flow director is not supported");
1560 goto fail_configure;
1563 if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
1564 rxmode->mq_mode != ETH_MQ_RX_RSS) {
1565 otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
1566 goto fail_configure;
1569 if (txmode->mq_mode != ETH_MQ_TX_NONE) {
1570 otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
1571 goto fail_configure;
1574 if (otx2_dev_is_Ax(dev) &&
1575 (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
1576 ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
1577 (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
1578 otx2_err("Outer IP and SCTP checksum unsupported");
1579 goto fail_configure;
1582 /* Free the resources allocated from the previous configure */
1583 if (dev->configured == 1) {
1584 otx2_nix_rxchan_bpid_cfg(eth_dev, false);
1585 otx2_nix_vlan_fini(eth_dev);
1586 otx2_nix_mc_addr_list_uninstall(eth_dev);
1587 otx2_flow_free_all_resources(dev);
1588 oxt2_nix_unregister_queue_irqs(eth_dev);
1589 if (eth_dev->data->dev_conf.intr_conf.rxq)
1590 oxt2_nix_unregister_cq_irqs(eth_dev);
1591 nix_set_nop_rxtx_function(eth_dev);
1592 rc = nix_store_queue_cfg_and_then_release(eth_dev);
1594 goto fail_configure;
1595 otx2_nix_tm_fini(eth_dev);
1599 dev->rx_offloads = rxmode->offloads;
1600 dev->tx_offloads = txmode->offloads;
1601 dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
1602 dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);
1603 dev->rss_info.rss_grps = NIX_RSS_GRPS;
1605 nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
1606 nb_txq = RTE_MAX(data->nb_tx_queues, 1);
1608 /* Alloc a nix lf */
1609 rc = nix_lf_alloc(dev, nb_rxq, nb_txq);
1611 otx2_err("Failed to init nix_lf rc=%d", rc);
1615 rc = nix_setup_lso_formats(dev);
1617 otx2_err("failed to setup nix lso format fields, rc=%d", rc);
1622 rc = otx2_nix_rss_config(eth_dev);
1624 otx2_err("Failed to configure rss rc=%d", rc);
1628 /* Init the default TM scheduler hierarchy */
1629 rc = otx2_nix_tm_init_default(eth_dev);
1631 otx2_err("Failed to init traffic manager rc=%d", rc);
1635 rc = otx2_nix_vlan_offload_init(eth_dev);
1637 otx2_err("Failed to init vlan offload rc=%d", rc);
1641 /* Register queue IRQs */
1642 rc = oxt2_nix_register_queue_irqs(eth_dev);
1644 otx2_err("Failed to register queue interrupts rc=%d", rc);
1648 /* Register cq IRQs */
1649 if (eth_dev->data->dev_conf.intr_conf.rxq) {
1650 if (eth_dev->data->nb_rx_queues > dev->cints) {
1651 otx2_err("Rx interrupt cannot be enabled, rxq > %d",
1655 /* The Rx interrupt feature cannot work with vector mode because
1656 * vector mode doesn't process packets unless at least 4 pkts are
1657 * received, while cq interrupts are generated even for 1 pkt
1660 dev->scalar_ena = true;
1662 rc = oxt2_nix_register_cq_irqs(eth_dev);
1664 otx2_err("Failed to register CQ interrupts rc=%d", rc);
1669 /* Configure loop back mode */
1670 rc = cgx_intlbk_enable(dev, eth_dev->data->dev_conf.lpbk_mode);
1672 otx2_err("Failed to configure cgx loop back mode rc=%d", rc);
1676 rc = otx2_nix_rxchan_bpid_cfg(eth_dev, true);
1678 otx2_err("Failed to configure nix rx chan bpid cfg rc=%d", rc);
1682 rc = otx2_nix_mc_addr_list_install(eth_dev);
1684 otx2_err("Failed to install mc address list rc=%d", rc);
1689 * Restore the queue config when a reconfigure follows a previous
1690 * reconfigure and the application has not invoked queue configure.
1692 if (dev->configured == 1) {
1693 rc = nix_restore_queue_cfg(eth_dev);
1695 goto uninstall_mc_list;
1698 /* Update the mac address */
1699 ea = eth_dev->data->mac_addrs;
1700 memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1701 if (rte_is_zero_ether_addr(ea))
1702 rte_eth_random_addr((uint8_t *)ea);
1704 rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
1706 otx2_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
1707 " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 ""
1708 " rx_flags=0x%x tx_flags=0x%x",
1709 eth_dev->data->port_id, ea_fmt, nb_rxq,
1710 nb_txq, dev->rx_offloads, dev->tx_offloads,
1711 dev->rx_offload_flags, dev->tx_offload_flags);
1714 dev->configured = 1;
1715 dev->configured_nb_rx_qs = data->nb_rx_queues;
1716 dev->configured_nb_tx_qs = data->nb_tx_queues;
1720 otx2_nix_mc_addr_list_uninstall(eth_dev);
1722 oxt2_nix_unregister_cq_irqs(eth_dev);
1724 oxt2_nix_unregister_queue_irqs(eth_dev);
1726 otx2_nix_vlan_fini(eth_dev);
1728 otx2_nix_tm_fini(eth_dev);
1732 dev->rx_offload_flags &= ~nix_rx_offload_flags(eth_dev);
1733 dev->tx_offload_flags &= ~nix_tx_offload_flags(eth_dev);
1735 dev->configured = 0;
1740 otx2_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
1742 struct rte_eth_dev_data *data = eth_dev->data;
1743 struct otx2_eth_txq *txq;
1746 txq = eth_dev->data->tx_queues[qidx];
1748 if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
1751 rc = otx2_nix_sq_sqb_aura_fc(txq, true);
1753 otx2_err("Failed to enable sqb aura fc, txq=%u, rc=%d",
1758 data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
1765 otx2_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
1767 struct rte_eth_dev_data *data = eth_dev->data;
1768 struct otx2_eth_txq *txq;
1771 txq = eth_dev->data->tx_queues[qidx];
1773 if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
1776 txq->fc_cache_pkts = 0;
1778 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
1780 otx2_err("Failed to disable sqb aura fc, txq=%u, rc=%d",
1785 data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
1792 otx2_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
1794 struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
1795 struct rte_eth_dev_data *data = eth_dev->data;
1798 if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
1801 rc = nix_rq_enb_dis(rxq->eth_dev, rxq, true);
1803 otx2_err("Failed to enable rxq=%u, rc=%d", qidx, rc);
1807 data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
1814 otx2_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
1816 struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
1817 struct rte_eth_dev_data *data = eth_dev->data;
1820 if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
1823 rc = nix_rq_enb_dis(rxq->eth_dev, rxq, false);
1825 otx2_err("Failed to disable rxq=%u, rc=%d", qidx, rc);
1829 data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
1836 otx2_nix_dev_stop(struct rte_eth_dev *eth_dev)
1838 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1839 struct rte_mbuf *rx_pkts[32];
1840 struct otx2_eth_rxq *rxq;
1841 int count, i, j, rc;
1843 nix_cgx_stop_link_event(dev);
1844 npc_rx_disable(dev);
1846 /* Stop rx queues and free up pkts pending */
1847 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1848 rc = otx2_nix_rx_queue_stop(eth_dev, i);
1852 rxq = eth_dev->data->rx_queues[i];
1853 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1855 for (j = 0; j < count; j++)
1856 rte_pktmbuf_free(rx_pkts[j]);
1857 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1861 /* Stop tx queues */
1862 for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1863 otx2_nix_tx_queue_stop(eth_dev, i);
1867 otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
1869 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1872 if (eth_dev->data->nb_rx_queues != 0) {
1873 rc = otx2_nix_recalc_mtu(eth_dev);
1878 /* Start rx queues */
1879 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1880 rc = otx2_nix_rx_queue_start(eth_dev, i);
1885 /* Start tx queues */
1886 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1887 rc = otx2_nix_tx_queue_start(eth_dev, i);
1892 rc = otx2_nix_update_flow_ctrl_mode(eth_dev);
1894 otx2_err("Failed to update flow ctrl mode %d", rc);
1898 /* Enable PTP if it was requested by the app or if it is already
1899 * enabled in the PF owning this VF
1901 memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
1902 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
1903 otx2_ethdev_is_ptp_en(dev))
1904 otx2_nix_timesync_enable(eth_dev);
1906 otx2_nix_timesync_disable(eth_dev);
1908 rc = npc_rx_enable(dev);
1910 otx2_err("Failed to enable NPC rx %d", rc);
1914 otx2_nix_toggle_flag_link_cfg(dev, true);
1916 rc = nix_cgx_start_link_event(dev);
1918 otx2_err("Failed to start cgx link event %d", rc);
1922 otx2_nix_toggle_flag_link_cfg(dev, false);
1923 otx2_eth_set_tx_function(eth_dev);
1924 otx2_eth_set_rx_function(eth_dev);
1929 npc_rx_disable(dev);
1930 otx2_nix_toggle_flag_link_cfg(dev, false);
1934 static int otx2_nix_dev_reset(struct rte_eth_dev *eth_dev);
1935 static void otx2_nix_dev_close(struct rte_eth_dev *eth_dev);
1937 /* Initialize and register driver with DPDK Application */
1938 static const struct eth_dev_ops otx2_eth_dev_ops = {
1939 .dev_infos_get = otx2_nix_info_get,
1940 .dev_configure = otx2_nix_configure,
1941 .link_update = otx2_nix_link_update,
1942 .tx_queue_setup = otx2_nix_tx_queue_setup,
1943 .tx_queue_release = otx2_nix_tx_queue_release,
1944 .rx_queue_setup = otx2_nix_rx_queue_setup,
1945 .rx_queue_release = otx2_nix_rx_queue_release,
1946 .dev_start = otx2_nix_dev_start,
1947 .dev_stop = otx2_nix_dev_stop,
1948 .dev_close = otx2_nix_dev_close,
1949 .tx_queue_start = otx2_nix_tx_queue_start,
1950 .tx_queue_stop = otx2_nix_tx_queue_stop,
1951 .rx_queue_start = otx2_nix_rx_queue_start,
1952 .rx_queue_stop = otx2_nix_rx_queue_stop,
1953 .dev_set_link_up = otx2_nix_dev_set_link_up,
1954 .dev_set_link_down = otx2_nix_dev_set_link_down,
1955 .dev_supported_ptypes_get = otx2_nix_supported_ptypes_get,
1956 .dev_reset = otx2_nix_dev_reset,
1957 .stats_get = otx2_nix_dev_stats_get,
1958 .stats_reset = otx2_nix_dev_stats_reset,
1959 .get_reg = otx2_nix_dev_get_reg,
1960 .mtu_set = otx2_nix_mtu_set,
1961 .mac_addr_add = otx2_nix_mac_addr_add,
1962 .mac_addr_remove = otx2_nix_mac_addr_del,
1963 .mac_addr_set = otx2_nix_mac_addr_set,
1964 .set_mc_addr_list = otx2_nix_set_mc_addr_list,
1965 .promiscuous_enable = otx2_nix_promisc_enable,
1966 .promiscuous_disable = otx2_nix_promisc_disable,
1967 .allmulticast_enable = otx2_nix_allmulticast_enable,
1968 .allmulticast_disable = otx2_nix_allmulticast_disable,
1969 .queue_stats_mapping_set = otx2_nix_queue_stats_mapping,
1970 .reta_update = otx2_nix_dev_reta_update,
1971 .reta_query = otx2_nix_dev_reta_query,
1972 .rss_hash_update = otx2_nix_rss_hash_update,
1973 .rss_hash_conf_get = otx2_nix_rss_hash_conf_get,
1974 .xstats_get = otx2_nix_xstats_get,
1975 .xstats_get_names = otx2_nix_xstats_get_names,
1976 .xstats_reset = otx2_nix_xstats_reset,
1977 .xstats_get_by_id = otx2_nix_xstats_get_by_id,
1978 .xstats_get_names_by_id = otx2_nix_xstats_get_names_by_id,
1979 .rxq_info_get = otx2_nix_rxq_info_get,
1980 .txq_info_get = otx2_nix_txq_info_get,
1981 .rx_queue_count = otx2_nix_rx_queue_count,
1982 .rx_descriptor_done = otx2_nix_rx_descriptor_done,
1983 .rx_descriptor_status = otx2_nix_rx_descriptor_status,
1984 .tx_descriptor_status = otx2_nix_tx_descriptor_status,
1985 .tx_done_cleanup = otx2_nix_tx_done_cleanup,
1986 .pool_ops_supported = otx2_nix_pool_ops_supported,
1987 .filter_ctrl = otx2_nix_dev_filter_ctrl,
1988 .get_module_info = otx2_nix_get_module_info,
1989 .get_module_eeprom = otx2_nix_get_module_eeprom,
1990 .fw_version_get = otx2_nix_fw_version_get,
1991 .flow_ctrl_get = otx2_nix_flow_ctrl_get,
1992 .flow_ctrl_set = otx2_nix_flow_ctrl_set,
1993 .timesync_enable = otx2_nix_timesync_enable,
1994 .timesync_disable = otx2_nix_timesync_disable,
1995 .timesync_read_rx_timestamp = otx2_nix_timesync_read_rx_timestamp,
1996 .timesync_read_tx_timestamp = otx2_nix_timesync_read_tx_timestamp,
1997 .timesync_adjust_time = otx2_nix_timesync_adjust_time,
1998 .timesync_read_time = otx2_nix_timesync_read_time,
1999 .timesync_write_time = otx2_nix_timesync_write_time,
2000 .vlan_offload_set = otx2_nix_vlan_offload_set,
2001 .vlan_filter_set = otx2_nix_vlan_filter_set,
2002 .vlan_strip_queue_set = otx2_nix_vlan_strip_queue_set,
2003 .vlan_tpid_set = otx2_nix_vlan_tpid_set,
2004 .vlan_pvid_set = otx2_nix_vlan_pvid_set,
2005 .rx_queue_intr_enable = otx2_nix_rx_queue_intr_enable,
2006 .rx_queue_intr_disable = otx2_nix_rx_queue_intr_disable,
2007 .read_clock = otx2_nix_read_clock,
2011 nix_lf_attach(struct otx2_eth_dev *dev)
2013 struct otx2_mbox *mbox = dev->mbox;
2014 struct rsrc_attach_req *req;
2016 /* Attach NIX(lf) */
2017 req = otx2_mbox_alloc_msg_attach_resources(mbox);
2021 return otx2_mbox_process(mbox);
2025 nix_lf_get_msix_offset(struct otx2_eth_dev *dev)
2027 struct otx2_mbox *mbox = dev->mbox;
2028 struct msix_offset_rsp *msix_rsp;
2031 /* Get NPA and NIX MSIX vector offsets */
2032 otx2_mbox_alloc_msg_msix_offset(mbox);
2034 rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
2036 dev->nix_msixoff = msix_rsp->nix_msixoff;
2042 otx2_eth_dev_lf_detach(struct otx2_mbox *mbox)
2044 struct rsrc_detach_req *req;
2046 req = otx2_mbox_alloc_msg_detach_resources(mbox);
2048 /* Detach all except npa lf */
2049 req->partial = true;
2056 return otx2_mbox_process(mbox);
2060 otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
2062 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2063 struct rte_pci_device *pci_dev;
2064 int rc, max_entries;
2066 eth_dev->dev_ops = &otx2_eth_dev_ops;
2068 /* For secondary processes, the primary has done all the work */
2069 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2070 /* Setup callbacks for secondary process */
2071 otx2_eth_set_tx_function(eth_dev);
2072 otx2_eth_set_rx_function(eth_dev);
2076 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2078 rte_eth_copy_pci_info(eth_dev, pci_dev);
2079 eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
2081 /* Zero out everything after OTX2_DEV to allow proper dev_reset() */
2082 memset(&dev->otx2_eth_dev_data_start, 0, sizeof(*dev) -
2083 offsetof(struct otx2_eth_dev, otx2_eth_dev_data_start));
2085 /* Parse devargs string */
2086 rc = otx2_ethdev_parse_devargs(eth_dev->device->devargs, dev);
2088 otx2_err("Failed to parse devargs rc=%d", rc);
2092 if (!dev->mbox_active) {
2093 /* Initialize the base otx2_dev object
2094 * only if it is not already initialized (mbox inactive)
2096 rc = otx2_dev_init(pci_dev, dev);
2098 otx2_err("Failed to initialize otx2_dev rc=%d", rc);
2102 /* Device generic callbacks */
2103 dev->ops = &otx2_dev_ops;
2104 dev->eth_dev = eth_dev;
2106 /* Grab the NPA LF if required */
2107 rc = otx2_npa_lf_init(pci_dev, dev);
2109 goto otx2_dev_uninit;
2111 dev->configured = 0;
2112 dev->drv_inited = true;
2113 dev->base = dev->bar2 + (RVU_BLOCK_ADDR_NIX0 << 20);
2114 dev->lmt_addr = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);
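/* Each RVU block's LF registers presumably occupy a 1 MB (1 << 20) window
 * inside BAR2, indexed by the block address; the NIX0 and LMT windows are
 * resolved here.
 */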
2117 rc = nix_lf_attach(dev);
2119 goto otx2_npa_uninit;
2121 /* Get NIX MSIX offset */
2122 rc = nix_lf_get_msix_offset(dev);
2124 goto otx2_npa_uninit;
2126 /* Register LF irq handlers */
2127 rc = otx2_nix_register_irqs(eth_dev);
2131 /* Get maximum number of supported MAC entries */
2132 max_entries = otx2_cgx_mac_max_entries_get(dev);
2133 if (max_entries < 0) {
2134 otx2_err("Failed to get max entries for mac addr");
2136 goto unregister_irq;
2139 /* For VFs, returned max_entries will be 0. But to keep default MAC
2140 * address, one entry must be allocated. So set it to 1.
2142 if (max_entries == 0)
2145 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", max_entries *
2146 RTE_ETHER_ADDR_LEN, 0);
2147 if (eth_dev->data->mac_addrs == NULL) {
2148 otx2_err("Failed to allocate memory for mac addr");
2150 goto unregister_irq;
2153 dev->max_mac_entries = max_entries;
2155 rc = otx2_nix_mac_addr_get(eth_dev, dev->mac_addr);
2157 goto free_mac_addrs;
2159 /* Update the mac address */
2160 memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);
2162 /* Also sync same MAC address to CGX table */
2163 otx2_cgx_mac_addr_set(eth_dev, &eth_dev->data->mac_addrs[0]);
2165 /* Initialize the tm data structures */
2166 otx2_nix_tm_conf_init(eth_dev);
2168 dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
2169 dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
2171 if (otx2_dev_is_96xx_A0(dev) ||
2172 otx2_dev_is_95xx_Ax(dev)) {
2173 dev->hwcap |= OTX2_FIXUP_F_MIN_4K_Q;
2174 dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL;
2177 /* Initialize rte-flow */
2178 rc = otx2_flow_init(dev);
2180 goto free_mac_addrs;
2182 otx2_nix_mc_filter_init(dev);
2184 otx2_nix_dbg("Port=%d pf=%d vf=%d ver=%s msix_off=%d hwcap=0x%" PRIx64
2185 " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
2186 eth_dev->data->port_id, dev->pf, dev->vf,
2187 OTX2_ETH_DEV_PMD_VERSION, dev->nix_msixoff, dev->hwcap,
2188 dev->rx_offload_capa, dev->tx_offload_capa);
2192 rte_free(eth_dev->data->mac_addrs);
2194 otx2_nix_unregister_irqs(eth_dev);
2196 otx2_eth_dev_lf_detach(dev->mbox);
2200 otx2_dev_fini(pci_dev, dev);
2202 otx2_err("Failed to init nix eth_dev rc=%d", rc);
2207 otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
2209 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2210 struct rte_pci_device *pci_dev;
2213 /* Nothing to be done for secondary processes */
2214 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2217 /* Clear the flag since we are closing down */
2218 dev->configured = 0;
2220 /* Disable nix bpid config */
2221 otx2_nix_rxchan_bpid_cfg(eth_dev, false);
2223 npc_rx_disable(dev);
2225 /* Disable vlan offloads */
2226 otx2_nix_vlan_fini(eth_dev);
2228 /* Disable other rte_flow entries */
2229 otx2_flow_fini(dev);
2231 /* Free multicast filter list */
2232 otx2_nix_mc_filter_fini(dev);
2234 /* Disable PTP if already enabled */
2235 if (otx2_ethdev_is_ptp_en(dev))
2236 otx2_nix_timesync_disable(eth_dev);
2238 nix_cgx_stop_link_event(dev);
2241 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2242 otx2_nix_tx_queue_release(eth_dev->data->tx_queues[i]);
2243 eth_dev->data->tx_queues[i] = NULL;
2245 eth_dev->data->nb_tx_queues = 0;
2247 /* Free up RQ's and CQ's */
2248 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
2249 otx2_nix_rx_queue_release(eth_dev->data->rx_queues[i]);
2250 eth_dev->data->rx_queues[i] = NULL;
2252 eth_dev->data->nb_rx_queues = 0;
2254 /* Free tm resources */
2255 rc = otx2_nix_tm_fini(eth_dev);
2257 otx2_err("Failed to cleanup tm, rc=%d", rc);
2259 /* Unregister queue irqs */
2260 oxt2_nix_unregister_queue_irqs(eth_dev);
2262 /* Unregister cq irqs */
2263 if (eth_dev->data->dev_conf.intr_conf.rxq)
2264 oxt2_nix_unregister_cq_irqs(eth_dev);
2266 rc = nix_lf_free(dev);
2268 otx2_err("Failed to free nix lf, rc=%d", rc);
2270 rc = otx2_npa_lf_fini();
2272 otx2_err("Failed to cleanup npa lf, rc=%d", rc);
2274 rte_free(eth_dev->data->mac_addrs);
2275 eth_dev->data->mac_addrs = NULL;
2276 dev->drv_inited = false;
2278 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2279 otx2_nix_unregister_irqs(eth_dev);
2281 rc = otx2_eth_dev_lf_detach(dev->mbox);
2283 otx2_err("Failed to detach resources, rc=%d", rc);
2285 /* Check if mbox close is needed */
2289 if (otx2_npa_lf_active(dev) || otx2_dev_active_vfs(dev)) {
2290 /* Will be freed later by PMD */
2291 eth_dev->data->dev_private = NULL;
2295 otx2_dev_fini(pci_dev, dev);
2300 otx2_nix_dev_close(struct rte_eth_dev *eth_dev)
2302 otx2_eth_dev_uninit(eth_dev, true);
2306 otx2_nix_dev_reset(struct rte_eth_dev *eth_dev)
2310 rc = otx2_eth_dev_uninit(eth_dev, false);
2314 return otx2_eth_dev_init(eth_dev);
2318 nix_remove(struct rte_pci_device *pci_dev)
2320 struct rte_eth_dev *eth_dev;
2321 struct otx2_idev_cfg *idev;
2322 struct otx2_dev *otx2_dev;
2325 eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
2327 /* Cleanup eth dev */
2328 rc = otx2_eth_dev_uninit(eth_dev, true);
2332 rte_eth_dev_pci_release(eth_dev);
2335 /* Nothing to be done for secondary processes */
2336 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2339 /* Check for common resources */
2340 idev = otx2_intra_dev_get_cfg();
2341 if (!idev || !idev->npa_lf || idev->npa_lf->pci_dev != pci_dev)
2344 otx2_dev = container_of(idev->npa_lf, struct otx2_dev, npalf);
2346 if (otx2_npa_lf_active(otx2_dev) || otx2_dev_active_vfs(otx2_dev))
2349 /* Safe to cleanup mbox as no more users */
2350 otx2_dev_fini(pci_dev, otx2_dev);
2355 otx2_info("%s: common resource in use by other devices", pci_dev->name);
2360 nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
2364 RTE_SET_USED(pci_drv);
2366 rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct otx2_eth_dev),
2369 /* On error on a secondary process, recheck if the port exists in the
2370 * primary or is in the middle of detaching.
2372 if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
2373 if (!rte_eth_dev_allocated(pci_dev->device.name))
2378 static const struct rte_pci_id pci_nix_map[] = {
2380 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF)
2383 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF)
2386 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
2387 PCI_DEVID_OCTEONTX2_RVU_AF_VF)
2394 static struct rte_pci_driver pci_nix = {
2395 .id_table = pci_nix_map,
2396 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
2397 RTE_PCI_DRV_INTR_LSC,
2399 .remove = nix_remove,
2402 RTE_PMD_REGISTER_PCI(net_octeontx2, pci_nix);
2403 RTE_PMD_REGISTER_PCI_TABLE(net_octeontx2, pci_nix_map);
2404 RTE_PMD_REGISTER_KMOD_DEP(net_octeontx2, "vfio-pci");