1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
7 #include <rte_ethdev_pci.h>
9 #include <rte_malloc.h>
11 #include <rte_mbuf_pool_ops.h>
12 #include <rte_mempool.h>
14 #include "otx2_ethdev.h"
15 #include "otx2_ethdev_sec.h"
17 static inline uint64_t
18 nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
20 uint64_t capa = NIX_RX_OFFLOAD_CAPA;
22 if (otx2_dev_is_vf(dev) ||
23 dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG)
24 capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;
29 static inline uint64_t
30 nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
32 uint64_t capa = NIX_TX_OFFLOAD_CAPA;
34 /* TSO not supported for earlier chip revisions */
35 if (otx2_dev_is_96xx_A0(dev) || otx2_dev_is_95xx_Ax(dev))
36 capa &= ~(DEV_TX_OFFLOAD_TCP_TSO |
37 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
38 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
39 DEV_TX_OFFLOAD_GRE_TNL_TSO);
43 static const struct otx2_dev_ops otx2_dev_ops = {
44 .link_status_update = otx2_eth_dev_link_status_update,
45 .ptp_info_update = otx2_eth_dev_ptp_info_update
49 nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
51 struct otx2_mbox *mbox = dev->mbox;
52 struct nix_lf_alloc_req *req;
53 struct nix_lf_alloc_rsp *rsp;
56 req = otx2_mbox_alloc_msg_nix_lf_alloc(mbox);
60 /* XQE_SZ should be in sync with NIX_CQ_ENTRY_SZ */
61 RTE_BUILD_BUG_ON(NIX_CQ_ENTRY_SZ != 128);
62 req->xqe_sz = NIX_XQESZ_W16;
63 req->rss_sz = dev->rss_info.rss_size;
64 req->rss_grps = NIX_RSS_GRPS;
65 req->npa_func = otx2_npa_pf_func_get();
66 req->sso_func = otx2_sso_pf_func_get();
67 req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
68 if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
69 DEV_RX_OFFLOAD_UDP_CKSUM)) {
70 req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
71 req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
73 req->rx_cfg |= BIT_ULL(32 /* DROP_RE */);
74 if (dev->rss_tag_as_xor == 0)
75 req->flags = NIX_LF_RSS_TAG_LSB_AS_ADDER;
77 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
81 dev->sqb_size = rsp->sqb_size;
82 dev->tx_chan_base = rsp->tx_chan_base;
83 dev->rx_chan_base = rsp->rx_chan_base;
84 dev->rx_chan_cnt = rsp->rx_chan_cnt;
85 dev->tx_chan_cnt = rsp->tx_chan_cnt;
86 dev->lso_tsov4_idx = rsp->lso_tsov4_idx;
87 dev->lso_tsov6_idx = rsp->lso_tsov6_idx;
88 dev->lf_tx_stats = rsp->lf_tx_stats;
89 dev->lf_rx_stats = rsp->lf_rx_stats;
90 dev->cints = rsp->cints;
91 dev->qints = rsp->qints;
92 dev->npc_flow.channel = dev->rx_chan_base;
93 dev->ptp_en = rsp->hw_rx_tstamp_en;
99 nix_lf_switch_header_type_enable(struct otx2_eth_dev *dev, bool enable)
101 struct otx2_mbox *mbox = dev->mbox;
102 struct npc_set_pkind *req;
103 struct msg_resp *rsp;
106 if (dev->npc_flow.switch_header_type == 0)
109 /* Notify AF about higig2 config */
110 req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
111 req->mode = dev->npc_flow.switch_header_type;
113 req->mode = OTX2_PRIV_FLAGS_DEFAULT;
115 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
118 req = otx2_mbox_alloc_msg_npc_set_pkind(mbox);
119 req->mode = dev->npc_flow.switch_header_type;
121 req->mode = OTX2_PRIV_FLAGS_DEFAULT;
123 return otx2_mbox_process_msg(mbox, (void *)&rsp);
127 nix_lf_free(struct otx2_eth_dev *dev)
129 struct otx2_mbox *mbox = dev->mbox;
130 struct nix_lf_free_req *req;
131 struct ndc_sync_op *ndc_req;
134 /* Sync NDC-NIX for LF */
135 ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
136 ndc_req->nix_lf_tx_sync = 1;
137 ndc_req->nix_lf_rx_sync = 1;
138 rc = otx2_mbox_process(mbox);
140 otx2_err("Error on NDC-NIX-[TX, RX] LF sync, rc %d", rc);
142 req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
143 /* Let the AF driver free all NPC entries allocated
144 * by this NIX LF using the NPC mailbox.
148 return otx2_mbox_process(mbox);
152 otx2_cgx_rxtx_start(struct otx2_eth_dev *dev)
154 struct otx2_mbox *mbox = dev->mbox;
156 if (otx2_dev_is_vf_or_sdp(dev))
159 otx2_mbox_alloc_msg_cgx_start_rxtx(mbox);
161 return otx2_mbox_process(mbox);
165 otx2_cgx_rxtx_stop(struct otx2_eth_dev *dev)
167 struct otx2_mbox *mbox = dev->mbox;
169 if (otx2_dev_is_vf_or_sdp(dev))
172 otx2_mbox_alloc_msg_cgx_stop_rxtx(mbox);
174 return otx2_mbox_process(mbox);
178 npc_rx_enable(struct otx2_eth_dev *dev)
180 struct otx2_mbox *mbox = dev->mbox;
182 otx2_mbox_alloc_msg_nix_lf_start_rx(mbox);
184 return otx2_mbox_process(mbox);
188 npc_rx_disable(struct otx2_eth_dev *dev)
190 struct otx2_mbox *mbox = dev->mbox;
192 otx2_mbox_alloc_msg_nix_lf_stop_rx(mbox);
194 return otx2_mbox_process(mbox);
198 nix_cgx_start_link_event(struct otx2_eth_dev *dev)
200 struct otx2_mbox *mbox = dev->mbox;
202 if (otx2_dev_is_vf_or_sdp(dev))
205 otx2_mbox_alloc_msg_cgx_start_linkevents(mbox);
207 return otx2_mbox_process(mbox);
211 cgx_intlbk_enable(struct otx2_eth_dev *dev, bool en)
213 struct otx2_mbox *mbox = dev->mbox;
215 if (en && otx2_dev_is_vf_or_sdp(dev))
219 otx2_mbox_alloc_msg_cgx_intlbk_enable(mbox);
221 otx2_mbox_alloc_msg_cgx_intlbk_disable(mbox);
223 return otx2_mbox_process(mbox);
227 nix_cgx_stop_link_event(struct otx2_eth_dev *dev)
229 struct otx2_mbox *mbox = dev->mbox;
231 if (otx2_dev_is_vf_or_sdp(dev))
234 otx2_mbox_alloc_msg_cgx_stop_linkevents(mbox);
236 return otx2_mbox_process(mbox);
240 nix_rx_queue_reset(struct otx2_eth_rxq *rxq)
246 static inline uint32_t
247 nix_qsize_to_val(enum nix_q_size_e qsize)
249 return (16UL << (qsize * 2));
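/* Queue sizes grow by 4x per step: qsize 0 => 16 entries, 1 => 64,
 * 2 => 256, and so on, i.e. 16 << (2 * qsize).
 */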
252 static inline enum nix_q_size_e
253 nix_qsize_clampup_get(struct otx2_eth_dev *dev, uint32_t val)
257 if (otx2_ethdev_fixup_is_min_4k_q(dev))
262 for (; i < nix_q_size_max; i++)
263 if (val <= nix_qsize_to_val(i))
266 if (i >= nix_q_size_max)
267 i = nix_q_size_max - 1;
273 nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
274 uint16_t qid, struct otx2_eth_rxq *rxq, struct rte_mempool *mp)
276 struct otx2_mbox *mbox = dev->mbox;
277 const struct rte_memzone *rz;
278 uint32_t ring_size, cq_size;
279 struct nix_aq_enq_req *aq;
284 ring_size = cq_size * NIX_CQ_ENTRY_SZ;
285 rz = rte_eth_dma_zone_reserve(eth_dev, "cq", qid, ring_size,
286 NIX_CQ_ALIGN, dev->node);
288 otx2_err("Failed to allocate mem for cq hw ring");
292 memset(rz->addr, 0, rz->len);
293 rxq->desc = (uintptr_t)rz->addr;
294 rxq->qmask = cq_size - 1;
296 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
298 aq->ctype = NIX_AQ_CTYPE_CQ;
299 aq->op = NIX_AQ_INSTOP_INIT;
303 aq->cq.qsize = rxq->qsize;
304 aq->cq.base = rz->iova;
305 aq->cq.avg_level = 0xff;
306 aq->cq.cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
307 aq->cq.cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);
309 /* Many to one reduction */
310 aq->cq.qint_idx = qid % dev->qints;
311 /* Map CQ0 [RQ0] to CINT0 and so on till max 64 irqs */
312 aq->cq.cint_idx = qid;
314 if (otx2_ethdev_fixup_is_limit_cq_full(dev)) {
315 const float rx_cq_skid = NIX_CQ_FULL_ERRATA_SKID;
316 uint16_t min_rx_drop;
318 min_rx_drop = ceil(rx_cq_skid / (float)cq_size);
319 aq->cq.drop = min_rx_drop;
321 rxq->cq_drop = min_rx_drop;
323 rxq->cq_drop = NIX_CQ_THRESH_LEVEL;
324 aq->cq.drop = rxq->cq_drop;
328 /* TX pause frames enable flowctrl on RX side */
329 if (dev->fc_info.tx_pause) {
330 /* Single bpid is allocated for all rx channels for now */
331 aq->cq.bpid = dev->fc_info.bpid[0];
332 aq->cq.bp = rxq->cq_drop;
336 rc = otx2_mbox_process(mbox);
338 otx2_err("Failed to init cq context");
342 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
344 aq->ctype = NIX_AQ_CTYPE_RQ;
345 aq->op = NIX_AQ_INSTOP_INIT;
348 aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
350 aq->rq.lpb_aura = npa_lf_aura_handle_to_aura(mp->pool_id);
351 first_skip = (sizeof(struct rte_mbuf));
352 first_skip += RTE_PKTMBUF_HEADROOM;
353 first_skip += rte_pktmbuf_priv_size(mp);
354 rxq->data_off = first_skip;
356 first_skip /= 8; /* Expressed in number of dwords */
357 aq->rq.first_skip = first_skip;
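/* Example (assuming a 128 B struct rte_mbuf, the default 128 B headroom
 * and no mbuf private area): first_skip = 256 B, i.e. 32 eight-byte words.
 */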
358 aq->rq.later_skip = (sizeof(struct rte_mbuf) / 8);
359 aq->rq.flow_tagw = 32; /* 32-bits */
360 aq->rq.lpb_sizem1 = rte_pktmbuf_data_room_size(mp);
361 aq->rq.lpb_sizem1 += rte_pktmbuf_priv_size(mp);
362 aq->rq.lpb_sizem1 += sizeof(struct rte_mbuf);
363 aq->rq.lpb_sizem1 /= 8;
364 aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
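/* lpb_sizem1 thus covers the whole mempool element (mbuf header +
 * private area + data room) in 8-byte units, minus one, which is the
 * size-minus-one encoding the RQ context expects.
 */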
366 aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
367 aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
368 aq->rq.rq_int_ena = 0;
369 /* Many to one reduction */
370 aq->rq.qint_idx = qid % dev->qints;
372 aq->rq.xqe_drop_ena = 1;
374 rc = otx2_mbox_process(mbox);
376 otx2_err("Failed to init rq context");
386 nix_rq_enb_dis(struct rte_eth_dev *eth_dev,
387 struct otx2_eth_rxq *rxq, const bool enb)
389 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
390 struct otx2_mbox *mbox = dev->mbox;
391 struct nix_aq_enq_req *aq;
393 /* Pkts will be dropped silently if RQ is disabled */
394 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
396 aq->ctype = NIX_AQ_CTYPE_RQ;
397 aq->op = NIX_AQ_INSTOP_WRITE;
400 aq->rq_mask.ena = ~(aq->rq_mask.ena);
402 return otx2_mbox_process(mbox);
406 nix_cq_rq_uninit(struct rte_eth_dev *eth_dev, struct otx2_eth_rxq *rxq)
408 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
409 struct otx2_mbox *mbox = dev->mbox;
410 struct nix_aq_enq_req *aq;
413 /* RQ is already disabled */
415 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
417 aq->ctype = NIX_AQ_CTYPE_CQ;
418 aq->op = NIX_AQ_INSTOP_WRITE;
421 aq->cq_mask.ena = ~(aq->cq_mask.ena);
423 rc = otx2_mbox_process(mbox);
425 otx2_err("Failed to disable cq context");
433 nix_get_data_off(struct otx2_eth_dev *dev)
435 return otx2_ethdev_is_ptp_en(dev) ? NIX_TIMESYNC_RX_OFFSET : 0;
439 otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id)
441 struct rte_mbuf mb_def;
444 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
445 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
446 offsetof(struct rte_mbuf, data_off) != 2);
447 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
448 offsetof(struct rte_mbuf, data_off) != 4);
449 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
450 offsetof(struct rte_mbuf, data_off) != 6);
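/* The checks above guarantee that data_off, refcnt, nb_segs and port are
 * four consecutive 16-bit fields forming one aligned 64-bit word, so the
 * Rx fast path can rearm an mbuf with a single 8-byte store of the
 * template built below.
 */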
452 mb_def.data_off = RTE_PKTMBUF_HEADROOM + nix_get_data_off(dev);
453 mb_def.port = port_id;
454 rte_mbuf_refcnt_set(&mb_def, 1);
456 /* Prevent compiler reordering: rearm_data covers previous fields */
457 rte_compiler_barrier();
458 tmp = (uint64_t *)&mb_def.rearm_data;
464 otx2_nix_rx_queue_release(void *rx_queue)
466 struct otx2_eth_rxq *rxq = rx_queue;
471 otx2_nix_dbg("Releasing rxq %u", rxq->rq);
472 nix_cq_rq_uninit(rxq->eth_dev, rxq);
477 otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
478 uint16_t nb_desc, unsigned int socket,
479 const struct rte_eth_rxconf *rx_conf,
480 struct rte_mempool *mp)
482 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
483 struct rte_mempool_ops *ops;
484 struct otx2_eth_rxq *rxq;
485 const char *platform_ops;
486 enum nix_q_size_e qsize;
492 /* Compile time check to make sure all fast path elements fit in a cache line */
493 RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_rxq, slow_path_start) >= 128);
496 if (rx_conf->rx_deferred_start == 1) {
497 otx2_err("Deferred Rx start is not supported");
501 platform_ops = rte_mbuf_platform_mempool_ops();
502 /* This driver needs octeontx2_npa mempool ops to work */
503 ops = rte_mempool_get_ops(mp->ops_index);
504 if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
505 otx2_err("mempool ops should be of octeontx2_npa type");
509 if (mp->pool_id == 0) {
510 otx2_err("Invalid pool_id");
514 /* Free memory prior to re-allocation if needed */
515 if (eth_dev->data->rx_queues[rq] != NULL) {
516 otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq);
517 otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]);
518 eth_dev->data->rx_queues[rq] = NULL;
521 offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
522 dev->rx_offloads |= offloads;
524 /* Find the CQ queue size */
525 qsize = nix_qsize_clampup_get(dev, nb_desc);
526 /* Allocate rxq memory */
527 rxq = rte_zmalloc_socket("otx2 rxq", sizeof(*rxq), OTX2_ALIGN, socket);
529 otx2_err("Failed to allocate rq=%d", rq);
534 rxq->eth_dev = eth_dev;
536 rxq->cq_door = dev->base + NIX_LF_CQ_OP_DOOR;
537 rxq->cq_status = (int64_t *)(dev->base + NIX_LF_CQ_OP_STATUS);
538 rxq->wdata = (uint64_t)rq << 32;
539 rxq->aura = npa_lf_aura_handle_to_aura(mp->pool_id);
540 rxq->mbuf_initializer = otx2_nix_rxq_mbuf_setup(dev,
541 eth_dev->data->port_id);
542 rxq->offloads = offloads;
544 rxq->qlen = nix_qsize_to_val(qsize);
546 rxq->lookup_mem = otx2_nix_fastpath_lookup_mem_get();
547 rxq->tstamp = &dev->tstamp;
549 /* Alloc completion queue */
550 rc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp);
552 otx2_err("Failed to allocate rxq=%u", rq);
556 rxq->qconf.socket_id = socket;
557 rxq->qconf.nb_desc = nb_desc;
558 rxq->qconf.mempool = mp;
559 memcpy(&rxq->qconf.conf.rx, rx_conf, sizeof(struct rte_eth_rxconf));
561 nix_rx_queue_reset(rxq);
562 otx2_nix_dbg("rq=%d pool=%s qsize=%d nb_desc=%d->%d",
563 rq, mp->name, qsize, nb_desc, rxq->qlen);
565 eth_dev->data->rx_queues[rq] = rxq;
566 eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;
568 /* Calculate the delta and freq mult between the PTP HI clock and the TSC.
569 * These are needed to derive the raw clock value from the TSC counter;
570 * the read_clock eth op returns that raw clock value.
572 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
573 otx2_ethdev_is_ptp_en(dev)) {
574 rc = otx2_nix_raw_clock_tsc_conv(dev);
576 otx2_err("Failed to calculate delta and freq mult");
584 otx2_nix_rx_queue_release(rxq);
589 static inline uint8_t
590 nix_sq_max_sqe_sz(struct otx2_eth_txq *txq)
593 * At most three segments can be supported with a W8 SQE; choose
594 * NIX_MAXSQESZ_W16 when the multi-segment offload is enabled.
596 if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
597 return NIX_MAXSQESZ_W16;
599 return NIX_MAXSQESZ_W8;
603 nix_rx_offload_flags(struct rte_eth_dev *eth_dev)
605 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
606 struct rte_eth_dev_data *data = eth_dev->data;
607 struct rte_eth_conf *conf = &data->dev_conf;
608 struct rte_eth_rxmode *rxmode = &conf->rxmode;
611 if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
612 (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
613 flags |= NIX_RX_OFFLOAD_RSS_F;
615 if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
616 DEV_RX_OFFLOAD_UDP_CKSUM))
617 flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
619 if (dev->rx_offloads & (DEV_RX_OFFLOAD_IPV4_CKSUM |
620 DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
621 flags |= NIX_RX_OFFLOAD_CHECKSUM_F;
623 if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
624 flags |= NIX_RX_MULTI_SEG_F;
626 if (dev->rx_offloads & (DEV_RX_OFFLOAD_VLAN_STRIP |
627 DEV_RX_OFFLOAD_QINQ_STRIP))
628 flags |= NIX_RX_OFFLOAD_VLAN_STRIP_F;
630 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
631 flags |= NIX_RX_OFFLOAD_TSTAMP_F;
633 if (!dev->ptype_disable)
634 flags |= NIX_RX_OFFLOAD_PTYPE_F;
640 nix_tx_offload_flags(struct rte_eth_dev *eth_dev)
642 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
643 uint64_t conf = dev->tx_offloads;
646 /* Fastpath is dependent on these enums */
647 RTE_BUILD_BUG_ON(PKT_TX_TCP_CKSUM != (1ULL << 52));
648 RTE_BUILD_BUG_ON(PKT_TX_SCTP_CKSUM != (2ULL << 52));
649 RTE_BUILD_BUG_ON(PKT_TX_UDP_CKSUM != (3ULL << 52));
650 RTE_BUILD_BUG_ON(PKT_TX_IP_CKSUM != (1ULL << 54));
651 RTE_BUILD_BUG_ON(PKT_TX_IPV4 != (1ULL << 55));
652 RTE_BUILD_BUG_ON(PKT_TX_OUTER_IP_CKSUM != (1ULL << 58));
653 RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV4 != (1ULL << 59));
654 RTE_BUILD_BUG_ON(PKT_TX_OUTER_IPV6 != (1ULL << 60));
655 RTE_BUILD_BUG_ON(PKT_TX_OUTER_UDP_CKSUM != (1ULL << 41));
656 RTE_BUILD_BUG_ON(RTE_MBUF_L2_LEN_BITS != 7);
657 RTE_BUILD_BUG_ON(RTE_MBUF_L3_LEN_BITS != 9);
658 RTE_BUILD_BUG_ON(RTE_MBUF_OUTL2_LEN_BITS != 7);
659 RTE_BUILD_BUG_ON(RTE_MBUF_OUTL3_LEN_BITS != 9);
660 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
661 offsetof(struct rte_mbuf, buf_iova) + 8);
662 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
663 offsetof(struct rte_mbuf, buf_iova) + 16);
664 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
665 offsetof(struct rte_mbuf, ol_flags) + 12);
666 RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, tx_offload) !=
667 offsetof(struct rte_mbuf, pool) + 2 * sizeof(void *));
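/* The offset checks above pin the relative mbuf field layout, presumably so
 * the (vector) Tx descriptor building code can load adjacent fields together.
 */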
669 if (conf & DEV_TX_OFFLOAD_VLAN_INSERT ||
670 conf & DEV_TX_OFFLOAD_QINQ_INSERT)
671 flags |= NIX_TX_OFFLOAD_VLAN_QINQ_F;
673 if (conf & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM ||
674 conf & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
675 flags |= NIX_TX_OFFLOAD_OL3_OL4_CSUM_F;
677 if (conf & DEV_TX_OFFLOAD_IPV4_CKSUM ||
678 conf & DEV_TX_OFFLOAD_TCP_CKSUM ||
679 conf & DEV_TX_OFFLOAD_UDP_CKSUM ||
680 conf & DEV_TX_OFFLOAD_SCTP_CKSUM)
681 flags |= NIX_TX_OFFLOAD_L3_L4_CSUM_F;
683 if (!(conf & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
684 flags |= NIX_TX_OFFLOAD_MBUF_NOFF_F;
686 if (conf & DEV_TX_OFFLOAD_MULTI_SEGS)
687 flags |= NIX_TX_MULTI_SEG_F;
689 /* Enable Inner checksum for TSO */
690 if (conf & DEV_TX_OFFLOAD_TCP_TSO)
691 flags |= (NIX_TX_OFFLOAD_TSO_F |
692 NIX_TX_OFFLOAD_L3_L4_CSUM_F);
694 /* Enable Inner and Outer checksum for Tunnel TSO */
695 if (conf & (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
696 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
697 DEV_TX_OFFLOAD_GRE_TNL_TSO))
698 flags |= (NIX_TX_OFFLOAD_TSO_F |
699 NIX_TX_OFFLOAD_OL3_OL4_CSUM_F |
700 NIX_TX_OFFLOAD_L3_L4_CSUM_F);
702 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP))
703 flags |= NIX_TX_OFFLOAD_TSTAMP_F;
709 nix_sq_init(struct otx2_eth_txq *txq)
711 struct otx2_eth_dev *dev = txq->dev;
712 struct otx2_mbox *mbox = dev->mbox;
713 struct nix_aq_enq_req *sq;
718 if (txq->sqb_pool->pool_id == 0)
721 rc = otx2_nix_tm_get_leaf_data(dev, txq->sq, &rr_quantum, &smq);
723 otx2_err("Failed to get sq->smq(leaf node), rc=%d", rc);
727 sq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
729 sq->ctype = NIX_AQ_CTYPE_SQ;
730 sq->op = NIX_AQ_INSTOP_INIT;
731 sq->sq.max_sqe_size = nix_sq_max_sqe_sz(txq);
734 sq->sq.smq_rr_quantum = rr_quantum;
735 sq->sq.default_chan = dev->tx_chan_base;
736 sq->sq.sqe_stype = NIX_STYPE_STF;
738 if (sq->sq.max_sqe_size == NIX_MAXSQESZ_W8)
739 sq->sq.sqe_stype = NIX_STYPE_STP;
741 npa_lf_aura_handle_to_aura(txq->sqb_pool->pool_id);
742 sq->sq.sq_int_ena = BIT(NIX_SQINT_LMT_ERR);
743 sq->sq.sq_int_ena |= BIT(NIX_SQINT_SQB_ALLOC_FAIL);
744 sq->sq.sq_int_ena |= BIT(NIX_SQINT_SEND_ERR);
745 sq->sq.sq_int_ena |= BIT(NIX_SQINT_MNQ_ERR);
747 /* Many to one reduction */
748 sq->sq.qint_idx = txq->sq % dev->qints;
750 return otx2_mbox_process(mbox);
754 nix_sq_uninit(struct otx2_eth_txq *txq)
756 struct otx2_eth_dev *dev = txq->dev;
757 struct otx2_mbox *mbox = dev->mbox;
758 struct ndc_sync_op *ndc_req;
759 struct nix_aq_enq_rsp *rsp;
760 struct nix_aq_enq_req *aq;
761 uint16_t sqes_per_sqb;
765 otx2_nix_dbg("Cleaning up sq %u", txq->sq);
767 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
769 aq->ctype = NIX_AQ_CTYPE_SQ;
770 aq->op = NIX_AQ_INSTOP_READ;
772 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
776 /* Check if sq is already cleaned up */
781 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
783 aq->ctype = NIX_AQ_CTYPE_SQ;
784 aq->op = NIX_AQ_INSTOP_WRITE;
786 aq->sq_mask.ena = ~aq->sq_mask.ena;
789 rc = otx2_mbox_process(mbox);
793 /* Read SQ and free sqb's */
794 aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
796 aq->ctype = NIX_AQ_CTYPE_SQ;
797 aq->op = NIX_AQ_INSTOP_READ;
799 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
804 otx2_err("SQ has pending sqe's");
806 count = aq->sq.sqb_count;
807 sqes_per_sqb = 1 << txq->sqes_per_sqb_log2;
808 /* Free SQB's that are used */
809 sqb_buf = (void *)rsp->sq.head_sqb;
813 next_sqb = *(void **)((uintptr_t)sqb_buf + (uint32_t)
814 ((sqes_per_sqb - 1) *
815 nix_sq_max_sqe_sz(txq)));
816 npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
822 /* Free next to use sqb */
823 if (rsp->sq.next_sqb)
824 npa_lf_aura_op_free(txq->sqb_pool->pool_id, 1,
827 /* Sync NDC-NIX-TX for LF */
828 ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
829 ndc_req->nix_lf_tx_sync = 1;
830 rc = otx2_mbox_process(mbox);
832 otx2_err("Error on NDC-NIX-TX LF sync, rc %d", rc);
838 nix_sqb_aura_limit_cfg(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
840 struct otx2_npa_lf *npa_lf = otx2_intra_dev_get_cfg()->npa_lf;
841 struct npa_aq_enq_req *aura_req;
843 aura_req = otx2_mbox_alloc_msg_npa_aq_enq(npa_lf->mbox);
844 aura_req->aura_id = npa_lf_aura_handle_to_aura(mp->pool_id);
845 aura_req->ctype = NPA_AQ_CTYPE_AURA;
846 aura_req->op = NPA_AQ_INSTOP_WRITE;
848 aura_req->aura.limit = nb_sqb_bufs;
849 aura_req->aura_mask.limit = ~(aura_req->aura_mask.limit);
851 return otx2_mbox_process(npa_lf->mbox);
855 nix_alloc_sqb_pool(int port, struct otx2_eth_txq *txq, uint16_t nb_desc)
857 struct otx2_eth_dev *dev = txq->dev;
858 uint16_t sqes_per_sqb, nb_sqb_bufs;
859 char name[RTE_MEMPOOL_NAMESIZE];
860 struct rte_mempool_objsz sz;
861 struct npa_aura_s *aura;
862 uint32_t tmp, blk_sz;
864 aura = (struct npa_aura_s *)((uintptr_t)txq->fc_mem + OTX2_ALIGN);
865 snprintf(name, sizeof(name), "otx2_sqb_pool_%d_%d", port, txq->sq);
866 blk_sz = dev->sqb_size;
868 if (nix_sq_max_sqe_sz(txq) == NIX_MAXSQESZ_W16)
869 sqes_per_sqb = (dev->sqb_size / 8) / 16;
871 sqes_per_sqb = (dev->sqb_size / 8) / 8;
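/* Example (assuming the usual 4 KB SQB reported by the AF): a 128 B (W16)
 * SQE gives 32 SQEs per SQB, a 64 B (W8) SQE gives 64.
 */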
873 nb_sqb_bufs = nb_desc / sqes_per_sqb;
874 /* Clamp up to devarg passed SQB count */
875 nb_sqb_bufs = RTE_MIN(dev->max_sqb_count, RTE_MAX(NIX_DEF_SQB,
876 nb_sqb_bufs + NIX_SQB_LIST_SPACE));
878 txq->sqb_pool = rte_mempool_create_empty(name, NIX_MAX_SQB, blk_sz,
880 MEMPOOL_F_NO_SPREAD);
881 txq->nb_sqb_bufs = nb_sqb_bufs;
882 txq->sqes_per_sqb_log2 = (uint16_t)rte_log2_u32(sqes_per_sqb);
883 txq->nb_sqb_bufs_adj = nb_sqb_bufs -
884 RTE_ALIGN_MUL_CEIL(nb_sqb_bufs, sqes_per_sqb) / sqes_per_sqb;
885 txq->nb_sqb_bufs_adj =
886 (NIX_SQB_LOWER_THRESH * txq->nb_sqb_bufs_adj) / 100;
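/* Reserve roughly one SQB per sqes_per_sqb and keep only
 * NIX_SQB_LOWER_THRESH percent of the rest; this adjusted count is
 * presumably what the Tx path uses as its flow-control threshold.
 */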
888 if (txq->sqb_pool == NULL) {
889 otx2_err("Failed to allocate sqe mempool");
893 memset(aura, 0, sizeof(*aura));
895 aura->fc_addr = txq->fc_iova;
896 aura->fc_hyst_bits = 0; /* Store count on all updates */
897 if (rte_mempool_set_ops_byname(txq->sqb_pool, "octeontx2_npa", aura)) {
898 otx2_err("Failed to set ops for sqe mempool");
901 if (rte_mempool_populate_default(txq->sqb_pool) < 0) {
902 otx2_err("Failed to populate sqe mempool");
906 tmp = rte_mempool_calc_obj_size(blk_sz, MEMPOOL_F_NO_SPREAD, &sz);
907 if (dev->sqb_size != sz.elt_size) {
908 otx2_err("sqe pool block size is not expected %d != %d",
913 nix_sqb_aura_limit_cfg(txq->sqb_pool, txq->nb_sqb_bufs);
921 otx2_nix_form_default_desc(struct otx2_eth_txq *txq)
923 struct nix_send_ext_s *send_hdr_ext;
924 struct nix_send_hdr_s *send_hdr;
925 struct nix_send_mem_s *send_mem;
926 union nix_send_sg_s *sg;
928 /* Initialize the fields based on basic single segment packet */
929 memset(&txq->cmd, 0, sizeof(txq->cmd));
931 if (txq->dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
932 send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
933 /* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
934 send_hdr->w0.sizem1 = 2;
936 send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2];
937 send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
938 if (txq->dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F) {
939 /* Default: one seg packet would have:
940 * 2(HDR) + 2(EXT) + 1(SG) + 1(IOVA) + 2(MEM)
943 send_hdr->w0.sizem1 = 3;
944 send_hdr_ext->w0.tstmp = 1;
946 /* send_mem starts at 64-bit word offset
947 * send_hdr->w0.sizem1 * 2 within the command buffer.
949 send_mem = (struct nix_send_mem_s *)(txq->cmd +
950 (send_hdr->w0.sizem1 << 1));
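/* With sizem1 = 3 the command is 8 words: HDR[0-1], EXT[2-3], SG[4],
 * IOVA[5], MEM[6-7]; send_mem therefore starts at word sizem1 * 2 = 6.
 */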
951 send_mem->subdc = NIX_SUBDC_MEM;
952 send_mem->alg = NIX_SENDMEMALG_SETTSTMP;
953 send_mem->addr = txq->dev->tstamp.tx_tstamp_iova;
955 sg = (union nix_send_sg_s *)&txq->cmd[4];
957 send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
958 /* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
959 send_hdr->w0.sizem1 = 1;
960 sg = (union nix_send_sg_s *)&txq->cmd[2];
963 send_hdr->w0.sq = txq->sq;
964 sg->subdc = NIX_SUBDC_SG;
966 sg->ld_type = NIX_SENDLDTYPE_LDD;
972 otx2_nix_tx_queue_release(void *_txq)
974 struct otx2_eth_txq *txq = _txq;
975 struct rte_eth_dev *eth_dev;
980 eth_dev = txq->dev->eth_dev;
982 otx2_nix_dbg("Releasing txq %u", txq->sq);
984 /* Flush and disable tm */
985 otx2_nix_tm_sw_xoff(txq, eth_dev->data->dev_started);
987 /* Free sqb's and disable sq */
991 rte_mempool_free(txq->sqb_pool);
992 txq->sqb_pool = NULL;
999 otx2_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t sq,
1000 uint16_t nb_desc, unsigned int socket_id,
1001 const struct rte_eth_txconf *tx_conf)
1003 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1004 const struct rte_memzone *fc;
1005 struct otx2_eth_txq *txq;
1011 /* Compile time check to make sure all fast path elements fit in a cache line */
1012 RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_txq, slow_path_start) >= 128);
1014 if (tx_conf->tx_deferred_start) {
1015 otx2_err("Tx deferred start is not supported");
1019 /* Free memory prior to re-allocation if needed. */
1020 if (eth_dev->data->tx_queues[sq] != NULL) {
1021 otx2_nix_dbg("Freeing memory prior to re-allocation %d", sq);
1022 otx2_nix_tx_queue_release(eth_dev->data->tx_queues[sq]);
1023 eth_dev->data->tx_queues[sq] = NULL;
1026 /* Find the expected offloads for this queue */
1027 offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
1029 /* Allocating tx queue data structure */
1030 txq = rte_zmalloc_socket("otx2_ethdev TX queue", sizeof(*txq),
1031 OTX2_ALIGN, socket_id);
1033 otx2_err("Failed to alloc txq=%d", sq);
1039 txq->sqb_pool = NULL;
1040 txq->offloads = offloads;
1041 dev->tx_offloads |= offloads;
1044 * Allocate memory for flow control updates from HW.
1045 * Allocate one cache line so that it fits all FC_STYPE modes.
1047 fc = rte_eth_dma_zone_reserve(eth_dev, "fcmem", sq,
1048 OTX2_ALIGN + sizeof(struct npa_aura_s),
1049 OTX2_ALIGN, dev->node);
1051 otx2_err("Failed to allocate mem for fcmem");
1055 txq->fc_iova = fc->iova;
1056 txq->fc_mem = fc->addr;
1058 /* Initialize the aura sqb pool */
1059 rc = nix_alloc_sqb_pool(eth_dev->data->port_id, txq, nb_desc);
1061 otx2_err("Failed to alloc sqe pool rc=%d", rc);
1065 /* Initialize the SQ */
1066 rc = nix_sq_init(txq);
1068 otx2_err("Failed to init sq=%d context", sq);
1072 txq->fc_cache_pkts = 0;
1073 txq->io_addr = dev->base + NIX_LF_OP_SENDX(0);
1074 /* Evenly distribute LMT slot for each sq */
1075 txq->lmt_addr = (void *)(dev->lmt_addr + ((sq & LMT_SLOT_MASK) << 12));
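/* Each SQ gets its own LMT slot, 4 KB (1 << 12) apart, so LMTST-based
 * descriptor submissions from different SQs do not collide.
 */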
1077 txq->qconf.socket_id = socket_id;
1078 txq->qconf.nb_desc = nb_desc;
1079 memcpy(&txq->qconf.conf.tx, tx_conf, sizeof(struct rte_eth_txconf));
1081 otx2_nix_form_default_desc(txq);
1083 otx2_nix_dbg("sq=%d fc=%p offload=0x%" PRIx64 " sqb=0x%" PRIx64 ""
1084 " lmt_addr=%p nb_sqb_bufs=%d sqes_per_sqb_log2=%d", sq,
1085 fc->addr, offloads, txq->sqb_pool->pool_id, txq->lmt_addr,
1086 txq->nb_sqb_bufs, txq->sqes_per_sqb_log2);
1087 eth_dev->data->tx_queues[sq] = txq;
1088 eth_dev->data->tx_queue_state[sq] = RTE_ETH_QUEUE_STATE_STOPPED;
1092 otx2_nix_tx_queue_release(txq);
1098 nix_store_queue_cfg_and_then_release(struct rte_eth_dev *eth_dev)
1100 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1101 struct otx2_eth_qconf *tx_qconf = NULL;
1102 struct otx2_eth_qconf *rx_qconf = NULL;
1103 struct otx2_eth_txq **txq;
1104 struct otx2_eth_rxq **rxq;
1105 int i, nb_rxq, nb_txq;
1107 nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
1108 nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
1110 tx_qconf = malloc(nb_txq * sizeof(*tx_qconf));
1111 if (tx_qconf == NULL) {
1112 otx2_err("Failed to allocate memory for tx_qconf");
1116 rx_qconf = malloc(nb_rxq * sizeof(*rx_qconf));
1117 if (rx_qconf == NULL) {
1118 otx2_err("Failed to allocate memory for rx_qconf");
1122 txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
1123 for (i = 0; i < nb_txq; i++) {
1124 if (txq[i] == NULL) {
1125 otx2_err("txq[%d] is already released", i);
1128 memcpy(&tx_qconf[i], &txq[i]->qconf, sizeof(*tx_qconf));
1129 otx2_nix_tx_queue_release(txq[i]);
1130 eth_dev->data->tx_queues[i] = NULL;
1133 rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
1134 for (i = 0; i < nb_rxq; i++) {
1135 if (rxq[i] == NULL) {
1136 otx2_err("rxq[%d] is already released", i);
1139 memcpy(&rx_qconf[i], &rxq[i]->qconf, sizeof(*rx_qconf));
1140 otx2_nix_rx_queue_release(rxq[i]);
1141 eth_dev->data->rx_queues[i] = NULL;
1144 dev->tx_qconf = tx_qconf;
1145 dev->rx_qconf = rx_qconf;
1158 nix_restore_queue_cfg(struct rte_eth_dev *eth_dev)
1160 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1161 struct otx2_eth_qconf *tx_qconf = dev->tx_qconf;
1162 struct otx2_eth_qconf *rx_qconf = dev->rx_qconf;
1163 struct otx2_eth_txq **txq;
1164 struct otx2_eth_rxq **rxq;
1165 int rc, i, nb_rxq, nb_txq;
1167 nb_rxq = RTE_MIN(dev->configured_nb_rx_qs, eth_dev->data->nb_rx_queues);
1168 nb_txq = RTE_MIN(dev->configured_nb_tx_qs, eth_dev->data->nb_tx_queues);
1171 /* Set up Tx & Rx queues with the previously stored configuration so
1172 * that the queues remain functional when ports are started without
1173 * reconfiguring the queues.
1175 * The usual reconfigure sequence is:
1176 * port_configure() { queue_release(); queue_setup(); }
1181 * queue_configure() { queue_release(); queue_setup(); }
1188 * In some applications' control path, queue_configure() would
1189 * NOT be invoked for TXQs/RXQs in port_configure().
1190 * In such cases the queues remain functional after start because
1191 * they were already set up in port_configure().
1193 for (i = 0; i < nb_txq; i++) {
1194 rc = otx2_nix_tx_queue_setup(eth_dev, i, tx_qconf[i].nb_desc,
1195 tx_qconf[i].socket_id,
1196 &tx_qconf[i].conf.tx);
1198 otx2_err("Failed to setup tx queue rc=%d", rc);
1199 txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
1200 for (i -= 1; i >= 0; i--)
1201 otx2_nix_tx_queue_release(txq[i]);
1206 free(tx_qconf); tx_qconf = NULL;
1208 for (i = 0; i < nb_rxq; i++) {
1209 rc = otx2_nix_rx_queue_setup(eth_dev, i, rx_qconf[i].nb_desc,
1210 rx_qconf[i].socket_id,
1211 &rx_qconf[i].conf.rx,
1212 rx_qconf[i].mempool);
1214 otx2_err("Failed to setup rx queue rc=%d", rc);
1215 rxq = (struct otx2_eth_rxq **)eth_dev->data->rx_queues;
1216 for (i -= 1; i >= 0; i--)
1217 otx2_nix_rx_queue_release(rxq[i]);
1218 goto release_tx_queues;
1222 free(rx_qconf); rx_qconf = NULL;
1227 txq = (struct otx2_eth_txq **)eth_dev->data->tx_queues;
1228 for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1229 otx2_nix_tx_queue_release(txq[i]);
1240 nix_eth_nop_burst(void *queue, struct rte_mbuf **mbufs, uint16_t pkts)
1242 RTE_SET_USED(queue);
1243 RTE_SET_USED(mbufs);
1250 nix_set_nop_rxtx_function(struct rte_eth_dev *eth_dev)
1252 /* These dummy functions are required to support
1253 * applications that reconfigure queues without first
1254 * stopping the Rx/Tx burst threads (e.g. the KNI app).
1255 * When the queue context is saved, txqs/rxqs are released,
1256 * which would crash the application since Rx/Tx burst is
1257 * still running on other lcores.
1259 eth_dev->tx_pkt_burst = nix_eth_nop_burst;
1260 eth_dev->rx_pkt_burst = nix_eth_nop_burst;
1265 nix_lso_tcp(struct nix_lso_format_cfg *req, bool v4)
1267 volatile struct nix_lso_format *field;
1269 /* Format works only with TCP packet marked by OL3/OL4 */
1270 field = (volatile struct nix_lso_format *)&req->fields[0];
1271 req->field_mask = NIX_LSO_FIELD_MASK;
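/* Each LSO format field describes one per-segment header fix-up: 'layer'
 * selects the outer/inner L3/L4 header, 'offset'/'sizem1' locate the field
 * within it, and 'alg' selects the adjustment (add payload length, add
 * segment number, add offset or rewrite TCP flags).
 */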
1272 /* Outer IPv4/IPv6 */
1273 field->layer = NIX_TXLAYER_OL3;
1274 field->offset = v4 ? 2 : 4;
1275 field->sizem1 = 1; /* 2B */
1276 field->alg = NIX_LSOALG_ADD_PAYLEN;
1280 field->layer = NIX_TXLAYER_OL3;
1283 /* Incremented linearly per segment */
1284 field->alg = NIX_LSOALG_ADD_SEGNUM;
1288 /* TCP sequence number update */
1289 field->layer = NIX_TXLAYER_OL4;
1291 field->sizem1 = 3; /* 4 bytes */
1292 field->alg = NIX_LSOALG_ADD_OFFSET;
1294 /* TCP flags field */
1295 field->layer = NIX_TXLAYER_OL4;
1298 field->alg = NIX_LSOALG_TCP_FLAGS;
1303 nix_lso_udp_tun_tcp(struct nix_lso_format_cfg *req,
1304 bool outer_v4, bool inner_v4)
1306 volatile struct nix_lso_format *field;
1308 field = (volatile struct nix_lso_format *)&req->fields[0];
1309 req->field_mask = NIX_LSO_FIELD_MASK;
1310 /* Outer IPv4/IPv6 len */
1311 field->layer = NIX_TXLAYER_OL3;
1312 field->offset = outer_v4 ? 2 : 4;
1313 field->sizem1 = 1; /* 2B */
1314 field->alg = NIX_LSOALG_ADD_PAYLEN;
1318 field->layer = NIX_TXLAYER_OL3;
1321 /* Incremented linearly per segment */
1322 field->alg = NIX_LSOALG_ADD_SEGNUM;
1326 /* Outer UDP length */
1327 field->layer = NIX_TXLAYER_OL4;
1330 field->alg = NIX_LSOALG_ADD_PAYLEN;
1333 /* Inner IPv4/IPv6 */
1334 field->layer = NIX_TXLAYER_IL3;
1335 field->offset = inner_v4 ? 2 : 4;
1336 field->sizem1 = 1; /* 2B */
1337 field->alg = NIX_LSOALG_ADD_PAYLEN;
1341 field->layer = NIX_TXLAYER_IL3;
1344 /* Incremented linearly per segment */
1345 field->alg = NIX_LSOALG_ADD_SEGNUM;
1349 /* TCP sequence number update */
1350 field->layer = NIX_TXLAYER_IL4;
1352 field->sizem1 = 3; /* 4 bytes */
1353 field->alg = NIX_LSOALG_ADD_OFFSET;
1356 /* TCP flags field */
1357 field->layer = NIX_TXLAYER_IL4;
1360 field->alg = NIX_LSOALG_TCP_FLAGS;
1365 nix_lso_tun_tcp(struct nix_lso_format_cfg *req,
1366 bool outer_v4, bool inner_v4)
1368 volatile struct nix_lso_format *field;
1370 field = (volatile struct nix_lso_format *)&req->fields[0];
1371 req->field_mask = NIX_LSO_FIELD_MASK;
1372 /* Outer IPv4/IPv6 len */
1373 field->layer = NIX_TXLAYER_OL3;
1374 field->offset = outer_v4 ? 2 : 4;
1375 field->sizem1 = 1; /* 2B */
1376 field->alg = NIX_LSOALG_ADD_PAYLEN;
1380 field->layer = NIX_TXLAYER_OL3;
1383 /* Incremented linearly per segment */
1384 field->alg = NIX_LSOALG_ADD_SEGNUM;
1388 /* Inner IPv4/IPv6 */
1389 field->layer = NIX_TXLAYER_IL3;
1390 field->offset = inner_v4 ? 2 : 4;
1391 field->sizem1 = 1; /* 2B */
1392 field->alg = NIX_LSOALG_ADD_PAYLEN;
1396 field->layer = NIX_TXLAYER_IL3;
1399 /* Incremented linearly per segment */
1400 field->alg = NIX_LSOALG_ADD_SEGNUM;
1404 /* TCP sequence number update */
1405 field->layer = NIX_TXLAYER_IL4;
1407 field->sizem1 = 3; /* 4 bytes */
1408 field->alg = NIX_LSOALG_ADD_OFFSET;
1411 /* TCP flags field */
1412 field->layer = NIX_TXLAYER_IL4;
1415 field->alg = NIX_LSOALG_TCP_FLAGS;
1420 nix_setup_lso_formats(struct otx2_eth_dev *dev)
1422 struct otx2_mbox *mbox = dev->mbox;
1423 struct nix_lso_format_cfg_rsp *rsp;
1424 struct nix_lso_format_cfg *req;
1428 /* Skip if TSO was not requested */
1429 if (!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSO_F))
1434 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1435 nix_lso_tcp(req, true);
1436 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1440 base = rsp->lso_format_idx;
1441 if (base != NIX_LSO_FORMAT_IDX_TSOV4)
1443 dev->lso_base_idx = base;
1444 otx2_nix_dbg("tcpv4 lso fmt=%u", base);
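/* The remaining formats are expected at consecutive indices after the base;
 * each check below verifies lso_format_idx == base + N and bails out otherwise.
 */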
1450 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1451 nix_lso_tcp(req, false);
1452 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1456 if (rsp->lso_format_idx != base + 1)
1458 otx2_nix_dbg("tcpv6 lso fmt=%u\n", base + 1);
1461 * IPv4/UDP/TUN HDR/IPv4/TCP LSO
1463 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1464 nix_lso_udp_tun_tcp(req, true, true);
1465 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1469 if (rsp->lso_format_idx != base + 2)
1471 otx2_nix_dbg("udp tun v4v4 fmt=%u\n", base + 2);
1474 * IPv4/UDP/TUN HDR/IPv6/TCP LSO
1476 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1477 nix_lso_udp_tun_tcp(req, true, false);
1478 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1482 if (rsp->lso_format_idx != base + 3)
1484 otx2_nix_dbg("udp tun v4v6 fmt=%u\n", base + 3);
1487 * IPv6/UDP/TUN HDR/IPv4/TCP LSO
1489 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1490 nix_lso_udp_tun_tcp(req, false, true);
1491 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1495 if (rsp->lso_format_idx != base + 4)
1497 otx2_nix_dbg("udp tun v6v4 fmt=%u\n", base + 4);
1500 * IPv6/UDP/TUN HDR/IPv6/TCP LSO
1502 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1503 nix_lso_udp_tun_tcp(req, false, false);
1504 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1507 if (rsp->lso_format_idx != base + 5)
1509 otx2_nix_dbg("udp tun v6v6 fmt=%u\n", base + 5);
1512 * IPv4/TUN HDR/IPv4/TCP LSO
1514 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1515 nix_lso_tun_tcp(req, true, true);
1516 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1520 if (rsp->lso_format_idx != base + 6)
1522 otx2_nix_dbg("tun v4v4 fmt=%u\n", base + 6);
1525 * IPv4/TUN HDR/IPv6/TCP LSO
1527 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1528 nix_lso_tun_tcp(req, true, false);
1529 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1533 if (rsp->lso_format_idx != base + 7)
1535 otx2_nix_dbg("tun v4v6 fmt=%u\n", base + 7);
1538 * IPv6/TUN HDR/IPv4/TCP LSO
1540 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1541 nix_lso_tun_tcp(req, false, true);
1542 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1546 if (rsp->lso_format_idx != base + 8)
1548 otx2_nix_dbg("tun v6v4 fmt=%u\n", base + 8);
1551 * IPv6/TUN HDR/IPv6/TCP LSO
1553 req = otx2_mbox_alloc_msg_nix_lso_format_cfg(mbox);
1554 nix_lso_tun_tcp(req, false, false);
1555 rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
1558 if (rsp->lso_format_idx != base + 9)
1560 otx2_nix_dbg("tun v6v6 fmt=%u\n", base + 9);
1565 otx2_nix_configure(struct rte_eth_dev *eth_dev)
1567 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1568 struct rte_eth_dev_data *data = eth_dev->data;
1569 struct rte_eth_conf *conf = &data->dev_conf;
1570 struct rte_eth_rxmode *rxmode = &conf->rxmode;
1571 struct rte_eth_txmode *txmode = &conf->txmode;
1572 char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
1573 struct rte_ether_addr *ea;
1574 uint8_t nb_rxq, nb_txq;
1580 if (rte_eal_has_hugepages() == 0) {
1581 otx2_err("Huge page is not configured");
1582 goto fail_configure;
1585 if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
1586 otx2_err("Setting link speed/duplex not supported");
1587 goto fail_configure;
1590 if (conf->dcb_capability_en == 1) {
1591 otx2_err("dcb enable is not supported");
1592 goto fail_configure;
1595 if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1596 otx2_err("Flow director is not supported");
1597 goto fail_configure;
1600 if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
1601 rxmode->mq_mode != ETH_MQ_RX_RSS) {
1602 otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
1603 goto fail_configure;
1606 if (txmode->mq_mode != ETH_MQ_TX_NONE) {
1607 otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
1608 goto fail_configure;
1611 if (otx2_dev_is_Ax(dev) &&
1612 (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
1613 ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
1614 (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
1615 otx2_err("Outer IP and SCTP checksum unsupported");
1616 goto fail_configure;
1619 /* Free the resources allocated from the previous configure */
1620 if (dev->configured == 1) {
1621 otx2_nix_rxchan_bpid_cfg(eth_dev, false);
1622 otx2_nix_vlan_fini(eth_dev);
1623 otx2_nix_mc_addr_list_uninstall(eth_dev);
1624 otx2_flow_free_all_resources(dev);
1625 oxt2_nix_unregister_queue_irqs(eth_dev);
1626 if (eth_dev->data->dev_conf.intr_conf.rxq)
1627 oxt2_nix_unregister_cq_irqs(eth_dev);
1628 nix_set_nop_rxtx_function(eth_dev);
1629 rc = nix_store_queue_cfg_and_then_release(eth_dev);
1631 goto fail_configure;
1632 otx2_nix_tm_fini(eth_dev);
1636 dev->rx_offloads = rxmode->offloads;
1637 dev->tx_offloads = txmode->offloads;
1638 dev->rx_offload_flags |= nix_rx_offload_flags(eth_dev);
1639 dev->tx_offload_flags |= nix_tx_offload_flags(eth_dev);
1640 dev->rss_info.rss_grps = NIX_RSS_GRPS;
1642 nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
1643 nb_txq = RTE_MAX(data->nb_tx_queues, 1);
1645 /* Alloc a nix lf */
1646 rc = nix_lf_alloc(dev, nb_rxq, nb_txq);
1648 otx2_err("Failed to init nix_lf rc=%d", rc);
1653 dev->npc_flow.switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
1654 otx2_err("Both PTP and switch header enabled");
1658 rc = nix_lf_switch_header_type_enable(dev, true);
1660 otx2_err("Failed to enable switch type nix_lf rc=%d", rc);
1664 rc = nix_setup_lso_formats(dev);
1666 otx2_err("failed to setup nix lso format fields, rc=%d", rc);
1671 rc = otx2_nix_rss_config(eth_dev);
1673 otx2_err("Failed to configure rss rc=%d", rc);
1677 /* Init the default TM scheduler hierarchy */
1678 rc = otx2_nix_tm_init_default(eth_dev);
1680 otx2_err("Failed to init traffic manager rc=%d", rc);
1684 rc = otx2_nix_vlan_offload_init(eth_dev);
1686 otx2_err("Failed to init vlan offload rc=%d", rc);
1690 /* Register queue IRQs */
1691 rc = oxt2_nix_register_queue_irqs(eth_dev);
1693 otx2_err("Failed to register queue interrupts rc=%d", rc);
1697 /* Register cq IRQs */
1698 if (eth_dev->data->dev_conf.intr_conf.rxq) {
1699 if (eth_dev->data->nb_rx_queues > dev->cints) {
1700 otx2_err("Rx interrupt cannot be enabled, rxq > %d",
1704 /* The Rx interrupt feature cannot work with vector mode because
1705 * vector mode does not process packets until at least 4 packets are
1706 * received, while CQ interrupts are generated even for a single packet.
1709 dev->scalar_ena = true;
1711 rc = oxt2_nix_register_cq_irqs(eth_dev);
1713 otx2_err("Failed to register CQ interrupts rc=%d", rc);
1718 /* Configure loop back mode */
1719 rc = cgx_intlbk_enable(dev, eth_dev->data->dev_conf.lpbk_mode);
1721 otx2_err("Failed to configure cgx loop back mode rc=%d", rc);
1725 rc = otx2_nix_rxchan_bpid_cfg(eth_dev, true);
1727 otx2_err("Failed to configure nix rx chan bpid cfg rc=%d", rc);
1731 rc = otx2_nix_mc_addr_list_install(eth_dev);
1733 otx2_err("Failed to install mc address list rc=%d", rc);
1738 * Restore the queue configuration when this configure follows an
1739 * earlier one and the application has not invoked queue setup again.
1741 if (dev->configured == 1) {
1742 rc = nix_restore_queue_cfg(eth_dev);
1744 goto uninstall_mc_list;
1747 /* Update the mac address */
1748 ea = eth_dev->data->mac_addrs;
1749 memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
1750 if (rte_is_zero_ether_addr(ea))
1751 rte_eth_random_addr((uint8_t *)ea);
1753 rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);
1755 otx2_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
1756 " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64 ""
1757 " rx_flags=0x%x tx_flags=0x%x",
1758 eth_dev->data->port_id, ea_fmt, nb_rxq,
1759 nb_txq, dev->rx_offloads, dev->tx_offloads,
1760 dev->rx_offload_flags, dev->tx_offload_flags);
1763 dev->configured = 1;
1764 dev->configured_nb_rx_qs = data->nb_rx_queues;
1765 dev->configured_nb_tx_qs = data->nb_tx_queues;
1769 otx2_nix_mc_addr_list_uninstall(eth_dev);
1771 oxt2_nix_unregister_cq_irqs(eth_dev);
1773 oxt2_nix_unregister_queue_irqs(eth_dev);
1775 otx2_nix_vlan_fini(eth_dev);
1777 otx2_nix_tm_fini(eth_dev);
1781 dev->rx_offload_flags &= ~nix_rx_offload_flags(eth_dev);
1782 dev->tx_offload_flags &= ~nix_tx_offload_flags(eth_dev);
1784 dev->configured = 0;
1789 otx2_nix_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
1791 struct rte_eth_dev_data *data = eth_dev->data;
1792 struct otx2_eth_txq *txq;
1795 txq = eth_dev->data->tx_queues[qidx];
1797 if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
1800 rc = otx2_nix_sq_sqb_aura_fc(txq, true);
1802 otx2_err("Failed to enable sqb aura fc, txq=%u, rc=%d",
1807 data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
1814 otx2_nix_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
1816 struct rte_eth_dev_data *data = eth_dev->data;
1817 struct otx2_eth_txq *txq;
1820 txq = eth_dev->data->tx_queues[qidx];
1822 if (data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
1825 txq->fc_cache_pkts = 0;
1827 rc = otx2_nix_sq_sqb_aura_fc(txq, false);
1829 otx2_err("Failed to disable sqb aura fc, txq=%u, rc=%d",
1834 data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
1841 otx2_nix_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t qidx)
1843 struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
1844 struct rte_eth_dev_data *data = eth_dev->data;
1847 if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED)
1850 rc = nix_rq_enb_dis(rxq->eth_dev, rxq, true);
1852 otx2_err("Failed to enable rxq=%u, rc=%d", qidx, rc);
1856 data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
1863 otx2_nix_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t qidx)
1865 struct otx2_eth_rxq *rxq = eth_dev->data->rx_queues[qidx];
1866 struct rte_eth_dev_data *data = eth_dev->data;
1869 if (data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED)
1872 rc = nix_rq_enb_dis(rxq->eth_dev, rxq, false);
1874 otx2_err("Failed to disable rxq=%u, rc=%d", qidx, rc);
1878 data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
1885 otx2_nix_dev_stop(struct rte_eth_dev *eth_dev)
1887 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1888 struct rte_mbuf *rx_pkts[32];
1889 struct otx2_eth_rxq *rxq;
1890 int count, i, j, rc;
1892 nix_lf_switch_header_type_enable(dev, false);
1893 nix_cgx_stop_link_event(dev);
1894 npc_rx_disable(dev);
1896 /* Stop rx queues and free up pkts pending */
1897 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1898 rc = otx2_nix_rx_queue_stop(eth_dev, i);
1902 rxq = eth_dev->data->rx_queues[i];
1903 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1905 for (j = 0; j < count; j++)
1906 rte_pktmbuf_free(rx_pkts[j]);
1907 count = dev->rx_pkt_burst_no_offload(rxq, rx_pkts, 32);
1911 /* Stop tx queues */
1912 for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
1913 otx2_nix_tx_queue_stop(eth_dev, i);
1917 otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
1919 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
1922 /* MTU recalculation should be avoided here if PTP is enabled by the PF,
1923 * as otx2_nix_recalc_mtu is already invoked from otx2_nix_ptp_enable_vf
1926 if (eth_dev->data->nb_rx_queues != 0 && !otx2_ethdev_is_ptp_en(dev)) {
1927 rc = otx2_nix_recalc_mtu(eth_dev);
1932 /* Start rx queues */
1933 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
1934 rc = otx2_nix_rx_queue_start(eth_dev, i);
1939 /* Start tx queues */
1940 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
1941 rc = otx2_nix_tx_queue_start(eth_dev, i);
1946 rc = otx2_nix_update_flow_ctrl_mode(eth_dev);
1948 otx2_err("Failed to update flow ctrl mode %d", rc);
1952 /* Enable PTP if it was requested by the app or if it is already
1953 * enabled in PF owning this VF
1955 memset(&dev->tstamp, 0, sizeof(struct otx2_timesync_info));
1956 if ((dev->rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) ||
1957 otx2_ethdev_is_ptp_en(dev))
1958 otx2_nix_timesync_enable(eth_dev);
1960 otx2_nix_timesync_disable(eth_dev);
1962 /* Update the VF about the data offset being shifted by 8 bytes if PTP
1963 * is already enabled in the PF owning this VF
1965 if (otx2_ethdev_is_ptp_en(dev) && otx2_dev_is_vf(dev))
1966 otx2_nix_ptp_enable_vf(eth_dev);
1968 rc = npc_rx_enable(dev);
1970 otx2_err("Failed to enable NPC rx %d", rc);
1974 otx2_nix_toggle_flag_link_cfg(dev, true);
1976 rc = nix_cgx_start_link_event(dev);
1978 otx2_err("Failed to start cgx link event %d", rc);
1982 otx2_nix_toggle_flag_link_cfg(dev, false);
1983 otx2_eth_set_tx_function(eth_dev);
1984 otx2_eth_set_rx_function(eth_dev);
1989 npc_rx_disable(dev);
1990 otx2_nix_toggle_flag_link_cfg(dev, false);
1994 static int otx2_nix_dev_reset(struct rte_eth_dev *eth_dev);
1995 static void otx2_nix_dev_close(struct rte_eth_dev *eth_dev);
1997 /* Initialize and register driver with DPDK Application */
1998 static const struct eth_dev_ops otx2_eth_dev_ops = {
1999 .dev_infos_get = otx2_nix_info_get,
2000 .dev_configure = otx2_nix_configure,
2001 .link_update = otx2_nix_link_update,
2002 .tx_queue_setup = otx2_nix_tx_queue_setup,
2003 .tx_queue_release = otx2_nix_tx_queue_release,
2004 .rx_queue_setup = otx2_nix_rx_queue_setup,
2005 .rx_queue_release = otx2_nix_rx_queue_release,
2006 .dev_start = otx2_nix_dev_start,
2007 .dev_stop = otx2_nix_dev_stop,
2008 .dev_close = otx2_nix_dev_close,
2009 .tx_queue_start = otx2_nix_tx_queue_start,
2010 .tx_queue_stop = otx2_nix_tx_queue_stop,
2011 .rx_queue_start = otx2_nix_rx_queue_start,
2012 .rx_queue_stop = otx2_nix_rx_queue_stop,
2013 .dev_set_link_up = otx2_nix_dev_set_link_up,
2014 .dev_set_link_down = otx2_nix_dev_set_link_down,
2015 .dev_supported_ptypes_get = otx2_nix_supported_ptypes_get,
2016 .dev_ptypes_set = otx2_nix_ptypes_set,
2017 .dev_reset = otx2_nix_dev_reset,
2018 .stats_get = otx2_nix_dev_stats_get,
2019 .stats_reset = otx2_nix_dev_stats_reset,
2020 .get_reg = otx2_nix_dev_get_reg,
2021 .mtu_set = otx2_nix_mtu_set,
2022 .mac_addr_add = otx2_nix_mac_addr_add,
2023 .mac_addr_remove = otx2_nix_mac_addr_del,
2024 .mac_addr_set = otx2_nix_mac_addr_set,
2025 .set_mc_addr_list = otx2_nix_set_mc_addr_list,
2026 .promiscuous_enable = otx2_nix_promisc_enable,
2027 .promiscuous_disable = otx2_nix_promisc_disable,
2028 .allmulticast_enable = otx2_nix_allmulticast_enable,
2029 .allmulticast_disable = otx2_nix_allmulticast_disable,
2030 .queue_stats_mapping_set = otx2_nix_queue_stats_mapping,
2031 .reta_update = otx2_nix_dev_reta_update,
2032 .reta_query = otx2_nix_dev_reta_query,
2033 .rss_hash_update = otx2_nix_rss_hash_update,
2034 .rss_hash_conf_get = otx2_nix_rss_hash_conf_get,
2035 .xstats_get = otx2_nix_xstats_get,
2036 .xstats_get_names = otx2_nix_xstats_get_names,
2037 .xstats_reset = otx2_nix_xstats_reset,
2038 .xstats_get_by_id = otx2_nix_xstats_get_by_id,
2039 .xstats_get_names_by_id = otx2_nix_xstats_get_names_by_id,
2040 .rxq_info_get = otx2_nix_rxq_info_get,
2041 .txq_info_get = otx2_nix_txq_info_get,
2042 .rx_burst_mode_get = otx2_rx_burst_mode_get,
2043 .tx_burst_mode_get = otx2_tx_burst_mode_get,
2044 .rx_queue_count = otx2_nix_rx_queue_count,
2045 .rx_descriptor_done = otx2_nix_rx_descriptor_done,
2046 .rx_descriptor_status = otx2_nix_rx_descriptor_status,
2047 .tx_descriptor_status = otx2_nix_tx_descriptor_status,
2048 .tx_done_cleanup = otx2_nix_tx_done_cleanup,
2049 .pool_ops_supported = otx2_nix_pool_ops_supported,
2050 .filter_ctrl = otx2_nix_dev_filter_ctrl,
2051 .get_module_info = otx2_nix_get_module_info,
2052 .get_module_eeprom = otx2_nix_get_module_eeprom,
2053 .fw_version_get = otx2_nix_fw_version_get,
2054 .flow_ctrl_get = otx2_nix_flow_ctrl_get,
2055 .flow_ctrl_set = otx2_nix_flow_ctrl_set,
2056 .timesync_enable = otx2_nix_timesync_enable,
2057 .timesync_disable = otx2_nix_timesync_disable,
2058 .timesync_read_rx_timestamp = otx2_nix_timesync_read_rx_timestamp,
2059 .timesync_read_tx_timestamp = otx2_nix_timesync_read_tx_timestamp,
2060 .timesync_adjust_time = otx2_nix_timesync_adjust_time,
2061 .timesync_read_time = otx2_nix_timesync_read_time,
2062 .timesync_write_time = otx2_nix_timesync_write_time,
2063 .vlan_offload_set = otx2_nix_vlan_offload_set,
2064 .vlan_filter_set = otx2_nix_vlan_filter_set,
2065 .vlan_strip_queue_set = otx2_nix_vlan_strip_queue_set,
2066 .vlan_tpid_set = otx2_nix_vlan_tpid_set,
2067 .vlan_pvid_set = otx2_nix_vlan_pvid_set,
2068 .rx_queue_intr_enable = otx2_nix_rx_queue_intr_enable,
2069 .rx_queue_intr_disable = otx2_nix_rx_queue_intr_disable,
2070 .read_clock = otx2_nix_read_clock,
2074 nix_lf_attach(struct otx2_eth_dev *dev)
2076 struct otx2_mbox *mbox = dev->mbox;
2077 struct rsrc_attach_req *req;
2079 /* Attach NIX(lf) */
2080 req = otx2_mbox_alloc_msg_attach_resources(mbox);
2084 return otx2_mbox_process(mbox);
2088 nix_lf_get_msix_offset(struct otx2_eth_dev *dev)
2090 struct otx2_mbox *mbox = dev->mbox;
2091 struct msix_offset_rsp *msix_rsp;
2094 /* Get NPA and NIX MSIX vector offsets */
2095 otx2_mbox_alloc_msg_msix_offset(mbox);
2097 rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
2099 dev->nix_msixoff = msix_rsp->nix_msixoff;
2105 otx2_eth_dev_lf_detach(struct otx2_mbox *mbox)
2107 struct rsrc_detach_req *req;
2109 req = otx2_mbox_alloc_msg_detach_resources(mbox);
2111 /* Detach all except npa lf */
2112 req->partial = true;
2119 return otx2_mbox_process(mbox);
2123 otx2_eth_dev_is_sdp(struct rte_pci_device *pci_dev)
2125 if (pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_PF ||
2126 pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_VF)
2132 otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
2134 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2135 struct rte_pci_device *pci_dev;
2136 int rc, max_entries;
2138 eth_dev->dev_ops = &otx2_eth_dev_ops;
2140 /* For secondary processes, the primary has done all the work */
2141 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2142 /* Setup callbacks for secondary process */
2143 otx2_eth_set_tx_function(eth_dev);
2144 otx2_eth_set_rx_function(eth_dev);
2148 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2150 rte_eth_copy_pci_info(eth_dev, pci_dev);
2151 eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
2153 /* Zero out everything after OTX2_DEV to allow proper dev_reset() */
2154 memset(&dev->otx2_eth_dev_data_start, 0, sizeof(*dev) -
2155 offsetof(struct otx2_eth_dev, otx2_eth_dev_data_start));
2157 /* Parse devargs string */
2158 rc = otx2_ethdev_parse_devargs(eth_dev->device->devargs, dev);
2160 otx2_err("Failed to parse devargs rc=%d", rc);
2164 if (!dev->mbox_active) {
2165 /* Initialize the base otx2_dev object
2166 * only if it is not already initialized (mbox not yet active)
2168 rc = otx2_dev_init(pci_dev, dev);
2170 otx2_err("Failed to initialize otx2_dev rc=%d", rc);
2174 if (otx2_eth_dev_is_sdp(pci_dev))
2175 dev->sdp_link = true;
2177 dev->sdp_link = false;
2178 /* Device generic callbacks */
2179 dev->ops = &otx2_dev_ops;
2180 dev->eth_dev = eth_dev;
2182 /* Grab the NPA LF if required */
2183 rc = otx2_npa_lf_init(pci_dev, dev);
2185 goto otx2_dev_uninit;
2187 dev->configured = 0;
2188 dev->drv_inited = true;
2189 dev->ptype_disable = 0;
2190 dev->base = dev->bar2 + (RVU_BLOCK_ADDR_NIX0 << 20);
2191 dev->lmt_addr = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);
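/* BAR2 is carved into 1 MB (1 << 20) windows per RVU block: the NIX LF
 * register space and the LMT region are located by their block addresses.
 */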
2194 rc = nix_lf_attach(dev);
2196 goto otx2_npa_uninit;
2198 /* Get NIX MSIX offset */
2199 rc = nix_lf_get_msix_offset(dev);
2201 goto otx2_npa_uninit;
2203 /* Register LF irq handlers */
2204 rc = otx2_nix_register_irqs(eth_dev);
2208 /* Get maximum number of supported MAC entries */
2209 max_entries = otx2_cgx_mac_max_entries_get(dev);
2210 if (max_entries < 0) {
2211 otx2_err("Failed to get max entries for mac addr");
2213 goto unregister_irq;
2216 /* For VFs, the returned max_entries will be 0. But to keep the default
2217 * MAC address, at least one entry must be allocated, so bump it up to 1.
2219 if (max_entries == 0)
2222 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", max_entries *
2223 RTE_ETHER_ADDR_LEN, 0);
2224 if (eth_dev->data->mac_addrs == NULL) {
2225 otx2_err("Failed to allocate memory for mac addr");
2227 goto unregister_irq;
2230 dev->max_mac_entries = max_entries;
2232 rc = otx2_nix_mac_addr_get(eth_dev, dev->mac_addr);
2234 goto free_mac_addrs;
2236 /* Update the mac address */
2237 memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);
2239 /* Also sync same MAC address to CGX table */
2240 otx2_cgx_mac_addr_set(eth_dev, &eth_dev->data->mac_addrs[0]);
2242 /* Initialize the tm data structures */
2243 otx2_nix_tm_conf_init(eth_dev);
2245 dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
2246 dev->rx_offload_capa = nix_get_rx_offload_capa(dev);
2248 if (otx2_dev_is_96xx_A0(dev) ||
2249 otx2_dev_is_95xx_Ax(dev)) {
2250 dev->hwcap |= OTX2_FIXUP_F_MIN_4K_Q;
2251 dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL;
2254 /* Create security ctx */
2255 rc = otx2_eth_sec_ctx_create(eth_dev);
2257 goto free_mac_addrs;
2258 dev->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
2259 dev->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
2261 /* Initialize rte-flow */
2262 rc = otx2_flow_init(dev);
2264 goto sec_ctx_destroy;
2266 otx2_nix_mc_filter_init(dev);
2268 otx2_nix_dbg("Port=%d pf=%d vf=%d ver=%s msix_off=%d hwcap=0x%" PRIx64
2269 " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
2270 eth_dev->data->port_id, dev->pf, dev->vf,
2271 OTX2_ETH_DEV_PMD_VERSION, dev->nix_msixoff, dev->hwcap,
2272 dev->rx_offload_capa, dev->tx_offload_capa);
2276 otx2_eth_sec_ctx_destroy(eth_dev);
2278 rte_free(eth_dev->data->mac_addrs);
2280 otx2_nix_unregister_irqs(eth_dev);
2282 otx2_eth_dev_lf_detach(dev->mbox);
2286 otx2_dev_fini(pci_dev, dev);
2288 otx2_err("Failed to init nix eth_dev rc=%d", rc);
2293 otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
2295 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
2296 struct rte_pci_device *pci_dev;
2299 /* Nothing to be done for secondary processes */
2300 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2303 /* Clear the flag since we are closing down */
2304 dev->configured = 0;
2306 /* Disable nix bpid config */
2307 otx2_nix_rxchan_bpid_cfg(eth_dev, false);
2309 npc_rx_disable(dev);
2311 /* Disable vlan offloads */
2312 otx2_nix_vlan_fini(eth_dev);
2314 /* Disable other rte_flow entries */
2315 otx2_flow_fini(dev);
2317 /* Free multicast filter list */
2318 otx2_nix_mc_filter_fini(dev);
2320 /* Disable PTP if already enabled */
2321 if (otx2_ethdev_is_ptp_en(dev))
2322 otx2_nix_timesync_disable(eth_dev);
2324 nix_cgx_stop_link_event(dev);
2327 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
2328 otx2_nix_tx_queue_release(eth_dev->data->tx_queues[i]);
2329 eth_dev->data->tx_queues[i] = NULL;
2331 eth_dev->data->nb_tx_queues = 0;
2333 /* Free up RQ's and CQ's */
2334 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
2335 otx2_nix_rx_queue_release(eth_dev->data->rx_queues[i]);
2336 eth_dev->data->rx_queues[i] = NULL;
2338 eth_dev->data->nb_rx_queues = 0;
2340 /* Free tm resources */
2341 rc = otx2_nix_tm_fini(eth_dev);
2343 otx2_err("Failed to cleanup tm, rc=%d", rc);
2345 /* Unregister queue irqs */
2346 oxt2_nix_unregister_queue_irqs(eth_dev);
2348 /* Unregister cq irqs */
2349 if (eth_dev->data->dev_conf.intr_conf.rxq)
2350 oxt2_nix_unregister_cq_irqs(eth_dev);
2352 rc = nix_lf_free(dev);
2354 otx2_err("Failed to free nix lf, rc=%d", rc);
2356 rc = otx2_npa_lf_fini();
2358 otx2_err("Failed to cleanup npa lf, rc=%d", rc);
2360 /* Destroy security ctx */
2361 otx2_eth_sec_ctx_destroy(eth_dev);
2363 rte_free(eth_dev->data->mac_addrs);
2364 eth_dev->data->mac_addrs = NULL;
2365 dev->drv_inited = false;
2367 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2368 otx2_nix_unregister_irqs(eth_dev);
2370 rc = otx2_eth_dev_lf_detach(dev->mbox);
2372 otx2_err("Failed to detach resources, rc=%d", rc);
2374 /* Check if mbox close is needed */
2378 if (otx2_npa_lf_active(dev) || otx2_dev_active_vfs(dev)) {
2379 /* Will be freed later by PMD */
2380 eth_dev->data->dev_private = NULL;
2384 otx2_dev_fini(pci_dev, dev);
2389 otx2_nix_dev_close(struct rte_eth_dev *eth_dev)
2391 otx2_eth_dev_uninit(eth_dev, true);
2395 otx2_nix_dev_reset(struct rte_eth_dev *eth_dev)
2399 rc = otx2_eth_dev_uninit(eth_dev, false);
2403 return otx2_eth_dev_init(eth_dev);
2407 nix_remove(struct rte_pci_device *pci_dev)
2409 struct rte_eth_dev *eth_dev;
2410 struct otx2_idev_cfg *idev;
2411 struct otx2_dev *otx2_dev;
2414 eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
2416 /* Cleanup eth dev */
2417 rc = otx2_eth_dev_uninit(eth_dev, true);
2421 rte_eth_dev_pci_release(eth_dev);
2424 /* Nothing to be done for secondary processes */
2425 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2428 /* Check for common resources */
2429 idev = otx2_intra_dev_get_cfg();
2430 if (!idev || !idev->npa_lf || idev->npa_lf->pci_dev != pci_dev)
2433 otx2_dev = container_of(idev->npa_lf, struct otx2_dev, npalf);
2435 if (otx2_npa_lf_active(otx2_dev) || otx2_dev_active_vfs(otx2_dev))
2438 /* Safe to cleanup mbox as no more users */
2439 otx2_dev_fini(pci_dev, otx2_dev);
2444 otx2_info("%s: common resource in use by other devices", pci_dev->name);
2449 nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
2453 RTE_SET_USED(pci_drv);
2455 rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct otx2_eth_dev),
2458 /* On error in a secondary process, recheck whether the port still
2459 * exists in the primary or is in the middle of being detached.
2461 if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
2462 if (!rte_eth_dev_allocated(pci_dev->device.name))
2467 static const struct rte_pci_id pci_nix_map[] = {
2469 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF)
2472 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF)
2475 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
2476 PCI_DEVID_OCTEONTX2_RVU_AF_VF)
2479 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
2480 PCI_DEVID_OCTEONTX2_RVU_SDP_PF)
2483 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
2484 PCI_DEVID_OCTEONTX2_RVU_SDP_VF)
2491 static struct rte_pci_driver pci_nix = {
2492 .id_table = pci_nix_map,
2493 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA |
2494 RTE_PCI_DRV_INTR_LSC,
2496 .remove = nix_remove,
2499 RTE_PMD_REGISTER_PCI(net_octeontx2, pci_nix);
2500 RTE_PMD_REGISTER_PCI_TABLE(net_octeontx2, pci_nix_map);
2501 RTE_PMD_REGISTER_KMOD_DEP(net_octeontx2, "vfio-pci");