/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <inttypes.h>
#include <math.h>

#include <rte_ethdev_pci.h>
#include <rte_io.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>

#include "otx2_ethdev.h"

static void
otx2_eth_set_rx_function(struct rte_eth_dev *eth_dev)
{
	/* Stub: the fast path Rx handler is not wired up here */
	RTE_SET_USED(eth_dev);
}

static void
otx2_eth_set_tx_function(struct rte_eth_dev *eth_dev)
{
	/* Stub: the fast path Tx handler is not wired up here */
	RTE_SET_USED(eth_dev);
}

static inline uint64_t
nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
{
	uint64_t capa = NIX_RX_OFFLOAD_CAPA;

	/* Rx timestamping is not available on the VF */
	if (otx2_dev_is_vf(dev))
		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;

	return capa;
}

static inline uint64_t
nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
{
	RTE_SET_USED(dev);

	return NIX_TX_OFFLOAD_CAPA;
}

static const struct otx2_dev_ops otx2_dev_ops = {
	.link_status_update = otx2_eth_dev_link_status_update,
};

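/*
 * Note: otx2_dev_ops carries generic callbacks for the common otx2 device
 * layer; link_status_update is presumably invoked when the AF forwards a
 * CGX link-change notification over the mailbox.
 */
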
static int
nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_lf_alloc_req *req;
	struct nix_lf_alloc_rsp *rsp;
	int rc;

	req = otx2_mbox_alloc_msg_nix_lf_alloc(mbox);
	req->rq_cnt = nb_rxq;
	req->sq_cnt = nb_txq;
	req->cq_cnt = nb_rxq;
	/* XQE_SZ should be in sync with NIX_CQ_ENTRY_SZ */
	RTE_BUILD_BUG_ON(NIX_CQ_ENTRY_SZ != 128);
	req->xqe_sz = NIX_XQESZ_W16;
	req->rss_sz = dev->rss_info.rss_size;
	req->rss_grps = NIX_RSS_GRPS;
	req->npa_func = otx2_npa_pf_func_get();
	req->sso_func = otx2_sso_pf_func_get();
	req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
	if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
			DEV_RX_OFFLOAD_UDP_CKSUM)) {
		req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
		req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
	}

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	dev->sqb_size = rsp->sqb_size;
	dev->tx_chan_base = rsp->tx_chan_base;
	dev->rx_chan_base = rsp->rx_chan_base;
	dev->rx_chan_cnt = rsp->rx_chan_cnt;
	dev->tx_chan_cnt = rsp->tx_chan_cnt;
	dev->lso_tsov4_idx = rsp->lso_tsov4_idx;
	dev->lso_tsov6_idx = rsp->lso_tsov6_idx;
	dev->lf_tx_stats = rsp->lf_tx_stats;
	dev->lf_rx_stats = rsp->lf_rx_stats;
	dev->cints = rsp->cints;
	dev->qints = rsp->qints;
	dev->npc_flow.channel = dev->rx_chan_base;

	return 0;
}

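/*
 * For reference, the mailbox idiom used throughout this file (a sketch,
 * not additional driver logic): allocate a typed request on the shared
 * AF mailbox, fill its fields, then process it synchronously.
 *
 *	req = otx2_mbox_alloc_msg_<msg_name>(mbox);
 *	req->field = ...;
 *	rc = otx2_mbox_process(mbox);                   - no response payload
 *	rc = otx2_mbox_process_msg(mbox, (void *)&rsp); - response expected
 */
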
static int
nix_lf_free(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_lf_free_req *req;
	struct ndc_sync_op *ndc_req;
	int rc;

	/* Sync NDC-NIX for LF */
	ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
	ndc_req->nix_lf_tx_sync = 1;
	ndc_req->nix_lf_rx_sync = 1;
	rc = otx2_mbox_process(mbox);
	if (rc)
		otx2_err("Error on NDC-NIX-[TX, RX] LF sync, rc %d", rc);

	req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
	/* Let the AF driver free all NPC entries this nix lf
	 * allocated through the NPC mailbox.
	 */
	req->flags = 0;

	return otx2_mbox_process(mbox);
}

static inline void
nix_rx_queue_reset(struct otx2_eth_rxq *rxq)
{
	/* Reset the software view of the CQ before (re)starting the queue */
	rxq->head = 0;
	rxq->available = 0;
}

static inline uint32_t
nix_qsize_to_val(enum nix_q_size_e qsize)
{
	return (16UL << (qsize * 2));
}

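/*
 * Worked example of the encoding above: each step quadruples the queue
 * size, so qsize 0, 1, 2, 3, 4, ... map to 16, 64, 256, 1K, 4K, ...
 * entries respectively.
 */
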
static inline enum nix_q_size_e
nix_qsize_clampup_get(struct otx2_eth_dev *dev, uint32_t val)
{
	int i = nix_q_size_16;	/* Smallest supported encoding */

	if (otx2_ethdev_fixup_is_min_4k_q(dev))
		i = nix_q_size_4K;

	for (; i < nix_q_size_max; i++)
		if (val <= nix_qsize_to_val(i))
			break;

	if (i >= nix_q_size_max)
		i = nix_q_size_max - 1;

	return i;
}

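/*
 * Example: a request for nb_desc = 1000 clamps up to the 1K encoding,
 * the first size that can hold it; with the min-4K-queue hardware fixup
 * the search starts at 4K, so the same request yields 4K entries.
 */
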
static int
nix_cq_rq_init(struct rte_eth_dev *eth_dev, struct otx2_eth_dev *dev,
	       uint16_t qid, struct otx2_eth_rxq *rxq, struct rte_mempool *mp)
{
	struct otx2_mbox *mbox = dev->mbox;
	const struct rte_memzone *rz;
	uint32_t ring_size, cq_size;
	struct nix_aq_enq_req *aq;
	uint16_t first_skip;
	int rc;

	cq_size = rxq->qlen;
	ring_size = cq_size * NIX_CQ_ENTRY_SZ;
	rz = rte_eth_dma_zone_reserve(eth_dev, "cq", qid, ring_size,
				      NIX_CQ_ALIGN, dev->node);
	if (rz == NULL) {
		otx2_err("Failed to allocate mem for cq hw ring");
		rc = -ENOMEM;
		goto fail;
	}
	memset(rz->addr, 0, rz->len);
	rxq->desc = (uintptr_t)rz->addr;
	rxq->qmask = cq_size - 1;

	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = qid;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	aq->cq.ena = 1;
	aq->cq.caching = 1;
	aq->cq.qsize = rxq->qsize;
	aq->cq.base = rz->iova;
	aq->cq.avg_level = 0xff;
	aq->cq.cq_err_int_ena = BIT(NIX_CQERRINT_CQE_FAULT);
	aq->cq.cq_err_int_ena |= BIT(NIX_CQERRINT_DOOR_ERR);

	/* Many to one reduction */
	aq->cq.qint_idx = qid % dev->qints;

	if (otx2_ethdev_fixup_is_limit_cq_full(dev)) {
		uint16_t min_rx_drop;
		const float rx_cq_skid = 1024 * 256;

		min_rx_drop = ceil(rx_cq_skid / (float)cq_size);
		aq->cq.drop = min_rx_drop;
		aq->cq.drop_ena = 1;
	}

	rc = otx2_mbox_process(mbox);
	if (rc) {
		otx2_err("Failed to init cq context");
		goto fail;
	}

	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = qid;
	aq->ctype = NIX_AQ_CTYPE_RQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	aq->rq.sso_ena = 0;
	aq->rq.cq = qid; /* RQ to CQ 1:1 mapped */
	aq->rq.spb_ena = 0;
	aq->rq.lpb_aura = npa_lf_aura_handle_to_aura(mp->pool_id);
	first_skip = (sizeof(struct rte_mbuf));
	first_skip += RTE_PKTMBUF_HEADROOM;
	first_skip += rte_pktmbuf_priv_size(mp);
	rxq->data_off = first_skip;

	first_skip /= 8; /* Expressed in number of dwords */
	aq->rq.first_skip = first_skip;
	aq->rq.later_skip = (sizeof(struct rte_mbuf) / 8);
	aq->rq.flow_tagw = 32; /* 32-bits */
	aq->rq.lpb_sizem1 = rte_pktmbuf_data_room_size(mp);
	aq->rq.lpb_sizem1 += rte_pktmbuf_priv_size(mp);
	aq->rq.lpb_sizem1 += sizeof(struct rte_mbuf);
	aq->rq.lpb_sizem1 /= 8;
	aq->rq.lpb_sizem1 -= 1; /* Expressed in size minus one */
	aq->rq.ena = 1;
	aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
	aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
	aq->rq.rq_int_ena = 0;
	/* Many to one reduction */
	aq->rq.qint_idx = qid % dev->qints;

	if (otx2_ethdev_fixup_is_limit_cq_full(dev))
		aq->rq.xqe_drop_ena = 1;

	rc = otx2_mbox_process(mbox);
	if (rc) {
		otx2_err("Failed to init rq context");
		goto fail;
	}

	return 0;

fail:
	return rc;
}

static int
nix_cq_rq_uninit(struct rte_eth_dev *eth_dev, struct otx2_eth_rxq *rxq)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_aq_enq_req *aq;
	int rc;

	/* RQ is already disabled, disable the CQ */
	aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
	aq->qidx = rxq->rq;
	aq->ctype = NIX_AQ_CTYPE_CQ;
	aq->op = NIX_AQ_INSTOP_WRITE;

	aq->cq.ena = 0;
	/* All-ones mask so the WRITE op updates only the ena field */
	aq->cq_mask.ena = ~(aq->cq_mask.ena);

	rc = otx2_mbox_process(mbox);
	if (rc < 0) {
		otx2_err("Failed to disable cq context");
		return rc;
	}

	return 0;
}

static inline int
nix_get_data_off(struct otx2_eth_dev *dev)
{
	/* No extra Rx data offset is required yet */
	RTE_SET_USED(dev);

	return 0;
}

uint64_t
otx2_nix_rxq_mbuf_setup(struct otx2_eth_dev *dev, uint16_t port_id)
{
	struct rte_mbuf mb_def;
	uint64_t *tmp;

	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
				offsetof(struct rte_mbuf, data_off) != 2);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
				offsetof(struct rte_mbuf, data_off) != 4);
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
				offsetof(struct rte_mbuf, data_off) != 6);

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM + nix_get_data_off(dev);
	mb_def.port = port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* Prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	tmp = (uint64_t *)&mb_def.rearm_data;

	return *tmp;
}

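/*
 * The build-time asserts above pin data_off, refcnt, nb_segs and port
 * inside the single 8-byte rearm_data word, so the value returned here
 * lets the Rx path (re)initialize all four fields of an mbuf with one
 * 64-bit store.
 */
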
static void
otx2_nix_rx_queue_release(void *rx_queue)
{
	struct otx2_eth_rxq *rxq = rx_queue;

	if (!rxq)
		return;

	otx2_nix_dbg("Releasing rxq %u", rxq->rq);
	nix_cq_rq_uninit(rxq->eth_dev, rxq);
	rte_free(rx_queue);
}

static int
otx2_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t rq,
			uint16_t nb_desc, unsigned int socket,
			const struct rte_eth_rxconf *rx_conf,
			struct rte_mempool *mp)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_mempool_ops *ops;
	struct otx2_eth_rxq *rxq;
	const char *platform_ops;
	enum nix_q_size_e qsize;
	uint64_t offloads;
	int rc;

	rc = -EINVAL;

	/* Compile time check to make sure all fast path elements in a CL */
	RTE_BUILD_BUG_ON(offsetof(struct otx2_eth_rxq, slow_path_start) >= 128);

	/* Sanity checks */
	if (rx_conf->rx_deferred_start == 1) {
		otx2_err("Deferred Rx start is not supported");
		goto fail;
	}

	platform_ops = rte_mbuf_platform_mempool_ops();
	/* This driver needs octeontx2_npa mempool ops to work */
	ops = rte_mempool_get_ops(mp->ops_index);
	if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
		otx2_err("mempool ops should be of octeontx2_npa type");
		goto fail;
	}

	if (mp->pool_id == 0) {
		otx2_err("Invalid pool_id");
		goto fail;
	}

	/* Free memory prior to re-allocation if needed */
	if (eth_dev->data->rx_queues[rq] != NULL) {
		otx2_nix_dbg("Freeing memory prior to re-allocation %d", rq);
		otx2_nix_rx_queue_release(eth_dev->data->rx_queues[rq]);
		eth_dev->data->rx_queues[rq] = NULL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	dev->rx_offloads |= offloads;

	/* Find the CQ queue size */
	qsize = nix_qsize_clampup_get(dev, nb_desc);
	/* Allocate rxq memory */
	rxq = rte_zmalloc_socket("otx2 rxq", sizeof(*rxq), OTX2_ALIGN, socket);
	if (rxq == NULL) {
		otx2_err("Failed to allocate rq=%d", rq);
		rc = -ENOMEM;
		goto fail;
	}

	rxq->eth_dev = eth_dev;
	rxq->rq = rq;
	rxq->cq_door = dev->base + NIX_LF_CQ_OP_DOOR;
	rxq->cq_status = (int64_t *)(dev->base + NIX_LF_CQ_OP_STATUS);
	rxq->wdata = (uint64_t)rq << 32;
	rxq->aura = npa_lf_aura_handle_to_aura(mp->pool_id);
	rxq->mbuf_initializer = otx2_nix_rxq_mbuf_setup(dev,
							eth_dev->data->port_id);
	rxq->offloads = offloads;
	rxq->pool = mp;
	rxq->qlen = nix_qsize_to_val(qsize);
	rxq->qsize = qsize;

	/* Alloc completion queue */
	rc = nix_cq_rq_init(eth_dev, dev, rq, rxq, mp);
	if (rc) {
		otx2_err("Failed to allocate rxq=%u", rq);
		goto free_rxq;
	}

	rxq->qconf.socket_id = socket;
	rxq->qconf.nb_desc = nb_desc;
	rxq->qconf.mempool = mp;
	memcpy(&rxq->qconf.conf.rx, rx_conf, sizeof(struct rte_eth_rxconf));

	nix_rx_queue_reset(rxq);
	otx2_nix_dbg("rq=%d pool=%s qsize=%d nb_desc=%d->%d",
		     rq, mp->name, qsize, nb_desc, rxq->qlen);

	eth_dev->data->rx_queues[rq] = rxq;
	eth_dev->data->rx_queue_state[rq] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;

free_rxq:
	otx2_nix_rx_queue_release(rxq);
fail:
	return rc;
}

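/*
 * Illustrative application-side call (a sketch; mp stands for a pktmbuf
 * pool backed by the octeontx2_npa platform mempool ops, which the check
 * above enforces):
 *
 *	rc = rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *				    NULL, mp);
 */
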
static int
otx2_nix_configure(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	struct rte_eth_txmode *txmode = &conf->txmode;
	char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *ea;
	uint8_t nb_rxq, nb_txq;
	int rc;

	rc = -EINVAL;

	/* Sanity checks */
	if (rte_eal_has_hugepages() == 0) {
		otx2_err("Huge page is not configured");
		goto fail;
	}

	if (rte_eal_iova_mode() != RTE_IOVA_VA) {
		otx2_err("iova mode should be va");
		goto fail;
	}

	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
		otx2_err("Setting link speed/duplex not supported");
		goto fail;
	}

	if (conf->dcb_capability_en == 1) {
		otx2_err("dcb enable is not supported");
		goto fail;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		otx2_err("Flow director is not supported");
		goto fail;
	}

	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
		otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
		goto fail;
	}

	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
		otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
		goto fail;
	}

	/* Free the resources allocated from the previous configure */
	if (dev->configured == 1) {
		oxt2_nix_unregister_queue_irqs(eth_dev);
		nix_lf_free(dev);
	}

	if (otx2_dev_is_A0(dev) &&
	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
	    (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
		otx2_err("Outer IP and SCTP checksum unsupported");
		goto fail;
	}

	dev->rx_offloads = rxmode->offloads;
	dev->tx_offloads = txmode->offloads;
	dev->rss_info.rss_grps = NIX_RSS_GRPS;

	nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
	nb_txq = RTE_MAX(data->nb_tx_queues, 1);

	/* Alloc a nix lf */
	rc = nix_lf_alloc(dev, nb_rxq, nb_txq);
	if (rc) {
		otx2_err("Failed to init nix_lf rc=%d", rc);
		goto fail;
	}

	/* Configure RSS */
	rc = otx2_nix_rss_config(eth_dev);
	if (rc) {
		otx2_err("Failed to configure rss rc=%d", rc);
		goto free_nix_lf;
	}

	/* Register queue IRQs */
	rc = oxt2_nix_register_queue_irqs(eth_dev);
	if (rc) {
		otx2_err("Failed to register queue interrupts rc=%d", rc);
		goto free_nix_lf;
	}

	/* Update the mac address */
	ea = eth_dev->data->mac_addrs;
	memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
	if (rte_is_zero_ether_addr(ea))
		rte_eth_random_addr((uint8_t *)ea);

	rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);

	otx2_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
		     " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64
		     " rx_flags=0x%x tx_flags=0x%x",
		     eth_dev->data->port_id, ea_fmt, nb_rxq,
		     nb_txq, dev->rx_offloads, dev->tx_offloads,
		     dev->rx_offload_flags, dev->tx_offload_flags);

	/* All good */
	dev->configured = 1;
	dev->configured_nb_rx_qs = data->nb_rx_queues;
	dev->configured_nb_tx_qs = data->nb_tx_queues;
	return 0;

free_nix_lf:
	rc = nix_lf_free(dev);
fail:
	return rc;
}

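/*
 * Note on ordering: dev_configure runs before any per-queue setup, so the
 * LF allocated here provides the RQ/SQ/CQ resources the queue setup hooks
 * consume; on a reconfigure, the queue IRQs and the LF are freed first
 * (see the dev->configured check above) and then recreated.
 */
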
/* Initialize and register driver with DPDK Application */
static const struct eth_dev_ops otx2_eth_dev_ops = {
	.dev_infos_get            = otx2_nix_info_get,
	.dev_configure            = otx2_nix_configure,
	.link_update              = otx2_nix_link_update,
	.rx_queue_setup           = otx2_nix_rx_queue_setup,
	.rx_queue_release         = otx2_nix_rx_queue_release,
	.stats_get                = otx2_nix_dev_stats_get,
	.stats_reset              = otx2_nix_dev_stats_reset,
	.get_reg                  = otx2_nix_dev_get_reg,
	.mac_addr_add             = otx2_nix_mac_addr_add,
	.mac_addr_remove          = otx2_nix_mac_addr_del,
	.mac_addr_set             = otx2_nix_mac_addr_set,
	.promiscuous_enable       = otx2_nix_promisc_enable,
	.promiscuous_disable      = otx2_nix_promisc_disable,
	.allmulticast_enable      = otx2_nix_allmulticast_enable,
	.allmulticast_disable     = otx2_nix_allmulticast_disable,
	.queue_stats_mapping_set  = otx2_nix_queue_stats_mapping,
	.reta_update              = otx2_nix_dev_reta_update,
	.reta_query               = otx2_nix_dev_reta_query,
	.rss_hash_update          = otx2_nix_rss_hash_update,
	.rss_hash_conf_get        = otx2_nix_rss_hash_conf_get,
	.xstats_get               = otx2_nix_xstats_get,
	.xstats_get_names         = otx2_nix_xstats_get_names,
	.xstats_reset             = otx2_nix_xstats_reset,
	.xstats_get_by_id         = otx2_nix_xstats_get_by_id,
	.xstats_get_names_by_id   = otx2_nix_xstats_get_names_by_id,
};

static int
nix_lf_attach(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct rsrc_attach_req *req;

	/* Attach NIX(lf) */
	req = otx2_mbox_alloc_msg_attach_resources(mbox);
	req->modify = true;
	req->nixlf = true;

	return otx2_mbox_process(mbox);
}

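/*
 * req->modify = true asks the AF to add the NIX LF to the resources this
 * PF/VF already holds (e.g. the NPA LF grabbed earlier) rather than
 * replacing them; the detach path below mirrors this with a partial
 * detach that keeps the NPA LF.
 */
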
static int
nix_lf_get_msix_offset(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct msix_offset_rsp *msix_rsp;
	int rc;

	/* Get NPA and NIX MSIX vector offsets */
	otx2_mbox_alloc_msg_msix_offset(mbox);

	rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
	if (rc < 0)
		return rc;

	dev->nix_msixoff = msix_rsp->nix_msixoff;

	return 0;
}

static int
otx2_eth_dev_lf_detach(struct otx2_mbox *mbox)
{
	struct rsrc_detach_req *req;

	req = otx2_mbox_alloc_msg_detach_resources(mbox);

	/* Detach all except npa lf */
	req->partial = true;
	req->nixlf = true;
	req->sso = true;
	req->ssow = true;
	req->timlfs = true;
	req->cptlfs = true;

	return otx2_mbox_process(mbox);
}

static int
otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_pci_device *pci_dev;
	int rc, max_entries;

	eth_dev->dev_ops = &otx2_eth_dev_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* Setup callbacks for secondary process */
		otx2_eth_set_tx_function(eth_dev);
		otx2_eth_set_rx_function(eth_dev);
		return 0;
	}

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	/* Zero out everything after OTX2_DEV to allow proper dev_reset() */
	memset(&dev->otx2_eth_dev_data_start, 0, sizeof(*dev) -
		offsetof(struct otx2_eth_dev, otx2_eth_dev_data_start));

	/* Parse devargs string */
	rc = otx2_ethdev_parse_devargs(eth_dev->device->devargs, dev);
	if (rc) {
		otx2_err("Failed to parse devargs rc=%d", rc);
		goto error;
	}

	if (!dev->mbox_active) {
		/* Initialize the base otx2_dev object only if it is
		 * not already initialized (mbox not yet active).
		 */
		rc = otx2_dev_init(pci_dev, dev);
		if (rc) {
			otx2_err("Failed to initialize otx2_dev rc=%d", rc);
			goto error;
		}
	}

	/* Device generic callbacks */
	dev->ops = &otx2_dev_ops;
	dev->eth_dev = eth_dev;

	/* Grab the NPA LF if required */
	rc = otx2_npa_lf_init(pci_dev, dev);
	if (rc)
		goto otx2_dev_uninit;

	dev->configured = 0;
	dev->drv_inited = true;
	dev->base = dev->bar2 + (RVU_BLOCK_ADDR_NIX0 << 20);
	dev->lmt_addr = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);

	/* Attach NIX LF */
	rc = nix_lf_attach(dev);
	if (rc)
		goto otx2_npa_uninit;

	/* Get NIX MSIX offset */
	rc = nix_lf_get_msix_offset(dev);
	if (rc)
		goto otx2_npa_uninit;

	/* Register LF irq handlers */
	rc = otx2_nix_register_irqs(eth_dev);
	if (rc)
		goto mbox_detach;

	/* Get maximum number of supported MAC entries */
	max_entries = otx2_cgx_mac_max_entries_get(dev);
	if (max_entries < 0) {
		otx2_err("Failed to get max entries for mac addr");
		rc = -ENOTSUP;
		goto unregister_irq;
	}

	/* For VFs, returned max_entries will be 0. But to keep default MAC
	 * address, one entry must be allocated. So setting up to 1.
	 */
	if (max_entries == 0)
		max_entries = 1;

	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", max_entries *
					       RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		otx2_err("Failed to allocate memory for mac addr");
		rc = -ENOMEM;
		goto unregister_irq;
	}

	dev->max_mac_entries = max_entries;

	rc = otx2_nix_mac_addr_get(eth_dev, dev->mac_addr);
	if (rc)
		goto free_mac_addrs;

	/* Update the mac address */
	memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);

	/* Also sync same MAC address to CGX table */
	otx2_cgx_mac_addr_set(eth_dev, &eth_dev->data->mac_addrs[0]);

	dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
	dev->rx_offload_capa = nix_get_rx_offload_capa(dev);

	if (otx2_dev_is_A0(dev)) {
		dev->hwcap |= OTX2_FIXUP_F_MIN_4K_Q;
		dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL;
	}

	otx2_nix_dbg("Port=%d pf=%d vf=%d ver=%s msix_off=%d hwcap=0x%" PRIx64
		     " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
		     eth_dev->data->port_id, dev->pf, dev->vf,
		     OTX2_ETH_DEV_PMD_VERSION, dev->nix_msixoff, dev->hwcap,
		     dev->rx_offload_capa, dev->tx_offload_capa);
	return 0;

free_mac_addrs:
	rte_free(eth_dev->data->mac_addrs);
unregister_irq:
	otx2_nix_unregister_irqs(eth_dev);
mbox_detach:
	otx2_eth_dev_lf_detach(dev->mbox);
otx2_npa_uninit:
	otx2_npa_lf_fini();
otx2_dev_uninit:
	otx2_dev_fini(pci_dev, dev);
error:
	otx2_err("Failed to init nix eth_dev rc=%d", rc);
	return rc;
}

static int
otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_pci_device *pci_dev;
	int rc;

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Unregister queue irqs */
	oxt2_nix_unregister_queue_irqs(eth_dev);

	rc = nix_lf_free(dev);
	if (rc)
		otx2_err("Failed to free nix lf, rc=%d", rc);

	rc = otx2_npa_lf_fini();
	if (rc)
		otx2_err("Failed to cleanup npa lf, rc=%d", rc);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
	dev->drv_inited = false;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	otx2_nix_unregister_irqs(eth_dev);

	rc = otx2_eth_dev_lf_detach(dev->mbox);
	if (rc)
		otx2_err("Failed to detach resources, rc=%d", rc);

	/* Check if mbox close is needed */
	if (!mbox_close)
		return 0;

	if (otx2_npa_lf_active(dev) || otx2_dev_active_vfs(dev)) {
		/* Will be freed later by PMD */
		eth_dev->data->dev_private = NULL;
		return 0;
	}

	otx2_dev_fini(pci_dev, dev);
	return 0;
}

static int
nix_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	struct otx2_idev_cfg *idev;
	struct otx2_dev *otx2_dev;
	int rc;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (eth_dev) {
		/* Cleanup eth dev */
		rc = otx2_eth_dev_uninit(eth_dev, true);
		if (rc)
			return rc;

		rte_eth_dev_pci_release(eth_dev);
	}

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Check for common resources */
	idev = otx2_intra_dev_get_cfg();
	if (!idev || !idev->npa_lf || idev->npa_lf->pci_dev != pci_dev)
		return 0;

	otx2_dev = container_of(idev->npa_lf, struct otx2_dev, npalf);

	if (otx2_npa_lf_active(otx2_dev) || otx2_dev_active_vfs(otx2_dev))
		goto exit;

	/* Safe to cleanup mbox as no more users */
	otx2_dev_fini(pci_dev, otx2_dev);
	return 0;

exit:
	otx2_info("%s: common resource in use by other devices", pci_dev->name);
	return -EAGAIN;
}

static int
nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	int rc;

	RTE_SET_USED(pci_drv);

	rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct otx2_eth_dev),
					   otx2_eth_dev_init);

	/* On error on secondary, recheck if port exists in primary or
	 * in the middle of detach state.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
		if (!rte_eth_dev_allocated(pci_dev->device.name))
			return 0;

	return rc;
}

static const struct rte_pci_id pci_nix_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			       PCI_DEVID_OCTEONTX2_RVU_AF_VF)
	},
	{
		.vendor_id = 0,
	},
};

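/*
 * The zeroed sentinel entry above terminates the PCI ID match table; the
 * same driver binds the RVU PF, VF and AF-VF functions.
 */
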
static struct rte_pci_driver pci_nix = {
	.id_table = pci_nix_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA |
			RTE_PCI_DRV_INTR_LSC,
	.probe = nix_probe,
	.remove = nix_remove,
};

RTE_PMD_REGISTER_PCI(net_octeontx2, pci_nix);
RTE_PMD_REGISTER_PCI_TABLE(net_octeontx2, pci_nix_map);
RTE_PMD_REGISTER_KMOD_DEP(net_octeontx2, "vfio-pci");