/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <inttypes.h>

#include <rte_ethdev_pci.h>
#include <rte_malloc.h>

#include "otx2_ethdev.h"

static void
otx2_eth_set_rx_function(struct rte_eth_dev *eth_dev)
{
	/* Placeholder until the fast-path Rx burst function is wired up */
	RTE_SET_USED(eth_dev);
}

static void
otx2_eth_set_tx_function(struct rte_eth_dev *eth_dev)
{
	/* Placeholder until the fast-path Tx burst function is wired up */
	RTE_SET_USED(eth_dev);
}

static inline uint64_t
nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
{
	uint64_t capa = NIX_RX_OFFLOAD_CAPA;

	/* Rx timestamping is not supported on VF devices */
	if (otx2_dev_is_vf(dev))
		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;

	return capa;
}

static inline uint64_t
nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
{
	RTE_SET_USED(dev);

	return NIX_TX_OFFLOAD_CAPA;
}

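/*
 * Illustrative sketch (not part of the driver): how a dev_infos_get
 * callback might surface the capability masks computed above. The real
 * implementation is otx2_nix_info_get(); nix_example_info_get() is a
 * hypothetical helper that only shows the intended data flow.
 */
static __rte_unused void
nix_example_info_get(struct rte_eth_dev *eth_dev,
		     struct rte_eth_dev_info *info)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

	/* Advertise only what the underlying LF (PF or VF) supports */
	info->rx_offload_capa = nix_get_rx_offload_capa(dev);
	info->tx_offload_capa = nix_get_tx_offload_capa(dev);
}
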
static int
nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_lf_alloc_req *req;
	struct nix_lf_alloc_rsp *rsp;
	int rc;

	req = otx2_mbox_alloc_msg_nix_lf_alloc(mbox);
	req->rq_cnt = nb_rxq;
	req->sq_cnt = nb_txq;
	req->cq_cnt = nb_rxq;
	/* XQE_SZ should be in sync with NIX_CQ_ENTRY_SZ */
	RTE_BUILD_BUG_ON(NIX_CQ_ENTRY_SZ != 128);
	req->xqe_sz = NIX_XQESZ_W16;
	req->rss_sz = dev->rss_info.rss_size;
	req->rss_grps = NIX_RSS_GRPS;
	req->npa_func = otx2_npa_pf_func_get();
	req->sso_func = otx2_sso_pf_func_get();
	req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
	if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
				DEV_RX_OFFLOAD_UDP_CKSUM)) {
		req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
		req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
	}

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	/* Cache the LF resource layout returned by the AF */
	dev->sqb_size = rsp->sqb_size;
	dev->tx_chan_base = rsp->tx_chan_base;
	dev->rx_chan_base = rsp->rx_chan_base;
	dev->rx_chan_cnt = rsp->rx_chan_cnt;
	dev->tx_chan_cnt = rsp->tx_chan_cnt;
	dev->lso_tsov4_idx = rsp->lso_tsov4_idx;
	dev->lso_tsov6_idx = rsp->lso_tsov6_idx;
	dev->lf_tx_stats = rsp->lf_tx_stats;
	dev->lf_rx_stats = rsp->lf_rx_stats;
	dev->cints = rsp->cints;
	dev->qints = rsp->qints;
	dev->npc_flow.channel = dev->rx_chan_base;

	return 0;
}

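/*
 * Note: nix_lf_alloc() follows the mailbox convention used throughout this
 * file: otx2_mbox_alloc_msg_<name>() stages a request in the shared mailbox
 * region, the caller fills the request fields in place, and
 * otx2_mbox_process() (or otx2_mbox_process_msg(), which additionally hands
 * back a pointer to the AF's response) sends it and waits for completion.
 */
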
static int
nix_lf_free(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_lf_free_req *req;
	struct ndc_sync_op *ndc_req;
	int rc;

	/* Sync NDC-NIX for LF */
	ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
	ndc_req->nix_lf_tx_sync = 1;
	ndc_req->nix_lf_rx_sync = 1;
	rc = otx2_mbox_process(mbox);
	if (rc)
		otx2_err("Error on NDC-NIX-[TX, RX] LF sync, rc %d", rc);

	req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
	/* Let AF driver free all this nix lf's
	 * NPC entries allocated using NPC MBOX.
	 */
	req->flags = 0;

	return otx2_mbox_process(mbox);
}

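/*
 * The NDC sync issued in nix_lf_free() is presumably what forces any LF
 * context still held in the hardware NDC cache to be written back before
 * the LF itself is released; freeing the LF without it could leave stale
 * context behind.
 */
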
static int
otx2_nix_configure(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	struct rte_eth_txmode *txmode = &conf->txmode;
	char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *ea;
	uint8_t nb_rxq, nb_txq;
	int rc;

	rc = -EINVAL;

	/* Sanity checks */
	if (rte_eal_has_hugepages() == 0) {
		otx2_err("Huge pages are not configured");
		goto fail;
	}

	if (rte_eal_iova_mode() != RTE_IOVA_VA) {
		otx2_err("IOVA mode should be VA");
		goto fail;
	}

	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
		otx2_err("Setting link speed/duplex not supported");
		goto fail;
	}

	if (conf->dcb_capability_en == 1) {
		otx2_err("DCB enable is not supported");
		goto fail;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		otx2_err("Flow director is not supported");
		goto fail;
	}

	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
		otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
		goto fail;
	}

	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
		otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
		goto fail;
	}

	/* Free the resources allocated from the previous configure */
	if (dev->configured == 1) {
		oxt2_nix_unregister_queue_irqs(eth_dev);
		nix_lf_free(dev);
	}

	if (otx2_dev_is_A0(dev) &&
	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
	    (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
		otx2_err("Outer IP and SCTP checksum unsupported");
		goto fail;
	}

	dev->rx_offloads = rxmode->offloads;
	dev->tx_offloads = txmode->offloads;
	dev->rss_info.rss_grps = NIX_RSS_GRPS;

	nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
	nb_txq = RTE_MAX(data->nb_tx_queues, 1);

	/* Alloc a nix lf */
	rc = nix_lf_alloc(dev, nb_rxq, nb_txq);
	if (rc) {
		otx2_err("Failed to init nix_lf rc=%d", rc);
		goto fail;
	}

	/* Register queue IRQs */
	rc = oxt2_nix_register_queue_irqs(eth_dev);
	if (rc) {
		otx2_err("Failed to register queue interrupts rc=%d", rc);
		goto free_nix_lf;
	}

	/* Update the mac address */
	ea = eth_dev->data->mac_addrs;
	memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
	if (rte_is_zero_ether_addr(ea))
		rte_eth_random_addr((uint8_t *)ea);

	rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);

	otx2_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
		     " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64
		     " rx_flags=0x%x tx_flags=0x%x",
		     eth_dev->data->port_id, ea_fmt, nb_rxq,
		     nb_txq, dev->rx_offloads, dev->tx_offloads,
		     dev->rx_offload_flags, dev->tx_offload_flags);

	/* All good, update the configured state */
	dev->configured = 1;
	dev->configured_nb_rx_qs = data->nb_rx_queues;
	dev->configured_nb_tx_qs = data->nb_tx_queues;
	return 0;

free_nix_lf:
	rc = nix_lf_free(dev);
fail:
	return rc;
}

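/*
 * Illustrative application-side sketch (not part of the driver): a minimal
 * configuration that satisfies the checks in otx2_nix_configure() above,
 * i.e. RSS multi-queue Rx, default Tx mode and auto-negotiated link speed.
 * nix_example_app_configure() and its port_id/nb_rx/nb_tx parameters are
 * placeholders, not driver API.
 */
static __rte_unused int
nix_example_app_configure(uint16_t port_id, uint16_t nb_rx, uint16_t nb_tx)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.mq_mode = ETH_MQ_RX_RSS;	/* or ETH_MQ_RX_NONE */
	conf.txmode.mq_mode = ETH_MQ_TX_NONE;

	/* Generic ethdev entry point; lands in otx2_nix_configure() */
	return rte_eth_dev_configure(port_id, nb_rx, nb_tx, &conf);
}
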
/* Initialize and register driver with DPDK Application */
static const struct eth_dev_ops otx2_eth_dev_ops = {
	.dev_infos_get            = otx2_nix_info_get,
	.dev_configure            = otx2_nix_configure,
	.get_reg                  = otx2_nix_dev_get_reg,
};

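/*
 * Every entry in otx2_eth_dev_ops is reached through the generic ethdev
 * API, e.g. rte_eth_dev_configure() dispatches to .dev_configure and
 * rte_eth_dev_info_get() to .dev_infos_get; the table is expected to grow
 * as more of the PMD is filled in.
 */
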
static int
nix_lf_attach(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct rsrc_attach_req *req;

	/* Attach NIX(lf) */
	req = otx2_mbox_alloc_msg_attach_resources(mbox);
	req->modify = true;
	req->nixlf = true;

	return otx2_mbox_process(mbox);
}

static int
nix_lf_get_msix_offset(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct msix_offset_rsp *msix_rsp;
	int rc;

	/* Get NPA and NIX MSIX vector offsets */
	otx2_mbox_alloc_msg_msix_offset(mbox);

	rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
	if (rc)
		return rc;

	dev->nix_msixoff = msix_rsp->nix_msixoff;

	return 0;
}

static int
otx2_eth_dev_lf_detach(struct otx2_mbox *mbox)
{
	struct rsrc_detach_req *req;

	req = otx2_mbox_alloc_msg_detach_resources(mbox);

	/* Detach all except npa lf */
	req->partial = true;
	req->nixlf = true;
	req->sso = true;
	req->ssow = true;
	req->timlfs = true;
	req->cptlfs = true;

	return otx2_mbox_process(mbox);
}

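/*
 * Counterpart of nix_lf_attach(): req->partial makes the detach selective,
 * so the NPA LF (shared with other otx2 drivers via the common idev config,
 * see nix_remove() below) survives while the NIX and co-processor LFs are
 * released.
 */
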
static int
otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_pci_device *pci_dev;
	int rc, max_entries;

	eth_dev->dev_ops = &otx2_eth_dev_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* Setup callbacks for secondary process */
		otx2_eth_set_tx_function(eth_dev);
		otx2_eth_set_rx_function(eth_dev);
		return 0;
	}

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	/* Zero out everything after OTX2_DEV to allow proper dev_reset() */
	memset(&dev->otx2_eth_dev_data_start, 0, sizeof(*dev) -
		offsetof(struct otx2_eth_dev, otx2_eth_dev_data_start));

	/* Parse devargs string */
	rc = otx2_ethdev_parse_devargs(eth_dev->device->devargs, dev);
	if (rc) {
		otx2_err("Failed to parse devargs rc=%d", rc);
		goto error;
	}

	if (!dev->mbox_active) {
		/* Initialize the base otx2_dev object only if it is not
		 * already active (e.g. set up by another user of this
		 * PCI device).
		 */
		rc = otx2_dev_init(pci_dev, dev);
		if (rc) {
			otx2_err("Failed to initialize otx2_dev rc=%d", rc);
			goto error;
		}
	}

	/* Grab the NPA LF if required */
	rc = otx2_npa_lf_init(pci_dev, dev);
	if (rc)
		goto otx2_dev_uninit;

	dev->drv_inited = true;
	dev->base = dev->bar2 + (RVU_BLOCK_ADDR_NIX0 << 20);
	dev->lmt_addr = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);

	/* Attach NIX LF */
	rc = nix_lf_attach(dev);
	if (rc)
		goto otx2_npa_uninit;

	/* Get NIX MSIX offset */
	rc = nix_lf_get_msix_offset(dev);
	if (rc)
		goto otx2_npa_uninit;

	/* Register LF irq handlers */
	rc = otx2_nix_register_irqs(eth_dev);
	if (rc)
		goto mbox_detach;

	/* Get maximum number of supported MAC entries */
	max_entries = otx2_cgx_mac_max_entries_get(dev);
	if (max_entries < 0) {
		otx2_err("Failed to get max entries for mac addr");
		rc = -ENOTSUP;
		goto unregister_irq;
	}

	/* For VFs, returned max_entries will be 0. But to keep default MAC
	 * address, one entry must be allocated. So setting up to 1.
	 */
	if (max_entries == 0)
		max_entries = 1;

	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", max_entries *
					       RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		otx2_err("Failed to allocate memory for mac addr");
		rc = -ENOMEM;
		goto unregister_irq;
	}

	dev->max_mac_entries = max_entries;

	rc = otx2_nix_mac_addr_get(eth_dev, dev->mac_addr);
	if (rc)
		goto free_mac_addrs;

	/* Update the mac address */
	memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);

	/* Also sync same MAC address to CGX table */
	otx2_cgx_mac_addr_set(eth_dev, &eth_dev->data->mac_addrs[0]);

	dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
	dev->rx_offload_capa = nix_get_rx_offload_capa(dev);

	/* A0 silicon needs a couple of errata workarounds */
	if (otx2_dev_is_A0(dev)) {
		dev->hwcap |= OTX2_FIXUP_F_MIN_4K_Q;
		dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL;
	}

	otx2_nix_dbg("Port=%d pf=%d vf=%d ver=%s msix_off=%d hwcap=0x%" PRIx64
		     " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
		     eth_dev->data->port_id, dev->pf, dev->vf,
		     OTX2_ETH_DEV_PMD_VERSION, dev->nix_msixoff, dev->hwcap,
		     dev->rx_offload_capa, dev->tx_offload_capa);
	return 0;

free_mac_addrs:
	rte_free(eth_dev->data->mac_addrs);
unregister_irq:
	otx2_nix_unregister_irqs(eth_dev);
mbox_detach:
	otx2_eth_dev_lf_detach(dev->mbox);
otx2_npa_uninit:
	otx2_npa_lf_fini();
otx2_dev_uninit:
	otx2_dev_fini(pci_dev, dev);
error:
	otx2_err("Failed to init nix eth_dev rc=%d", rc);
	return rc;
}

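/*
 * The label chain above unwinds otx2_eth_dev_init() in reverse order of
 * acquisition (MAC table, LF interrupts, attached LFs, NPA LF, base
 * device), so jumping to the label that matches the last successful step
 * releases exactly what has been acquired so far.
 */
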
static int
otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_pci_device *pci_dev;
	int rc;

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Unregister queue irqs */
	oxt2_nix_unregister_queue_irqs(eth_dev);

	rc = nix_lf_free(dev);
	if (rc)
		otx2_err("Failed to free nix lf, rc=%d", rc);

	rc = otx2_npa_lf_fini();
	if (rc)
		otx2_err("Failed to cleanup npa lf, rc=%d", rc);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
	dev->drv_inited = false;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	otx2_nix_unregister_irqs(eth_dev);

	rc = otx2_eth_dev_lf_detach(dev->mbox);
	if (rc)
		otx2_err("Failed to detach resources, rc=%d", rc);

	/* Check if mbox close is needed */
	if (!mbox_close)
		return 0;

	if (otx2_npa_lf_active(dev) || otx2_dev_active_vfs(dev)) {
		/* Will be freed later by PMD */
		eth_dev->data->dev_private = NULL;
		return 0;
	}
	otx2_dev_fini(pci_dev, dev);
	return 0;
}

static int
nix_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	struct otx2_idev_cfg *idev;
	struct otx2_dev *otx2_dev;
	int rc;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (eth_dev) {
		/* Cleanup eth dev */
		rc = otx2_eth_dev_uninit(eth_dev, true);
		if (rc)
			return rc;
		rte_eth_dev_pci_release(eth_dev);
	}

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Check for common resources */
	idev = otx2_intra_dev_get_cfg();
	if (!idev || !idev->npa_lf || idev->npa_lf->pci_dev != pci_dev)
		return 0;

	otx2_dev = container_of(idev->npa_lf, struct otx2_dev, npalf);
	if (otx2_npa_lf_active(otx2_dev) || otx2_dev_active_vfs(otx2_dev))
		goto exit;

	/* Safe to cleanup mbox as no more users */
	otx2_dev_fini(pci_dev, otx2_dev);
	return 0;

exit:
	otx2_info("%s: common resource in use by other devices", pci_dev->name);
	return -EAGAIN;
}

static int
nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	int rc;

	RTE_SET_USED(pci_drv);

	rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct otx2_eth_dev),
					   otx2_eth_dev_init);

	/* On error on secondary, recheck if port exists in primary or
	 * is in mid of detach state.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
		if (!rte_eth_dev_allocated(pci_dev->device.name))
			return 0;

	return rc;
}

static const struct rte_pci_id pci_nix_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			       PCI_DEVID_OCTEONTX2_RVU_AF_VF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver pci_nix = {
	.id_table = pci_nix_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA |
			RTE_PCI_DRV_INTR_LSC,
	.probe = nix_probe,
	.remove = nix_remove,
};

RTE_PMD_REGISTER_PCI(net_octeontx2, pci_nix);
RTE_PMD_REGISTER_PCI_TABLE(net_octeontx2, pci_nix_map);
RTE_PMD_REGISTER_KMOD_DEP(net_octeontx2, "vfio-pci");
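
/*
 * Usage note (illustrative): given the kernel module dependency declared
 * above, the device must be bound to vfio-pci before EAL can claim it,
 * for example with:
 *
 *	usertools/dpdk-devbind.py --bind vfio-pci <BDF>
 *
 * where <BDF> is the NIX PF/VF's PCI address.
 */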