/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_ethdev_pci.h>
#include <rte_malloc.h>

#include "otx2_ethdev.h"

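/* The Rx/Tx burst function setters below are placeholders at this
 * stage; presumably the actual scalar/vector datapath selection is
 * wired up by the later patches that add queue setup and the Rx/Tx
 * burst routines.
 */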
static void
otx2_eth_set_rx_function(struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(eth_dev);
}

static void
otx2_eth_set_tx_function(struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(eth_dev);
}

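/* Advertised offload capabilities. NIX_RX_OFFLOAD_CAPA and
 * NIX_TX_OFFLOAD_CAPA are masks from otx2_ethdev.h; VFs drop
 * DEV_RX_OFFLOAD_TIMESTAMP, presumably because HW timestamping is
 * managed through the PF.
 */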
static inline uint64_t
nix_get_rx_offload_capa(struct otx2_eth_dev *dev)
{
	uint64_t capa = NIX_RX_OFFLOAD_CAPA;

	if (otx2_dev_is_vf(dev))
		capa &= ~DEV_RX_OFFLOAD_TIMESTAMP;

	return capa;
}

static inline uint64_t
nix_get_tx_offload_capa(struct otx2_eth_dev *dev)
{
	RTE_SET_USED(dev);

	return NIX_TX_OFFLOAD_CAPA;
}

static const struct otx2_dev_ops otx2_dev_ops = {
	.link_status_update = otx2_eth_dev_link_status_update,
};

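/* Allocate a NIX LF (this PF/VF's slice of the NIX block, in RVU
 * terms) through the AF mailbox. The response carries the HW
 * parameters (channels, stats counters, interrupt counts) that the
 * driver caches in otx2_eth_dev for later use.
 */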
static int
nix_lf_alloc(struct otx2_eth_dev *dev, uint32_t nb_rxq, uint32_t nb_txq)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_lf_alloc_req *req;
	struct nix_lf_alloc_rsp *rsp;
	int rc;

	req = otx2_mbox_alloc_msg_nix_lf_alloc(mbox);
	req->rq_cnt = nb_rxq;
	req->sq_cnt = nb_txq;
	req->cq_cnt = nb_rxq;
	/* XQE_SZ should be in sync with NIX_CQ_ENTRY_SZ */
	RTE_BUILD_BUG_ON(NIX_CQ_ENTRY_SZ != 128);
	req->xqe_sz = NIX_XQESZ_W16;
	req->rss_sz = dev->rss_info.rss_size;
	req->rss_grps = NIX_RSS_GRPS;
	req->npa_func = otx2_npa_pf_func_get();
	req->sso_func = otx2_sso_pf_func_get();
	req->rx_cfg = BIT_ULL(35 /* DIS_APAD */);
	if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
				DEV_RX_OFFLOAD_UDP_CKSUM)) {
		req->rx_cfg |= BIT_ULL(37 /* CSUM_OL4 */);
		req->rx_cfg |= BIT_ULL(36 /* CSUM_IL4 */);
	}

	rc = otx2_mbox_process_msg(mbox, (void *)&rsp);
	if (rc)
		return rc;

	dev->sqb_size = rsp->sqb_size;
	dev->tx_chan_base = rsp->tx_chan_base;
	dev->rx_chan_base = rsp->rx_chan_base;
	dev->rx_chan_cnt = rsp->rx_chan_cnt;
	dev->tx_chan_cnt = rsp->tx_chan_cnt;
	dev->lso_tsov4_idx = rsp->lso_tsov4_idx;
	dev->lso_tsov6_idx = rsp->lso_tsov6_idx;
	dev->lf_tx_stats = rsp->lf_tx_stats;
	dev->lf_rx_stats = rsp->lf_rx_stats;
	dev->cints = rsp->cints;
	dev->qints = rsp->qints;
	dev->npc_flow.channel = dev->rx_chan_base;

	return 0;
}

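/* Free the NIX LF back to the AF. The LF's Rx/Tx contexts are first
 * flushed out of the NDC (RVU's context cache) so no dirty state is
 * left behind when the LF is released.
 */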
static int
nix_lf_free(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct nix_lf_free_req *req;
	struct ndc_sync_op *ndc_req;
	int rc;

	/* Sync NDC-NIX for LF */
	ndc_req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
	ndc_req->nix_lf_tx_sync = 1;
	ndc_req->nix_lf_rx_sync = 1;
	rc = otx2_mbox_process(mbox);
	if (rc)
		otx2_err("Error on NDC-NIX-[TX, RX] LF sync, rc %d", rc);

	req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
	/* Let the AF driver free all NPC entries this NIX LF allocated
	 * through the NPC mailbox.
	 */
	req->flags = 0;

	return otx2_mbox_process(mbox);
}

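/* dev_configure callback: validate the requested configuration, free
 * resources left over from a previous configure, then allocate a NIX
 * LF sized for the requested queue counts and register its queue
 * interrupts.
 */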
static int
otx2_nix_configure(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_conf *conf = &data->dev_conf;
	struct rte_eth_rxmode *rxmode = &conf->rxmode;
	struct rte_eth_txmode *txmode = &conf->txmode;
	char ea_fmt[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *ea;
	uint8_t nb_rxq, nb_txq;
	int rc;

	rc = -EINVAL;

	/* Sanity checks */
	if (rte_eal_has_hugepages() == 0) {
		otx2_err("Huge page is not configured");
		goto fail;
	}

	if (rte_eal_iova_mode() != RTE_IOVA_VA) {
		otx2_err("iova mode should be va");
		goto fail;
	}

	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
		otx2_err("Setting link speed/duplex not supported");
		goto fail;
	}

	if (conf->dcb_capability_en == 1) {
		otx2_err("dcb enable is not supported");
		goto fail;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		otx2_err("Flow director is not supported");
		goto fail;
	}

	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
	    rxmode->mq_mode != ETH_MQ_RX_RSS) {
		otx2_err("Unsupported mq rx mode %d", rxmode->mq_mode);
		goto fail;
	}

	if (txmode->mq_mode != ETH_MQ_TX_NONE) {
		otx2_err("Unsupported mq tx mode %d", txmode->mq_mode);
		goto fail;
	}

	/* Free the resources allocated from the previous configure */
	if (dev->configured == 1) {
		oxt2_nix_unregister_queue_irqs(eth_dev);
		nix_lf_free(dev);
	}

	if (otx2_dev_is_A0(dev) &&
	    (txmode->offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
	    ((txmode->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
	     (txmode->offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM))) {
		otx2_err("Outer IP and SCTP checksum unsupported");
		goto fail;
	}

	dev->rx_offloads = rxmode->offloads;
	dev->tx_offloads = txmode->offloads;
	dev->rss_info.rss_grps = NIX_RSS_GRPS;

	nb_rxq = RTE_MAX(data->nb_rx_queues, 1);
	nb_txq = RTE_MAX(data->nb_tx_queues, 1);

	/* Alloc a nix lf */
	rc = nix_lf_alloc(dev, nb_rxq, nb_txq);
	if (rc) {
		otx2_err("Failed to init nix_lf rc=%d", rc);
		goto fail;
	}

	/* Register queue IRQs */
	rc = oxt2_nix_register_queue_irqs(eth_dev);
	if (rc) {
		otx2_err("Failed to register queue interrupts rc=%d", rc);
		goto free_nix_lf;
	}

	/* Update the mac address */
	ea = eth_dev->data->mac_addrs;
	memcpy(ea, dev->mac_addr, RTE_ETHER_ADDR_LEN);
	if (rte_is_zero_ether_addr(ea))
		rte_eth_random_addr((uint8_t *)ea);

	rte_ether_format_addr(ea_fmt, RTE_ETHER_ADDR_FMT_SIZE, ea);

	otx2_nix_dbg("Configured port%d mac=%s nb_rxq=%d nb_txq=%d"
		     " rx_offloads=0x%" PRIx64 " tx_offloads=0x%" PRIx64
		     " rx_flags=0x%x tx_flags=0x%x",
		     eth_dev->data->port_id, ea_fmt, nb_rxq,
		     nb_txq, dev->rx_offloads, dev->tx_offloads,
		     dev->rx_offload_flags, dev->tx_offload_flags);

	/* All good */
	dev->configured = 1;
	dev->configured_nb_rx_qs = data->nb_rx_queues;
	dev->configured_nb_tx_qs = data->nb_tx_queues;
	return 0;

free_nix_lf:
	rc = nix_lf_free(dev);
fail:
	return rc;
}

/* Initialize and register driver with DPDK application */
static const struct eth_dev_ops otx2_eth_dev_ops = {
	.dev_infos_get = otx2_nix_info_get,
	.dev_configure = otx2_nix_configure,
	.link_update = otx2_nix_link_update,
	.stats_get = otx2_nix_dev_stats_get,
	.stats_reset = otx2_nix_dev_stats_reset,
	.get_reg = otx2_nix_dev_get_reg,
	.promiscuous_enable = otx2_nix_promisc_enable,
	.promiscuous_disable = otx2_nix_promisc_disable,
	.allmulticast_enable = otx2_nix_allmulticast_enable,
	.allmulticast_disable = otx2_nix_allmulticast_disable,
	.queue_stats_mapping_set = otx2_nix_queue_stats_mapping,
	.xstats_get = otx2_nix_xstats_get,
	.xstats_get_names = otx2_nix_xstats_get_names,
	.xstats_reset = otx2_nix_xstats_reset,
	.xstats_get_by_id = otx2_nix_xstats_get_by_id,
	.xstats_get_names_by_id = otx2_nix_xstats_get_names_by_id,
};

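/* Attach a NIX LF to this PCI function via the AF resource-attach
 * mailbox. req->modify requests an incremental attach, leaving
 * already-attached resources (such as the shared NPA LF) in place.
 */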
static inline int
nix_lf_attach(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct rsrc_attach_req *req;

	/* Attach NIX(lf) */
	req = otx2_mbox_alloc_msg_attach_resources(mbox);
	req->modify = true;
	req->nixlf = true;

	return otx2_mbox_process(mbox);
}

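/* Query the MSIX vector offset assigned to the NIX LF, i.e. where the
 * LF's interrupt vectors start within this PCI function's MSIX table.
 */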
static inline int
nix_lf_get_msix_offset(struct otx2_eth_dev *dev)
{
	struct otx2_mbox *mbox = dev->mbox;
	struct msix_offset_rsp *msix_rsp;
	int rc;

	/* Get NPA and NIX MSIX vector offsets */
	otx2_mbox_alloc_msg_msix_offset(mbox);

	rc = otx2_mbox_process_msg(mbox, (void *)&msix_rsp);
	if (rc)
		return rc;

	dev->nix_msixoff = msix_rsp->nix_msixoff;

	return 0;
}

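/* Detach every RVU LF owned by this function except the NPA LF, which
 * is shared with the other otx2 PMDs (mempool/event) and released
 * separately through otx2_npa_lf_fini().
 */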
static inline int
otx2_eth_dev_lf_detach(struct otx2_mbox *mbox)
{
	struct rsrc_detach_req *req;

	req = otx2_mbox_alloc_msg_detach_resources(mbox);

	/* Detach all except npa lf */
	req->partial = true;
	req->nixlf = true;
	req->sso = true;
	req->ssow = true;
	req->timlfs = true;
	req->cptlfs = true;

	return otx2_mbox_process(mbox);
}

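/* Probe-time initialization: set up the base otx2 device and mailbox,
 * grab the shared NPA LF, attach a NIX LF, register IRQ handlers and
 * program the default MAC address. Error paths unwind in reverse
 * order through the labels at the bottom of the function.
 */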
static int
otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_pci_device *pci_dev;
	int rc, max_entries;

	eth_dev->dev_ops = &otx2_eth_dev_ops;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* Setup callbacks for secondary process */
		otx2_eth_set_tx_function(eth_dev);
		otx2_eth_set_rx_function(eth_dev);
		return 0;
	}

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	/* Zero out everything after OTX2_DEV to allow proper dev_reset() */
	memset(&dev->otx2_eth_dev_data_start, 0, sizeof(*dev) -
		offsetof(struct otx2_eth_dev, otx2_eth_dev_data_start));

	/* Parse devargs string */
	rc = otx2_ethdev_parse_devargs(eth_dev->device->devargs, dev);
	if (rc) {
		otx2_err("Failed to parse devargs rc=%d", rc);
		goto error;
	}

	if (!dev->mbox_active) {
		/* Initialize the base otx2_dev object only if it is not
		 * already set up (i.e. the mailbox is not yet active).
		 */
		rc = otx2_dev_init(pci_dev, dev);
		if (rc) {
			otx2_err("Failed to initialize otx2_dev rc=%d", rc);
			goto error;
		}
	}

	/* Device generic callbacks */
	dev->ops = &otx2_dev_ops;
	dev->eth_dev = eth_dev;

	/* Grab the NPA LF if required */
	rc = otx2_npa_lf_init(pci_dev, dev);
	if (rc)
		goto otx2_dev_uninit;

	dev->drv_inited = true;
	dev->base = dev->bar2 + (RVU_BLOCK_ADDR_NIX0 << 20);
	dev->lmt_addr = dev->bar2 + (RVU_BLOCK_ADDR_LMT << 20);

	/* Attach NIX LF */
	rc = nix_lf_attach(dev);
	if (rc)
		goto otx2_npa_uninit;

	/* Get NIX MSIX offset */
	rc = nix_lf_get_msix_offset(dev);
	if (rc)
		goto otx2_npa_uninit;

	/* Register LF irq handlers */
	rc = otx2_nix_register_irqs(eth_dev);
	if (rc)
		goto mbox_detach;

	/* Get maximum number of supported MAC entries */
	max_entries = otx2_cgx_mac_max_entries_get(dev);
	if (max_entries < 0) {
		otx2_err("Failed to get max entries for mac addr");
		rc = -ENOTSUP;
		goto unregister_irq;
	}

	/* For VFs, returned max_entries will be 0. But to keep the default
	 * MAC address, one entry must be allocated, so default to 1.
	 */
	if (max_entries == 0)
		max_entries = 1;

	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", max_entries *
					       RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		otx2_err("Failed to allocate memory for mac addr");
		rc = -ENOMEM;
		goto unregister_irq;
	}

	dev->max_mac_entries = max_entries;

	rc = otx2_nix_mac_addr_get(eth_dev, dev->mac_addr);
	if (rc)
		goto free_mac_addrs;

	/* Update the mac address */
	memcpy(eth_dev->data->mac_addrs, dev->mac_addr, RTE_ETHER_ADDR_LEN);

	/* Also sync same MAC address to CGX table */
	otx2_cgx_mac_addr_set(eth_dev, &eth_dev->data->mac_addrs[0]);

	dev->tx_offload_capa = nix_get_tx_offload_capa(dev);
	dev->rx_offload_capa = nix_get_rx_offload_capa(dev);

	if (otx2_dev_is_A0(dev)) {
		dev->hwcap |= OTX2_FIXUP_F_MIN_4K_Q;
		dev->hwcap |= OTX2_FIXUP_F_LIMIT_CQ_FULL;
	}

	otx2_nix_dbg("Port=%d pf=%d vf=%d ver=%s msix_off=%d hwcap=0x%" PRIx64
		     " rxoffload_capa=0x%" PRIx64 " txoffload_capa=0x%" PRIx64,
		     eth_dev->data->port_id, dev->pf, dev->vf,
		     OTX2_ETH_DEV_PMD_VERSION, dev->nix_msixoff, dev->hwcap,
		     dev->rx_offload_capa, dev->tx_offload_capa);

	return 0;

free_mac_addrs:
	rte_free(eth_dev->data->mac_addrs);
unregister_irq:
	otx2_nix_unregister_irqs(eth_dev);
mbox_detach:
	otx2_eth_dev_lf_detach(dev->mbox);
otx2_npa_uninit:
	otx2_npa_lf_fini();
otx2_dev_uninit:
	otx2_dev_fini(pci_dev, dev);
error:
	otx2_err("Failed to init nix eth_dev rc=%d", rc);
	return rc;
}

static int
otx2_eth_dev_uninit(struct rte_eth_dev *eth_dev, bool mbox_close)
{
	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
	struct rte_pci_device *pci_dev;
	int rc;

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Unregister queue irqs */
	oxt2_nix_unregister_queue_irqs(eth_dev);

	rc = nix_lf_free(dev);
	if (rc)
		otx2_err("Failed to free nix lf, rc=%d", rc);

	rc = otx2_npa_lf_fini();
	if (rc)
		otx2_err("Failed to cleanup npa lf, rc=%d", rc);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
	dev->drv_inited = false;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	otx2_nix_unregister_irqs(eth_dev);

	rc = otx2_eth_dev_lf_detach(dev->mbox);
	if (rc)
		otx2_err("Failed to detach resources, rc=%d", rc);

	/* Check if mbox close is needed */
	if (!mbox_close)
		return 0;

	if (otx2_npa_lf_active(dev) || otx2_dev_active_vfs(dev)) {
		/* Will be freed later by PMD */
		eth_dev->data->dev_private = NULL;
		return 0;
	}

	otx2_dev_fini(pci_dev, dev);
	return 0;
}

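/* PCI remove: uninit the ethdev first, then release the common otx2
 * resources (mailbox, shared NPA LF) once no other device is using
 * them.
 */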
static int
nix_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	struct otx2_idev_cfg *idev;
	struct otx2_dev *otx2_dev;
	int rc;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (eth_dev) {
		/* Cleanup eth dev */
		rc = otx2_eth_dev_uninit(eth_dev, true);
		if (rc)
			return rc;

		rte_eth_dev_pci_release(eth_dev);
	}

	/* Nothing to be done for secondary processes */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* Check for common resources */
	idev = otx2_intra_dev_get_cfg();
	if (!idev || !idev->npa_lf || idev->npa_lf->pci_dev != pci_dev)
		return 0;

	otx2_dev = container_of(idev->npa_lf, struct otx2_dev, npalf);

	if (otx2_npa_lf_active(otx2_dev) || otx2_dev_active_vfs(otx2_dev))
		goto exit;

	/* Safe to cleanup mbox as no more users */
	otx2_dev_fini(pci_dev, otx2_dev);
	return 0;

exit:
	otx2_info("%s: common resource in use by other devices", pci_dev->name);
	return -EAGAIN;
}

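/* PCI probe: allocate an ethdev with otx2_eth_dev private data and run
 * otx2_eth_dev_init(). A failing secondary-process probe is tolerated
 * when the port no longer exists in the primary (i.e. it is mid-detach).
 */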
static int
nix_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	int rc;

	RTE_SET_USED(pci_drv);

	rc = rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct otx2_eth_dev),
					   otx2_eth_dev_init);

	/* On error on secondary, recheck if the port exists in the primary
	 * or is in the middle of detach.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY && rc)
		if (!rte_eth_dev_allocated(pci_dev->device.name))
			return 0;

	return rc;
}

static const struct rte_pci_id pci_nix_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_VF)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			       PCI_DEVID_OCTEONTX2_RVU_AF_VF)
	},
	{
		.vendor_id = 0,
	},
};

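/* NEED_MAPPING for BAR access, IOVA_AS_VA to match the VA-only IOVA
 * requirement enforced in otx2_nix_configure(), and INTR_LSC for link
 * state change interrupts.
 */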
static struct rte_pci_driver pci_nix = {
	.id_table = pci_nix_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA |
			RTE_PCI_DRV_INTR_LSC,
	.probe = nix_probe,
	.remove = nix_remove,
};

RTE_PMD_REGISTER_PCI(net_octeontx2, pci_nix);
RTE_PMD_REGISTER_PCI_TABLE(net_octeontx2, pci_nix_map);
RTE_PMD_REGISTER_KMOD_DEP(net_octeontx2, "vfio-pci");