/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <ethdev_pci.h>

#include "otx2_common.h"
#include "otx_ep_common.h"
#include "otx_ep_vf.h"
#include "otx2_ep_vf.h"
#include "otx_ep_rxtx.h"

#define OTX_EP_DEV(_eth_dev) \
	((struct otx_ep_device *)(_eth_dev)->data->dev_private)
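
/* Rx/Tx descriptor count limits reported to applications via
 * rte_eth_dev_info_get().
 */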
static const struct rte_eth_desc_lim otx_ep_rx_desc_lim = {
	.nb_max = OTX_EP_MAX_OQ_DESCRIPTORS,
	.nb_min = OTX_EP_MIN_OQ_DESCRIPTORS,
	.nb_align = OTX_EP_RXD_ALIGN,
};

static const struct rte_eth_desc_lim otx_ep_tx_desc_lim = {
	.nb_max = OTX_EP_MAX_IQ_DESCRIPTORS,
	.nb_min = OTX_EP_MIN_IQ_DESCRIPTORS,
	.nb_align = OTX_EP_TXD_ALIGN,
};
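
/* Report device capabilities: link speed, queue limits, offload flags
 * and descriptor limits.
 */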
static int
otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
		    struct rte_eth_dev_info *devinfo)
{
	struct otx_ep_device *otx_epvf;

	otx_epvf = OTX_EP_DEV(eth_dev);

	devinfo->speed_capa = ETH_LINK_SPEED_10G;
	devinfo->max_rx_queues = otx_epvf->max_rx_queues;
	devinfo->max_tx_queues = otx_epvf->max_tx_queues;

	devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
	devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
	devinfo->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
	devinfo->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
	devinfo->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;

	devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;

	devinfo->rx_desc_lim = otx_ep_rx_desc_lim;
	devinfo->tx_desc_lim = otx_ep_tx_desc_lim;

	return 0;
}
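
/* Start traffic: enable the IQ/OQ pairs, then credit the full ring to
 * each Rx doorbell so the hardware can start filling buffers.
 */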
static int
otx_ep_dev_start(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	unsigned int q;
	int ret;

	/* Enable IQ/OQ for this device */
	ret = otx_epvf->fn_list.enable_io_queues(otx_epvf);
	if (ret) {
		otx_ep_err("IOQ enable failed\n");
		return ret;
	}

	for (q = 0; q < otx_epvf->nb_rx_queues; q++) {
		rte_write32(otx_epvf->droq[q]->nb_desc,
			    otx_epvf->droq[q]->pkts_credit_reg);
		rte_wmb();
		otx_ep_info("OQ[%d] dbells [%d]\n", q,
			    rte_read32(otx_epvf->droq[q]->pkts_credit_reg));
	}

	otx_ep_info("dev started\n");

	return 0;
}

/* Stop device and disable input/output functions */
static int
otx_ep_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);

	otx_epvf->fn_list.disable_io_queues(otx_epvf);

	return 0;
}
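
/* Chip-specific bring-up: keyed on the PCI device ID, this binds the
 * per-chip function list (fn_list) and runs the matching setup routine.
 */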
static int
otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
{
	struct rte_pci_device *pdev = otx_epvf->pdev;
	uint32_t dev_id = pdev->id.device_id;
	int ret = 0;

	switch (dev_id) {
	case PCI_DEVID_OCTEONTX_EP_VF:
		otx_epvf->chip_id = dev_id;
		ret = otx_ep_vf_setup_device(otx_epvf);
		otx_epvf->fn_list.disable_io_queues(otx_epvf);
		break;
	case PCI_DEVID_OCTEONTX2_EP_NET_VF:
	case PCI_DEVID_CN98XX_EP_NET_VF:
		otx_epvf->chip_id = dev_id;
		ret = otx2_ep_vf_setup_device(otx_epvf);
		otx_epvf->fn_list.disable_io_queues(otx_epvf);
		break;
	default:
		otx_ep_err("Unsupported device\n");
		ret = -EINVAL;
	}

	if (!ret)
		otx_ep_info("OTX_EP dev_id[%d]\n", dev_id);

	return ret;
}

/* OTX_EP VF device initialization */
static int
otx_epdev_init(struct otx_ep_device *otx_epvf)
{
	uint32_t ethdev_queues;
	int ret;

	ret = otx_ep_chip_specific_setup(otx_epvf);
	if (ret) {
		otx_ep_err("Chip specific setup failed\n");
		return ret;
	}

	otx_epvf->fn_list.setup_device_regs(otx_epvf);

	/* Select Rx/Tx burst handlers for the detected chip */
	otx_epvf->eth_dev->rx_pkt_burst = &otx_ep_recv_pkts;
	if (otx_epvf->chip_id == PCI_DEVID_OCTEONTX_EP_VF)
		otx_epvf->eth_dev->tx_pkt_burst = &otx_ep_xmit_pkts;
	else if (otx_epvf->chip_id == PCI_DEVID_OCTEONTX2_EP_NET_VF ||
		 otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF)
		otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;

	/* rings_per_vf bounds both the Rx and the Tx queue counts */
	ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
	otx_epvf->max_rx_queues = ethdev_queues;
	otx_epvf->max_tx_queues = ethdev_queues;

	otx_ep_info("OTX_EP Device is Ready\n");

	return ret;
}
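
/* Check the requested queue counts against the VF limits and latch the
 * offload flags chosen by the application.
 */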
static int
otx_ep_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_rxmode *rxmode;
	struct rte_eth_txmode *txmode;
	struct rte_eth_conf *conf;

	conf = &data->dev_conf;
	rxmode = &conf->rxmode;
	txmode = &conf->txmode;

	if (eth_dev->data->nb_rx_queues > otx_epvf->max_rx_queues ||
	    eth_dev->data->nb_tx_queues > otx_epvf->max_tx_queues) {
		otx_ep_err("invalid num queues\n");
		return -EINVAL;
	}
	otx_ep_info("OTX_EP Device is configured with num_txq %d num_rxq %d\n",
		    eth_dev->data->nb_tx_queues, eth_dev->data->nb_rx_queues);

	otx_epvf->rx_offloads = rxmode->offloads;
	otx_epvf->tx_offloads = txmode->offloads;

	return 0;
}

/**
 * Set up our receive queue/ringbuffer. This is the
 * queue the Octeon uses to send us packets and
 * responses. We are given a memory pool for our
 * packet buffers that are used to populate the receive
 * queue.
 *
 * @param eth_dev
 *    Pointer to the structure rte_eth_dev
 *
 * @param q_no
 *    Queue number
 *
 * @param num_rx_descs
 *    Number of entries in the queue
 *
 * @param socket_id
 *    NUMA socket ID to allocate memory from
 *
 * @param rx_conf
 *    Pointer to the structure rte_eth_rxconf
 *
 * @param mp
 *    Pointer to the packet pool
 *
 * @return
 *    - On success, return 0
 *    - On failure, return -1
 */
static int
otx_ep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		      uint16_t num_rx_descs, unsigned int socket_id,
		      const struct rte_eth_rxconf *rx_conf __rte_unused,
		      struct rte_mempool *mp)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint16_t buf_size;

	if (q_no >= otx_epvf->max_rx_queues) {
		otx_ep_err("Invalid rx queue number %u\n", q_no);
		return -EINVAL;
	}

	if (num_rx_descs & (num_rx_descs - 1)) {
		otx_ep_err("Invalid rx desc number, should be a power of 2: %u\n",
			   num_rx_descs);
		return -EINVAL;
	}
	if (num_rx_descs < (SDP_GBL_WMARK * 8)) {
		otx_ep_err("Invalid rx desc number, should be at least 8 * SDP_GBL_WMARK: %u\n",
			   num_rx_descs);
		return -EINVAL;
	}

	otx_ep_dbg("setting up rx queue %u\n", q_no);

	/* Usable buffer size is the mbuf data room minus the headroom */
	mbp_priv = rte_mempool_get_priv(mp);
	buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

	if (otx_ep_setup_oqs(otx_epvf, q_no, num_rx_descs, buf_size, mp,
			     socket_id)) {
		otx_ep_err("droq allocation failed\n");
		return -1;
	}

	eth_dev->data->rx_queues[q_no] = otx_epvf->droq[q_no];

	return 0;
}

/**
 * Release the receive queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param rxq
 *    Opaque pointer to the receive queue to release
 *
 * @return
 *    - nothing
 */
static void
otx_ep_rx_queue_release(void *rxq)
{
	struct otx_ep_droq *rq = (struct otx_ep_droq *)rxq;
	struct otx_ep_device *otx_epvf = rq->otx_ep_dev;
	int q_id = rq->q_no;

	if (otx_ep_delete_oqs(otx_epvf, q_id))
		otx_ep_err("Failed to delete OQ:%d\n", q_id);
}

/**
 * Allocate and initialize SW ring. Initialize associated HW registers.
 *
 * @param eth_dev
 *   Pointer to structure rte_eth_dev
 *
 * @param q_no
 *   Queue number
 *
 * @param num_tx_descs
 *   Number of ringbuffer descriptors
 *
 * @param socket_id
 *   NUMA socket id, used for memory allocations
 *
 * @param tx_conf
 *   Pointer to the structure rte_eth_txconf
 *
 * @return
 *   - On success, return 0
 *   - On failure, return -errno value
 */
static int
otx_ep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		      uint16_t num_tx_descs, unsigned int socket_id,
		      const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	int retval;

	if (q_no >= otx_epvf->max_tx_queues) {
		otx_ep_err("Invalid tx queue number %u\n", q_no);
		return -EINVAL;
	}
	if (num_tx_descs & (num_tx_descs - 1)) {
		otx_ep_err("Invalid tx desc number, should be a power of 2: %u\n",
			   num_tx_descs);
		return -EINVAL;
	}

	retval = otx_ep_setup_iqs(otx_epvf, q_no, num_tx_descs, socket_id);
	if (retval) {
		otx_ep_err("IQ(TxQ) creation failed.\n");
		return retval;
	}

	eth_dev->data->tx_queues[q_no] = otx_epvf->instr_queue[q_no];
	otx_ep_dbg("tx queue[%d] setup\n", q_no);

	return 0;
}

/**
 * Release the transmit queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param txq
 *    Opaque pointer to the transmit queue to release
 *
 * @return
 *    - nothing
 */
static void
otx_ep_tx_queue_release(void *txq)
{
	struct otx_ep_instr_queue *tq = (struct otx_ep_instr_queue *)txq;

	otx_ep_delete_iqs(tq->otx_ep_dev, tq->q_no);
}

/* Ethdev callbacks supported by this PMD */
static const struct eth_dev_ops otx_ep_eth_dev_ops = {
	.dev_configure = otx_ep_dev_configure,
	.dev_start = otx_ep_dev_start,
	.dev_stop = otx_ep_dev_stop,
	.rx_queue_setup = otx_ep_rx_queue_setup,
	.rx_queue_release = otx_ep_rx_queue_release,
	.tx_queue_setup = otx_ep_tx_queue_setup,
	.tx_queue_release = otx_ep_tx_queue_release,
	.dev_infos_get = otx_ep_dev_info_get,
};
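
/* Disable the IO queues and free every Rx/Tx ring ahead of detach */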
static int
otx_epdev_exit(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf;
	uint32_t num_queues, q;

	otx_ep_info("%s:\n", __func__);

	otx_epvf = OTX_EP_DEV(eth_dev);

	otx_epvf->fn_list.disable_io_queues(otx_epvf);

	num_queues = otx_epvf->nb_rx_queues;
	for (q = 0; q < num_queues; q++) {
		if (otx_ep_delete_oqs(otx_epvf, q)) {
			otx_ep_err("Failed to delete OQ:%d\n", q);
			return -EINVAL;
		}
	}
	otx_ep_info("Num OQs:%d freed\n", otx_epvf->nb_rx_queues);

	num_queues = otx_epvf->nb_tx_queues;
	for (q = 0; q < num_queues; q++) {
		if (otx_ep_delete_iqs(otx_epvf, q)) {
			otx_ep_err("Failed to delete IQ:%d\n", q);
			return -EINVAL;
		}
	}
	otx_ep_dbg("Num IQs:%d freed\n", otx_epvf->nb_tx_queues);

	return 0;
}
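
/* ethdev uninit hook; teardown is only performed by the primary process */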
static int
otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	otx_epdev_exit(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}
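
/* Per-port init: register the ops table, assign a random MAC address,
 * map the BAR0 registers and run the chip bring-up.
 */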
static int
otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct rte_ether_addr vf_mac_addr;
	int ret;

	/* Single process support */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	otx_epvf->eth_dev = eth_dev;
	otx_epvf->port_id = eth_dev->data->port_id;
	eth_dev->dev_ops = &otx_ep_eth_dev_ops;
	eth_dev->data->mac_addrs = rte_zmalloc("otx_ep", RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		otx_ep_err("MAC addresses memory allocation failed\n");
		eth_dev->dev_ops = NULL;
		return -ENOMEM;
	}
	rte_eth_random_addr(vf_mac_addr.addr_bytes);
	rte_ether_addr_copy(&vf_mac_addr, eth_dev->data->mac_addrs);
	otx_epvf->hw_addr = pdev->mem_resource[0].addr;
	otx_epvf->pdev = pdev;

	ret = otx_epdev_init(otx_epvf);
	if (ret)
		return ret;
	if (pdev->id.device_id == PCI_DEVID_OCTEONTX2_EP_NET_VF)
		otx_epvf->pkind = SDP_OTX2_PKIND;
	else
		otx_epvf->pkind = SDP_PKIND;
	otx_ep_info("using pkind %d\n", otx_epvf->pkind);

	return 0;
}
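
/* PCI probe/remove: the generic ethdev-PCI helpers allocate/free the
 * port and invoke the init/uninit callbacks above.
 */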
static int
otx_ep_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			 struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct otx_ep_device),
					     otx_ep_eth_dev_init);
}

static int
otx_ep_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev,
					      otx_ep_eth_dev_uninit);
}

/* Set of PCI devices this driver supports */
static const struct rte_pci_id pci_id_otx_ep_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX_EP_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_EP_NET_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN98XX_EP_NET_VF) },
	{ .vendor_id = 0, /* sentinel */ }
};

static struct rte_pci_driver rte_otx_ep_pmd = {
	.id_table = pci_id_otx_ep_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = otx_ep_eth_dev_pci_probe,
	.remove = otx_ep_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_otx_ep, rte_otx_ep_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_otx_ep, pci_id_otx_ep_map);
RTE_PMD_REGISTER_KMOD_DEP(net_otx_ep, "* igb_uio | vfio-pci");
RTE_LOG_REGISTER_DEFAULT(otx_net_ep_logtype, NOTICE);