/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <ethdev_pci.h>

#include "otx2_common.h"
#include "otx_ep_common.h"
#include "otx_ep_vf.h"
#include "otx2_ep_vf.h"
#include "otx_ep_rxtx.h"

#define OTX_EP_DEV(_eth_dev) \
	((struct otx_ep_device *)(_eth_dev)->data->dev_private)
static const struct rte_eth_desc_lim otx_ep_rx_desc_lim = {
	.nb_max = OTX_EP_MAX_OQ_DESCRIPTORS,
	.nb_min = OTX_EP_MIN_OQ_DESCRIPTORS,
	.nb_align = OTX_EP_RXD_ALIGN,
};

static const struct rte_eth_desc_lim otx_ep_tx_desc_lim = {
	.nb_max = OTX_EP_MAX_IQ_DESCRIPTORS,
	.nb_min = OTX_EP_MIN_IQ_DESCRIPTORS,
	.nb_align = OTX_EP_TXD_ALIGN,
};
static int
otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
		    struct rte_eth_dev_info *devinfo)
{
	struct otx_ep_device *otx_epvf;

	otx_epvf = OTX_EP_DEV(eth_dev);

	devinfo->speed_capa = ETH_LINK_SPEED_10G;
	devinfo->max_rx_queues = otx_epvf->max_rx_queues;
	devinfo->max_tx_queues = otx_epvf->max_tx_queues;

	devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
	devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
	devinfo->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
	devinfo->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
	devinfo->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;

	devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;

	devinfo->rx_desc_lim = otx_ep_rx_desc_lim;
	devinfo->tx_desc_lim = otx_ep_tx_desc_lim;

	return 0;
}
static int
otx_ep_dev_start(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf;
	unsigned int q;
	int ret;

	otx_epvf = (struct otx_ep_device *)OTX_EP_DEV(eth_dev);
	/* Enable IQ/OQ for this device */
	ret = otx_epvf->fn_list.enable_io_queues(otx_epvf);
	if (ret) {
		otx_ep_err("IOQ enable failed\n");
		return ret;
	}
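	/* Prime each OQ's credit doorbell with the full ring size so the
	 * hardware sees a free receive buffer behind every descriptor.
	 */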
	for (q = 0; q < otx_epvf->nb_rx_queues; q++) {
		rte_write32(otx_epvf->droq[q]->nb_desc,
			    otx_epvf->droq[q]->pkts_credit_reg);

		rte_wmb();
		otx_ep_info("OQ[%d] dbells [%d]\n", q,
			    rte_read32(otx_epvf->droq[q]->pkts_credit_reg));
	}

	otx_ep_info("dev started\n");

	return 0;
}
/* Stop device and disable input/output functions */
static int
otx_ep_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);

	otx_epvf->fn_list.disable_io_queues(otx_epvf);

	return 0;
}
static int
otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
{
	struct rte_pci_device *pdev = otx_epvf->pdev;
	uint32_t dev_id = pdev->id.device_id;
	int ret = 0;
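	/* Dispatch on the PCI device ID: OCTEON TX and OCTEON TX2/CN98xx
	 * endpoint VFs need different chip-specific setup routines.
	 */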
	switch (dev_id) {
	case PCI_DEVID_OCTEONTX_EP_VF:
		otx_epvf->chip_id = dev_id;
		ret = otx_ep_vf_setup_device(otx_epvf);
		otx_epvf->fn_list.disable_io_queues(otx_epvf);
		break;
	case PCI_DEVID_OCTEONTX2_EP_NET_VF:
	case PCI_DEVID_CN98XX_EP_NET_VF:
		otx_epvf->chip_id = dev_id;
		ret = otx2_ep_vf_setup_device(otx_epvf);
		otx_epvf->fn_list.disable_io_queues(otx_epvf);
		break;
	default:
		otx_ep_err("Unsupported device\n");
		ret = -EINVAL;
	}

	if (!ret)
		otx_ep_info("OTX_EP dev_id[%d]\n", dev_id);

	return ret;
}
/* OTX_EP VF device initialization */
static int
otx_epdev_init(struct otx_ep_device *otx_epvf)
{
	uint32_t ethdev_queues;
	int ret;

	ret = otx_ep_chip_specific_setup(otx_epvf);
	if (ret) {
		otx_ep_err("Chip specific setup failed\n");
		return ret;
	}

	otx_epvf->fn_list.setup_device_regs(otx_epvf);

	/* Pick the RX/TX burst handlers that match the detected chip */
	otx_epvf->eth_dev->rx_pkt_burst = &otx_ep_recv_pkts;
	if (otx_epvf->chip_id == PCI_DEVID_OCTEONTX_EP_VF)
		otx_epvf->eth_dev->tx_pkt_burst = &otx_ep_xmit_pkts;
	else if (otx_epvf->chip_id == PCI_DEVID_OCTEONTX2_EP_NET_VF ||
		 otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF)
		otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
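	/* The SR-IOV configuration fixes how many SDP rings this VF owns;
	 * that count caps both the RX and TX queue limits.
	 */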
	ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
	otx_epvf->max_rx_queues = ethdev_queues;
	otx_epvf->max_tx_queues = ethdev_queues;

	otx_ep_info("OTX_EP Device is Ready\n");

	return 0;
}
static int
otx_ep_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_rxmode *rxmode;
	struct rte_eth_txmode *txmode;
	struct rte_eth_conf *conf;

	conf = &data->dev_conf;
	rxmode = &conf->rxmode;
	txmode = &conf->txmode;
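	/* The application must not request more queues than the VF owns */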
	if (eth_dev->data->nb_rx_queues > otx_epvf->max_rx_queues ||
	    eth_dev->data->nb_tx_queues > otx_epvf->max_tx_queues) {
		otx_ep_err("invalid num queues\n");
		return -EINVAL;
	}
	otx_ep_info("OTX_EP Device is configured with num_txq %d num_rxq %d\n",
		    eth_dev->data->nb_tx_queues, eth_dev->data->nb_rx_queues);

	otx_epvf->rx_offloads = rxmode->offloads;
	otx_epvf->tx_offloads = txmode->offloads;

	return 0;
}
/**
 * Set up our receive queue/ringbuffer. This is the
 * queue the Octeon uses to send us packets and
 * responses. We are given a memory pool for our
 * packet buffers that are used to populate the receive
 * queue.
 *
 * @param eth_dev
 *    Pointer to the structure rte_eth_dev
 * @param q_no
 *    Queue number
 * @param num_rx_descs
 *    Number of entries in the queue
 * @param socket_id
 *    Where to allocate memory
 * @param rx_conf
 *    Pointer to the structure rte_eth_rxconf
 * @param mp
 *    Pointer to the packet pool
 *
 * @return
 *    - On success, return 0
 *    - On failure, return -1
 */
static int
otx_ep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		      uint16_t num_rx_descs, unsigned int socket_id,
		      const struct rte_eth_rxconf *rx_conf __rte_unused,
		      struct rte_mempool *mp)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint16_t buf_size;

	if (q_no >= otx_epvf->max_rx_queues) {
		otx_ep_err("Invalid rx queue number %u\n", q_no);
		return -EINVAL;
	}
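	/* SDP ring sizes must be a power of two and stay above the global
	 * watermark, so validate the requested descriptor count up front.
	 */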
	if (num_rx_descs & (num_rx_descs - 1)) {
		otx_ep_err("Invalid rx desc number, should be a power of 2: %u\n",
			   num_rx_descs);
		return -EINVAL;
	}
	if (num_rx_descs < (SDP_GBL_WMARK * 8)) {
		otx_ep_err("Invalid rx desc number, should be at least 8 * wmark: %u\n",
			   num_rx_descs);
		return -EINVAL;
	}

	otx_ep_dbg("setting up rx queue %u\n", q_no);
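	/* Usable receive buffer size: the mempool data room minus the
	 * standard mbuf headroom reserved by DPDK.
	 */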
	mbp_priv = rte_mempool_get_priv(mp);
	buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

	if (otx_ep_setup_oqs(otx_epvf, q_no, num_rx_descs, buf_size, mp,
			     socket_id)) {
		otx_ep_err("droq allocation failed\n");
		return -1;
	}

	eth_dev->data->rx_queues[q_no] = otx_epvf->droq[q_no];

	return 0;
}
/**
 * Release the receive queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param dev
 *    Pointer to Ethernet device structure.
 * @param q_no
 *    Receive queue index.
 */
static void
otx_ep_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
{
	struct otx_ep_droq *rq = dev->data->rx_queues[q_no];
	struct otx_ep_device *otx_epvf = rq->otx_ep_dev;
	int q_id = rq->q_no;

	if (otx_ep_delete_oqs(otx_epvf, q_id))
		otx_ep_err("Failed to delete OQ:%d\n", q_id);
}
/**
 * Allocate and initialize SW ring. Initialize associated HW registers.
 *
 * @param eth_dev
 *    Pointer to structure rte_eth_dev
 * @param q_no
 *    Queue number
 * @param num_tx_descs
 *    Number of ringbuffer descriptors
 * @param socket_id
 *    NUMA socket id, used for memory allocations
 * @param tx_conf
 *    Pointer to the structure rte_eth_txconf
 *
 * @return
 *    - On success, return 0
 *    - On failure, return -errno value
 */
static int
otx_ep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		      uint16_t num_tx_descs, unsigned int socket_id,
		      const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	int retval;

	if (q_no >= otx_epvf->max_tx_queues) {
		otx_ep_err("Invalid tx queue number %u\n", q_no);
		return -EINVAL;
	}
	if (num_tx_descs & (num_tx_descs - 1)) {
		otx_ep_err("Invalid tx desc number, should be a power of 2: %u\n",
			   num_tx_descs);
		return -EINVAL;
	}

	retval = otx_ep_setup_iqs(otx_epvf, q_no, num_tx_descs, socket_id);
	if (retval) {
		otx_ep_err("IQ(TxQ) creation failed.\n");
		return retval;
	}

	eth_dev->data->tx_queues[q_no] = otx_epvf->instr_queue[q_no];
	otx_ep_dbg("tx queue[%d] setup\n", q_no);

	return 0;
}
/**
 * Release the transmit queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param dev
 *    Pointer to Ethernet device structure.
 * @param q_no
 *    Transmit queue index.
 */
static void
otx_ep_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
{
	struct otx_ep_instr_queue *tq = dev->data->tx_queues[q_no];

	otx_ep_delete_iqs(tq->otx_ep_dev, tq->q_no);
}
/* Ethernet device operations supported by this PMD */
static const struct eth_dev_ops otx_ep_eth_dev_ops = {
	.dev_configure = otx_ep_dev_configure,
	.dev_start = otx_ep_dev_start,
	.dev_stop = otx_ep_dev_stop,
	.rx_queue_setup = otx_ep_rx_queue_setup,
	.rx_queue_release = otx_ep_rx_queue_release,
	.tx_queue_setup = otx_ep_tx_queue_setup,
	.tx_queue_release = otx_ep_tx_queue_release,
	.dev_infos_get = otx_ep_dev_info_get,
};
static void
otx_epdev_exit(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf;
	uint32_t num_queues, q;

	otx_ep_info("%s:\n", __func__);

	otx_epvf = OTX_EP_DEV(eth_dev);

	otx_epvf->fn_list.disable_io_queues(otx_epvf);

	num_queues = otx_epvf->nb_rx_queues;
	for (q = 0; q < num_queues; q++) {
		if (otx_ep_delete_oqs(otx_epvf, q)) {
			otx_ep_err("Failed to delete OQ:%d\n", q);
			return;
		}
	}
	otx_ep_info("Num OQs:%d freed\n", otx_epvf->nb_rx_queues);

	num_queues = otx_epvf->nb_tx_queues;
	for (q = 0; q < num_queues; q++) {
		if (otx_ep_delete_iqs(otx_epvf, q)) {
			otx_ep_err("Failed to delete IQ:%d\n", q);
			return;
		}
	}
	otx_ep_dbg("Num IQs:%d freed\n", otx_epvf->nb_tx_queues);
}
static int
otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	otx_epdev_exit(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}
static int
otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct rte_ether_addr vf_mac_addr;

	/* Single process support */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	otx_epvf->eth_dev = eth_dev;
	otx_epvf->port_id = eth_dev->data->port_id;
	eth_dev->dev_ops = &otx_ep_eth_dev_ops;
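	/* No MAC address is provided for the VF; allocate one entry and
	 * fill it with a randomly generated address.
	 */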
	eth_dev->data->mac_addrs = rte_zmalloc("otx_ep", RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		otx_ep_err("MAC addresses memory allocation failed\n");
		eth_dev->dev_ops = NULL;
		return -ENOMEM;
	}
	rte_eth_random_addr(vf_mac_addr.addr_bytes);
	rte_ether_addr_copy(&vf_mac_addr, eth_dev->data->mac_addrs);
	otx_epvf->hw_addr = pdev->mem_resource[0].addr;
	otx_epvf->pdev = pdev;

	otx_epdev_init(otx_epvf);
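	/* Select the SDP packet-kind (pkind) value that matches this chip
	 * generation.
	 */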
	if (pdev->id.device_id == PCI_DEVID_OCTEONTX2_EP_NET_VF)
		otx_epvf->pkind = SDP_OTX2_PKIND;
	else
		otx_epvf->pkind = SDP_PKIND;
	otx_ep_info("using pkind %d\n", otx_epvf->pkind);

	return 0;
}
static int
otx_ep_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			 struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct otx_ep_device),
					     otx_ep_eth_dev_init);
}

static int
otx_ep_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev,
					      otx_ep_eth_dev_uninit);
}
/* Set of PCI devices this driver supports */
static const struct rte_pci_id pci_id_otx_ep_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX_EP_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_EP_NET_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN98XX_EP_NET_VF) },
	{ .vendor_id = 0, /* sentinel */ }
};
static struct rte_pci_driver rte_otx_ep_pmd = {
	.id_table = pci_id_otx_ep_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = otx_ep_eth_dev_pci_probe,
	.remove = otx_ep_eth_dev_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_otx_ep, rte_otx_ep_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_otx_ep, pci_id_otx_ep_map);
RTE_PMD_REGISTER_KMOD_DEP(net_otx_ep, "* igb_uio | vfio-pci");
RTE_LOG_REGISTER_DEFAULT(otx_net_ep_logtype, NOTICE);