/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
5 #include <ethdev_pci.h>
7 #include "otx2_common.h"
8 #include "otx_ep_common.h"
10 #include "otx2_ep_vf.h"
11 #include "otx_ep_rxtx.h"
/* Fetch this port's otx_ep private device state from the generic ethdev. */
#define OTX_EP_DEV(_eth_dev) \
	((struct otx_ep_device *)(_eth_dev)->data->dev_private)
16 static const struct rte_eth_desc_lim otx_ep_rx_desc_lim = {
17 .nb_max = OTX_EP_MAX_OQ_DESCRIPTORS,
18 .nb_min = OTX_EP_MIN_OQ_DESCRIPTORS,
19 .nb_align = OTX_EP_RXD_ALIGN,
22 static const struct rte_eth_desc_lim otx_ep_tx_desc_lim = {
23 .nb_max = OTX_EP_MAX_IQ_DESCRIPTORS,
24 .nb_min = OTX_EP_MIN_IQ_DESCRIPTORS,
25 .nb_align = OTX_EP_TXD_ALIGN,
29 otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
30 struct rte_eth_dev_info *devinfo)
32 struct otx_ep_device *otx_epvf;
34 otx_epvf = OTX_EP_DEV(eth_dev);
36 devinfo->speed_capa = ETH_LINK_SPEED_10G;
37 devinfo->max_rx_queues = otx_epvf->max_rx_queues;
38 devinfo->max_tx_queues = otx_epvf->max_tx_queues;
40 devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
41 devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
42 devinfo->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
43 devinfo->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
44 devinfo->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
46 devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;
48 devinfo->rx_desc_lim = otx_ep_rx_desc_lim;
49 devinfo->tx_desc_lim = otx_ep_tx_desc_lim;
55 otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
57 struct rte_pci_device *pdev = otx_epvf->pdev;
58 uint32_t dev_id = pdev->id.device_id;
62 case PCI_DEVID_OCTEONTX_EP_VF:
63 otx_epvf->chip_id = dev_id;
64 ret = otx_ep_vf_setup_device(otx_epvf);
65 otx_epvf->fn_list.disable_io_queues(otx_epvf);
67 case PCI_DEVID_OCTEONTX2_EP_NET_VF:
68 case PCI_DEVID_CN98XX_EP_NET_VF:
69 otx_epvf->chip_id = dev_id;
70 ret = otx2_ep_vf_setup_device(otx_epvf);
71 otx_epvf->fn_list.disable_io_queues(otx_epvf);
74 otx_ep_err("Unsupported device\n");
79 otx_ep_info("OTX_EP dev_id[%d]\n", dev_id);
84 /* OTX_EP VF device initialization */
86 otx_epdev_init(struct otx_ep_device *otx_epvf)
88 uint32_t ethdev_queues;
91 ret = otx_ep_chip_specific_setup(otx_epvf);
93 otx_ep_err("Chip specific setup failed\n");
97 otx_epvf->fn_list.setup_device_regs(otx_epvf);
99 ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
100 otx_epvf->max_rx_queues = ethdev_queues;
101 otx_epvf->max_tx_queues = ethdev_queues;
103 otx_ep_info("OTX_EP Device is Ready\n");
110 otx_ep_dev_configure(struct rte_eth_dev *eth_dev)
112 struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
113 struct rte_eth_dev_data *data = eth_dev->data;
114 struct rte_eth_rxmode *rxmode;
115 struct rte_eth_txmode *txmode;
116 struct rte_eth_conf *conf;
118 conf = &data->dev_conf;
119 rxmode = &conf->rxmode;
120 txmode = &conf->txmode;
121 if (eth_dev->data->nb_rx_queues > otx_epvf->max_rx_queues ||
122 eth_dev->data->nb_tx_queues > otx_epvf->max_tx_queues) {
123 otx_ep_err("invalid num queues\n");
126 otx_ep_info("OTX_EP Device is configured with num_txq %d num_rxq %d\n",
127 eth_dev->data->nb_rx_queues, eth_dev->data->nb_tx_queues);
129 otx_epvf->rx_offloads = rxmode->offloads;
130 otx_epvf->tx_offloads = txmode->offloads;
136 * Setup our receive queue/ringbuffer. This is the
137 * queue the Octeon uses to send us packets and
138 * responses. We are given a memory pool for our
139 * packet buffers that are used to populate the receive
143 * Pointer to the structure rte_eth_dev
146 * @param num_rx_descs
147 * Number of entries in the queue
149 * Where to allocate memory
151 * Pointer to the struction rte_eth_rxconf
153 * Pointer to the packet pool
156 * - On success, return 0
157 * - On failure, return -1
160 otx_ep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
161 uint16_t num_rx_descs, unsigned int socket_id,
162 const struct rte_eth_rxconf *rx_conf __rte_unused,
163 struct rte_mempool *mp)
165 struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
166 struct rte_pktmbuf_pool_private *mbp_priv;
169 if (q_no >= otx_epvf->max_rx_queues) {
170 otx_ep_err("Invalid rx queue number %u\n", q_no);
174 if (num_rx_descs & (num_rx_descs - 1)) {
175 otx_ep_err("Invalid rx desc number should be pow 2 %u\n",
179 if (num_rx_descs < (SDP_GBL_WMARK * 8)) {
180 otx_ep_err("Invalid rx desc number should at least be greater than 8xwmark %u\n",
185 otx_ep_dbg("setting up rx queue %u\n", q_no);
187 mbp_priv = rte_mempool_get_priv(mp);
188 buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
190 if (otx_ep_setup_oqs(otx_epvf, q_no, num_rx_descs, buf_size, mp,
192 otx_ep_err("droq allocation failed\n");
196 eth_dev->data->rx_queues[q_no] = otx_epvf->droq[q_no];
202 * Release the receive queue/ringbuffer. Called by
206 * Opaque pointer to the receive queue to release
212 otx_ep_rx_queue_release(void *rxq)
214 struct otx_ep_droq *rq = (struct otx_ep_droq *)rxq;
215 struct otx_ep_device *otx_epvf = rq->otx_ep_dev;
218 if (otx_ep_delete_oqs(otx_epvf, q_id))
219 otx_ep_err("Failed to delete OQ:%d\n", q_id);
223 * Allocate and initialize SW ring. Initialize associated HW registers.
226 * Pointer to structure rte_eth_dev
231 * @param num_tx_descs
232 * Number of ringbuffer descriptors
235 * NUMA socket id, used for memory allocations
238 * Pointer to the structure rte_eth_txconf
241 * - On success, return 0
242 * - On failure, return -errno value
245 otx_ep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
246 uint16_t num_tx_descs, unsigned int socket_id,
247 const struct rte_eth_txconf *tx_conf __rte_unused)
249 struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
252 if (q_no >= otx_epvf->max_tx_queues) {
253 otx_ep_err("Invalid tx queue number %u\n", q_no);
256 if (num_tx_descs & (num_tx_descs - 1)) {
257 otx_ep_err("Invalid tx desc number should be pow 2 %u\n",
262 retval = otx_ep_setup_iqs(otx_epvf, q_no, num_tx_descs, socket_id);
265 otx_ep_err("IQ(TxQ) creation failed.\n");
269 eth_dev->data->tx_queues[q_no] = otx_epvf->instr_queue[q_no];
270 otx_ep_dbg("tx queue[%d] setup\n", q_no);
275 * Release the transmit queue/ringbuffer. Called by
279 * Opaque pointer to the transmit queue to release
285 otx_ep_tx_queue_release(void *txq)
287 struct otx_ep_instr_queue *tq = (struct otx_ep_instr_queue *)txq;
289 otx_ep_delete_iqs(tq->otx_ep_dev, tq->q_no);
292 /* Define our ethernet definitions */
293 static const struct eth_dev_ops otx_ep_eth_dev_ops = {
294 .dev_configure = otx_ep_dev_configure,
295 .rx_queue_setup = otx_ep_rx_queue_setup,
296 .rx_queue_release = otx_ep_rx_queue_release,
297 .tx_queue_setup = otx_ep_tx_queue_setup,
298 .tx_queue_release = otx_ep_tx_queue_release,
299 .dev_infos_get = otx_ep_dev_info_get,
303 otx_epdev_exit(struct rte_eth_dev *eth_dev)
305 struct otx_ep_device *otx_epvf;
306 uint32_t num_queues, q;
308 otx_ep_info("%s:\n", __func__);
310 otx_epvf = OTX_EP_DEV(eth_dev);
312 num_queues = otx_epvf->nb_rx_queues;
313 for (q = 0; q < num_queues; q++) {
314 if (otx_ep_delete_oqs(otx_epvf, q)) {
315 otx_ep_err("Failed to delete OQ:%d\n", q);
319 otx_ep_info("Num OQs:%d freed\n", otx_epvf->nb_rx_queues);
321 num_queues = otx_epvf->nb_tx_queues;
322 for (q = 0; q < num_queues; q++) {
323 if (otx_ep_delete_iqs(otx_epvf, q)) {
324 otx_ep_err("Failed to delete IQ:%d\n", q);
328 otx_ep_dbg("Num IQs:%d freed\n", otx_epvf->nb_tx_queues);
334 otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
336 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
338 otx_epdev_exit(eth_dev);
340 eth_dev->dev_ops = NULL;
346 otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
348 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
349 struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
350 struct rte_ether_addr vf_mac_addr;
352 /* Single process support */
353 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
356 otx_epvf->eth_dev = eth_dev;
357 otx_epvf->port_id = eth_dev->data->port_id;
358 eth_dev->dev_ops = &otx_ep_eth_dev_ops;
359 eth_dev->data->mac_addrs = rte_zmalloc("otx_ep", RTE_ETHER_ADDR_LEN, 0);
360 if (eth_dev->data->mac_addrs == NULL) {
361 otx_ep_err("MAC addresses memory allocation failed\n");
362 eth_dev->dev_ops = NULL;
365 rte_eth_random_addr(vf_mac_addr.addr_bytes);
366 rte_ether_addr_copy(&vf_mac_addr, eth_dev->data->mac_addrs);
367 otx_epvf->hw_addr = pdev->mem_resource[0].addr;
368 otx_epvf->pdev = pdev;
370 otx_epdev_init(otx_epvf);
371 if (pdev->id.device_id == PCI_DEVID_OCTEONTX2_EP_NET_VF)
372 otx_epvf->pkind = SDP_OTX2_PKIND;
374 otx_epvf->pkind = SDP_PKIND;
375 otx_ep_info("using pkind %d\n", otx_epvf->pkind);
381 otx_ep_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
382 struct rte_pci_device *pci_dev)
384 return rte_eth_dev_pci_generic_probe(pci_dev,
385 sizeof(struct otx_ep_device),
386 otx_ep_eth_dev_init);
390 otx_ep_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
392 return rte_eth_dev_pci_generic_remove(pci_dev,
393 otx_ep_eth_dev_uninit);
396 /* Set of PCI devices this driver supports */
397 static const struct rte_pci_id pci_id_otx_ep_map[] = {
398 { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX_EP_VF) },
399 { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_EP_NET_VF) },
400 { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN98XX_EP_NET_VF) },
401 { .vendor_id = 0, /* sentinel */ }
404 static struct rte_pci_driver rte_otx_ep_pmd = {
405 .id_table = pci_id_otx_ep_map,
406 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
407 .probe = otx_ep_eth_dev_pci_probe,
408 .remove = otx_ep_eth_dev_pci_remove,
/* Register the PMD with the PCI bus, export its id table, declare the
 * kernel-module dependency and create the driver's log type.
 */
RTE_PMD_REGISTER_PCI(net_otx_ep, rte_otx_ep_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_otx_ep, pci_id_otx_ep_map);
RTE_PMD_REGISTER_KMOD_DEP(net_otx_ep, "* igb_uio | vfio-pci");
RTE_LOG_REGISTER(otx_net_ep_logtype, pmd.net.octeontx_ep, NOTICE);