/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <ethdev_pci.h>

#include "otx2_common.h"
#include "otx_ep_common.h"
#include "otx_ep_vf.h"
#include "otx2_ep_vf.h"
#include "otx_ep_rxtx.h"

#define OTX_EP_DEV(_eth_dev) \
	((struct otx_ep_device *)(_eth_dev)->data->dev_private)

static const struct rte_eth_desc_lim otx_ep_rx_desc_lim = {
	.nb_max		= OTX_EP_MAX_OQ_DESCRIPTORS,
	.nb_min		= OTX_EP_MIN_OQ_DESCRIPTORS,
	.nb_align	= OTX_EP_RXD_ALIGN,
};

static const struct rte_eth_desc_lim otx_ep_tx_desc_lim = {
	.nb_max		= OTX_EP_MAX_IQ_DESCRIPTORS,
	.nb_min		= OTX_EP_MIN_IQ_DESCRIPTORS,
	.nb_align	= OTX_EP_TXD_ALIGN,
};

static int
otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
		    struct rte_eth_dev_info *devinfo)
{
	struct otx_ep_device *otx_epvf;

	otx_epvf = OTX_EP_DEV(eth_dev);

	devinfo->speed_capa = ETH_LINK_SPEED_10G;
	devinfo->max_rx_queues = otx_epvf->max_rx_queues;
	devinfo->max_tx_queues = otx_epvf->max_tx_queues;

	devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
	devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
	devinfo->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
	devinfo->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
	devinfo->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;

	devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;

	devinfo->rx_desc_lim = otx_ep_rx_desc_lim;
	devinfo->tx_desc_lim = otx_ep_tx_desc_lim;

	return 0;
}

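/*
 * Illustrative sketch (not part of this driver): an application would
 * query these limits through the generic ethdev API before sizing its
 * rings. "port_id" is a hypothetical port number.
 *
 *	struct rte_eth_dev_info dev_info;
 *
 *	if (rte_eth_dev_info_get(port_id, &dev_info) == 0) {
 *		uint16_t nb_rxd = RTE_MIN((uint16_t)1024,
 *					  dev_info.rx_desc_lim.nb_max);
 *		uint16_t nb_txd = RTE_MIN((uint16_t)1024,
 *					  dev_info.tx_desc_lim.nb_max);
 *		// nb_rxd/nb_txd now respect the PMD's advertised limits
 *	}
 */
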
static int
otx_ep_dev_start(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf;
	uint32_t q;
	int ret;

	otx_epvf = OTX_EP_DEV(eth_dev);
	/* Enable IQ/OQ for this device */
	ret = otx_epvf->fn_list.enable_io_queues(otx_epvf);
	if (ret) {
		otx_ep_err("IOQ enable failed\n");
		return ret;
	}

	/* Credit the full ring to hardware via the OQ doorbell register */
	for (q = 0; q < otx_epvf->nb_rx_queues; q++) {
		rte_write32(otx_epvf->droq[q]->nb_desc,
			    otx_epvf->droq[q]->pkts_credit_reg);

		rte_wmb();
		otx_ep_info("OQ[%d] dbells [%d]\n", q,
			    rte_read32(otx_epvf->droq[q]->pkts_credit_reg));
	}

	otx_ep_info("dev started\n");

	return 0;
}

/* Stop device and disable input/output functions */
static int
otx_ep_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);

	otx_epvf->fn_list.disable_io_queues(otx_epvf);

	return 0;
}

static int
otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
{
	struct rte_pci_device *pdev = otx_epvf->pdev;
	uint32_t dev_id = pdev->id.device_id;
	int ret = 0;

	switch (dev_id) {
	case PCI_DEVID_OCTEONTX_EP_VF:
		otx_epvf->chip_id = dev_id;
		ret = otx_ep_vf_setup_device(otx_epvf);
		otx_epvf->fn_list.disable_io_queues(otx_epvf);
		break;
	case PCI_DEVID_OCTEONTX2_EP_NET_VF:
	case PCI_DEVID_CN98XX_EP_NET_VF:
		otx_epvf->chip_id = dev_id;
		ret = otx2_ep_vf_setup_device(otx_epvf);
		otx_epvf->fn_list.disable_io_queues(otx_epvf);
		break;
	default:
		otx_ep_err("Unsupported device\n");
		ret = -EINVAL;
	}

	if (!ret)
		otx_ep_info("OTX_EP dev_id[%d]\n", dev_id);

	return ret;
}

/* OTX_EP VF device initialization */
static int
otx_epdev_init(struct otx_ep_device *otx_epvf)
{
	uint32_t ethdev_queues;
	int ret;

	ret = otx_ep_chip_specific_setup(otx_epvf);
	if (ret) {
		otx_ep_err("Chip specific setup failed\n");
		return ret;
	}

	otx_epvf->fn_list.setup_device_regs(otx_epvf);

	otx_epvf->eth_dev->rx_pkt_burst = &otx_ep_recv_pkts;

	/* Each VF gets the same ring count for Rx and Tx */
	ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
	otx_epvf->max_rx_queues = ethdev_queues;
	otx_epvf->max_tx_queues = ethdev_queues;

	otx_ep_info("OTX_EP Device is Ready\n");

	return 0;
}

static int
otx_ep_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_rxmode *rxmode;
	struct rte_eth_txmode *txmode;
	struct rte_eth_conf *conf;

	conf = &data->dev_conf;
	rxmode = &conf->rxmode;
	txmode = &conf->txmode;

	if (eth_dev->data->nb_rx_queues > otx_epvf->max_rx_queues ||
	    eth_dev->data->nb_tx_queues > otx_epvf->max_tx_queues) {
		otx_ep_err("invalid num queues\n");
		return -EINVAL;
	}
	otx_ep_info("OTX_EP Device is configured with num_txq %d num_rxq %d\n",
		    eth_dev->data->nb_tx_queues, eth_dev->data->nb_rx_queues);

	otx_epvf->rx_offloads = rxmode->offloads;
	otx_epvf->tx_offloads = txmode->offloads;

	return 0;
}

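/*
 * Illustrative sketch (not part of this driver): a minimal
 * application-side configuration that enables the offloads this PMD
 * advertises in otx_ep_dev_info_get(). "port_id" and the single
 * queue pair are hypothetical.
 *
 *	struct rte_eth_conf port_conf = {
 *		.rxmode = { .offloads = DEV_RX_OFFLOAD_SCATTER, },
 *		.txmode = { .offloads = DEV_TX_OFFLOAD_MULTI_SEGS, },
 *	};
 *
 *	if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
 *		rte_exit(EXIT_FAILURE, "port configure failed\n");
 */
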
/**
 * Set up our receive queue/ringbuffer. This is the
 * queue the Octeon uses to send us packets and
 * responses. We are given a memory pool for our
 * packet buffers that are used to populate the receive
 * queue.
 *
 * @param eth_dev
 *   Pointer to the structure rte_eth_dev
 * @param q_no
 *   Queue number
 * @param num_rx_descs
 *   Number of entries in the queue
 * @param socket_id
 *   Where to allocate memory
 * @param rx_conf
 *   Pointer to the structure rte_eth_rxconf
 * @param mp
 *   Pointer to the packet pool
 *
 * @return
 *   - On success, return 0
 *   - On failure, return -1
 */
static int
otx_ep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		      uint16_t num_rx_descs, unsigned int socket_id,
		      const struct rte_eth_rxconf *rx_conf __rte_unused,
		      struct rte_mempool *mp)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint16_t buf_size;

	if (q_no >= otx_epvf->max_rx_queues) {
		otx_ep_err("Invalid rx queue number %u\n", q_no);
		return -EINVAL;
	}
	if (num_rx_descs & (num_rx_descs - 1)) {
		otx_ep_err("Invalid rx desc number should be pow 2 %u\n",
			   num_rx_descs);
		return -EINVAL;
	}
	if (num_rx_descs < (SDP_GBL_WMARK * 8)) {
		otx_ep_err("Invalid rx desc number should at least be greater than 8xwmark %u\n",
			   num_rx_descs);
		return -EINVAL;
	}

	otx_ep_dbg("setting up rx queue %u\n", q_no);

	mbp_priv = rte_mempool_get_priv(mp);
	buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

	if (otx_ep_setup_oqs(otx_epvf, q_no, num_rx_descs, buf_size, mp,
			     socket_id)) {
		otx_ep_err("droq allocation failed\n");
		return -1;
	}

	eth_dev->data->rx_queues[q_no] = otx_epvf->droq[q_no];

	return 0;
}

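/*
 * Illustrative sketch (not part of this driver): setting up one Rx
 * queue from an application. The descriptor count must be a power of
 * two and at least 8 * SDP_GBL_WMARK to pass the checks above; 2048
 * is assumed to satisfy both. "port_id" is hypothetical.
 *
 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("otx_ep_rx_pool",
 *			8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
 *			rte_socket_id());
 *
 *	if (mp == NULL ||
 *	    rte_eth_rx_queue_setup(port_id, 0, 2048, rte_socket_id(),
 *				   NULL, mp) < 0)
 *		rte_exit(EXIT_FAILURE, "rx queue setup failed\n");
 */
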
/**
 * Release the receive queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param rxq
 *   Opaque pointer to the receive queue to release
 */
static void
otx_ep_rx_queue_release(void *rxq)
{
	struct otx_ep_droq *rq = (struct otx_ep_droq *)rxq;
	struct otx_ep_device *otx_epvf = rq->otx_ep_dev;
	int q_id = rq->q_no;

	if (otx_ep_delete_oqs(otx_epvf, q_id))
		otx_ep_err("Failed to delete OQ:%d\n", q_id);
}

/**
 * Allocate and initialize SW ring. Initialize associated HW registers.
 *
 * @param eth_dev
 *   Pointer to structure rte_eth_dev
 * @param q_no
 *   Queue number
 * @param num_tx_descs
 *   Number of ringbuffer descriptors
 * @param socket_id
 *   NUMA socket id, used for memory allocations
 * @param tx_conf
 *   Pointer to the structure rte_eth_txconf
 *
 * @return
 *   - On success, return 0
 *   - On failure, return -errno value
 */
static int
otx_ep_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		      uint16_t num_tx_descs, unsigned int socket_id,
		      const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	int retval;

	if (q_no >= otx_epvf->max_tx_queues) {
		otx_ep_err("Invalid tx queue number %u\n", q_no);
		return -EINVAL;
	}
	if (num_tx_descs & (num_tx_descs - 1)) {
		otx_ep_err("Invalid tx desc number should be pow 2 %u\n",
			   num_tx_descs);
		return -EINVAL;
	}

	retval = otx_ep_setup_iqs(otx_epvf, q_no, num_tx_descs, socket_id);
	if (retval) {
		otx_ep_err("IQ(TxQ) creation failed.\n");
		return retval;
	}

	eth_dev->data->tx_queues[q_no] = otx_epvf->instr_queue[q_no];
	otx_ep_dbg("tx queue[%d] setup\n", q_no);

	return 0;
}

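/*
 * Illustrative sketch (not part of this driver): the matching Tx queue
 * setup and device start from an application, again with a power-of-two
 * descriptor count and a hypothetical "port_id".
 *
 *	if (rte_eth_tx_queue_setup(port_id, 0, 2048, rte_socket_id(),
 *				   NULL) < 0 ||
 *	    rte_eth_dev_start(port_id) < 0)
 *		rte_exit(EXIT_FAILURE, "tx queue setup/start failed\n");
 */
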
/**
 * Release the transmit queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param txq
 *   Opaque pointer to the transmit queue to release
 */
static void
otx_ep_tx_queue_release(void *txq)
{
	struct otx_ep_instr_queue *tq = (struct otx_ep_instr_queue *)txq;

	otx_ep_delete_iqs(tq->otx_ep_dev, tq->q_no);
}

/* Define our ethernet device operations */
static const struct eth_dev_ops otx_ep_eth_dev_ops = {
	.dev_configure		= otx_ep_dev_configure,
	.dev_start		= otx_ep_dev_start,
	.dev_stop		= otx_ep_dev_stop,
	.rx_queue_setup		= otx_ep_rx_queue_setup,
	.rx_queue_release	= otx_ep_rx_queue_release,
	.tx_queue_setup		= otx_ep_tx_queue_setup,
	.tx_queue_release	= otx_ep_tx_queue_release,
	.dev_infos_get		= otx_ep_dev_info_get,
};

static int
otx_epdev_exit(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf;
	uint32_t num_queues, q;

	otx_ep_info("%s:\n", __func__);

	otx_epvf = OTX_EP_DEV(eth_dev);

	otx_epvf->fn_list.disable_io_queues(otx_epvf);

	num_queues = otx_epvf->nb_rx_queues;
	for (q = 0; q < num_queues; q++) {
		if (otx_ep_delete_oqs(otx_epvf, q)) {
			otx_ep_err("Failed to delete OQ:%d\n", q);
			return -EINVAL;
		}
	}
	otx_ep_info("Num OQs:%d freed\n", otx_epvf->nb_rx_queues);

	num_queues = otx_epvf->nb_tx_queues;
	for (q = 0; q < num_queues; q++) {
		if (otx_ep_delete_iqs(otx_epvf, q)) {
			otx_ep_err("Failed to delete IQ:%d\n", q);
			return -EINVAL;
		}
	}
	otx_ep_dbg("Num IQs:%d freed\n", otx_epvf->nb_tx_queues);

	return 0;
}

static int
otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	otx_epdev_exit(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}

static int
otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct rte_ether_addr vf_mac_addr;

	/* Single process support */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	otx_epvf->eth_dev = eth_dev;
	otx_epvf->port_id = eth_dev->data->port_id;
	eth_dev->dev_ops = &otx_ep_eth_dev_ops;
	eth_dev->data->mac_addrs = rte_zmalloc("otx_ep", RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		otx_ep_err("MAC addresses memory allocation failed\n");
		eth_dev->dev_ops = NULL;
		return -ENOMEM;
	}
	rte_eth_random_addr(vf_mac_addr.addr_bytes);
	rte_ether_addr_copy(&vf_mac_addr, eth_dev->data->mac_addrs);
	otx_epvf->hw_addr = pdev->mem_resource[0].addr;
	otx_epvf->pdev = pdev;

	otx_epdev_init(otx_epvf);
	if (pdev->id.device_id == PCI_DEVID_OCTEONTX2_EP_NET_VF)
		otx_epvf->pkind = SDP_OTX2_PKIND;
	else
		otx_epvf->pkind = SDP_PKIND;
	otx_ep_info("using pkind %d\n", otx_epvf->pkind);

	return 0;
}

static int
otx_ep_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			 struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct otx_ep_device),
					     otx_ep_eth_dev_init);
}

static int
otx_ep_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev,
					      otx_ep_eth_dev_uninit);
}

/* Set of PCI devices this driver supports */
static const struct rte_pci_id pci_id_otx_ep_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX_EP_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_EP_NET_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN98XX_EP_NET_VF) },
	{ .vendor_id = 0, /* sentinel */ }
};

static struct rte_pci_driver rte_otx_ep_pmd = {
	.id_table	= pci_id_otx_ep_map,
	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING,
	.probe		= otx_ep_eth_dev_pci_probe,
	.remove		= otx_ep_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_otx_ep, rte_otx_ep_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_otx_ep, pci_id_otx_ep_map);
RTE_PMD_REGISTER_KMOD_DEP(net_otx_ep, "* igb_uio | vfio-pci");
RTE_LOG_REGISTER(otx_net_ep_logtype, pmd.net.octeontx_ep, NOTICE);
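
/*
 * Illustrative note: before this PMD can probe the VF, the device must
 * be bound to one of the kernel modules named above, e.g. with a
 * hypothetical PCI address:
 *
 *	usertools/dpdk-devbind.py --bind=vfio-pci 0002:02:00.1
 */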