/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <ethdev_pci.h>

#include "otx2_common.h"
#include "otx_ep_common.h"
#include "otx_ep_vf.h"
#include "otx2_ep_vf.h"
#include "otx_ep_rxtx.h"

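/* Convenience accessor for the per-port private area; the ethdev layer
 * allocates this region for the driver during probe (sized by
 * rte_eth_dev_pci_generic_probe() below).
 */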
#define OTX_EP_DEV(_eth_dev) \
	((struct otx_ep_device *)(_eth_dev)->data->dev_private)

static const struct rte_eth_desc_lim otx_ep_rx_desc_lim = {
	.nb_max		= OTX_EP_MAX_OQ_DESCRIPTORS,
	.nb_min		= OTX_EP_MIN_OQ_DESCRIPTORS,
	.nb_align	= OTX_EP_RXD_ALIGN,
};

static const struct rte_eth_desc_lim otx_ep_tx_desc_lim = {
	.nb_max		= OTX_EP_MAX_IQ_DESCRIPTORS,
	.nb_min		= OTX_EP_MIN_IQ_DESCRIPTORS,
	.nb_align	= OTX_EP_TXD_ALIGN,
};

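/* Report device capabilities. The descriptor limits above are handed to
 * applications here; the ethdev layer also uses them to validate ring
 * sizes passed to rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup().
 */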
static int
otx_ep_dev_info_get(struct rte_eth_dev *eth_dev,
		    struct rte_eth_dev_info *devinfo)
{
	struct otx_ep_device *otx_epvf;

	otx_epvf = OTX_EP_DEV(eth_dev);

	devinfo->speed_capa = ETH_LINK_SPEED_10G;
	devinfo->max_rx_queues = otx_epvf->max_rx_queues;
	devinfo->max_tx_queues = otx_epvf->max_tx_queues;

	devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
	devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
	devinfo->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
	devinfo->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
	devinfo->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;

	devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;

	devinfo->rx_desc_lim = otx_ep_rx_desc_lim;
	devinfo->tx_desc_lim = otx_ep_tx_desc_lim;

	return 0;
}

static int
otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
{
	struct rte_pci_device *pdev = otx_epvf->pdev;
	uint32_t dev_id = pdev->id.device_id;
	int ret = 0;

	switch (dev_id) {
	case PCI_DEVID_OCTEONTX_EP_VF:
		otx_epvf->chip_id = dev_id;
		ret = otx_ep_vf_setup_device(otx_epvf);
		otx_epvf->fn_list.disable_io_queues(otx_epvf);
		break;
	case PCI_DEVID_OCTEONTX2_EP_NET_VF:
	case PCI_DEVID_CN98XX_EP_NET_VF:
		otx_epvf->chip_id = dev_id;
		ret = otx2_ep_vf_setup_device(otx_epvf);
		otx_epvf->fn_list.disable_io_queues(otx_epvf);
		break;
	default:
		otx_ep_err("Unsupported device\n");
		ret = -EINVAL;
	}

	if (!ret)
		otx_ep_info("OTX_EP dev_id[%d]\n", dev_id);
	return ret;
}

/* OTX_EP VF device initialization */
static int
otx_epdev_init(struct otx_ep_device *otx_epvf)
{
	uint32_t ethdev_queues;
	int ret;

	ret = otx_ep_chip_specific_setup(otx_epvf);
	if (ret) {
		otx_ep_err("Chip specific setup failed\n");
		goto setup_fail;
	}
	otx_epvf->fn_list.setup_device_regs(otx_epvf);
	ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
	otx_epvf->max_rx_queues = ethdev_queues;
	otx_epvf->max_tx_queues = ethdev_queues;
	otx_ep_info("OTX_EP Device is Ready\n");
setup_fail:
	return ret;
}

static int
otx_ep_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct rte_eth_dev_data *data = eth_dev->data;
	struct rte_eth_rxmode *rxmode;
	struct rte_eth_txmode *txmode;
	struct rte_eth_conf *conf;

	conf = &data->dev_conf;
	rxmode = &conf->rxmode;
	txmode = &conf->txmode;
	if (eth_dev->data->nb_rx_queues > otx_epvf->max_rx_queues ||
	    eth_dev->data->nb_tx_queues > otx_epvf->max_tx_queues) {
		otx_ep_err("invalid num queues\n");
		return -EINVAL;
	}
	otx_ep_info("OTX_EP Device is configured with num_txq %d num_rxq %d\n",
		    eth_dev->data->nb_tx_queues, eth_dev->data->nb_rx_queues);

	otx_epvf->rx_offloads = rxmode->offloads;
	otx_epvf->tx_offloads = txmode->offloads;

	return 0;
}

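/*
 * Illustrative usage, not part of the driver: an application reaches this
 * callback through the generic ethdev API. Queue counts beyond
 * max_rx_queues/max_tx_queues are rejected above; port_id and the queue
 * counts below are hypothetical.
 *
 *	struct rte_eth_conf conf = { 0 };
 *
 *	if (rte_eth_dev_configure(port_id, 2, 2, &conf) < 0)
 *		rte_exit(EXIT_FAILURE, "port configure failed\n");
 */
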
/**
 * Set up our receive queue/ring buffer. This is the
 * queue the Octeon uses to send us packets and
 * responses. We are given a memory pool for our
 * packet buffers that are used to populate the receive
 * queue.
 *
 * @param eth_dev
 *    Pointer to the structure rte_eth_dev
 * @param q_no
 *    Queue number
 * @param num_rx_descs
 *    Number of entries in the queue
 * @param socket_id
 *    Where to allocate memory
 * @param rx_conf
 *    Pointer to the structure rte_eth_rxconf
 * @param mp
 *    Pointer to the packet pool
 *
 * @return
 *    - On success, return 0
 *    - On failure, return -1
 */
static int
otx_ep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		      uint16_t num_rx_descs, unsigned int socket_id,
		      const struct rte_eth_rxconf *rx_conf __rte_unused,
		      struct rte_mempool *mp)
{
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint16_t buf_size;

	if (q_no >= otx_epvf->max_rx_queues) {
		otx_ep_err("Invalid rx queue number %u\n", q_no);
		return -EINVAL;
	}
	if (num_rx_descs & (num_rx_descs - 1)) {
		otx_ep_err("Invalid rx desc number, should be a power of 2: %u\n",
			   num_rx_descs);
		return -EINVAL;
	}
	if (num_rx_descs < (SDP_GBL_WMARK * 8)) {
		otx_ep_err("Invalid rx desc number, should be at least 8 * wmark: %u\n",
			   num_rx_descs);
		return -EINVAL;
	}
	otx_ep_dbg("setting up rx queue %u\n", q_no);
	mbp_priv = rte_mempool_get_priv(mp);
	buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
	if (otx_ep_setup_oqs(otx_epvf, q_no, num_rx_descs, buf_size, mp,
			     socket_id)) {
		otx_ep_err("droq allocation failed\n");
		return -1;
	}
	eth_dev->data->rx_queues[q_no] = otx_epvf->droq[q_no];

	return 0;
}

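/*
 * Illustrative usage, not part of the driver: the checks above mean the
 * ring size given to rte_eth_rx_queue_setup() must be a power of two and
 * at least 8 * SDP_GBL_WMARK. The pool name and sizes are hypothetical.
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *				     RTE_MBUF_DEFAULT_BUF_SIZE,
 *				     rte_socket_id());
 *	if (mp == NULL || rte_eth_rx_queue_setup(port_id, 0, 4096,
 *						 rte_socket_id(), NULL, mp))
 *		rte_exit(EXIT_FAILURE, "rx queue setup failed\n");
 */
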
/**
 * Release the receive queue/ring buffer. Called by the upper layers.
 * @param rxq
 *    Opaque pointer to the receive queue to release
 */
static void
otx_ep_rx_queue_release(void *rxq)
{
	struct otx_ep_droq *rq = (struct otx_ep_droq *)rxq;
	struct otx_ep_device *otx_epvf = rq->otx_ep_dev;
	int q_id = rq->q_no;

	if (otx_ep_delete_oqs(otx_epvf, q_id))
		otx_ep_err("Failed to delete OQ:%d\n", q_id);
}

/* Define our ethernet device operations */
static const struct eth_dev_ops otx_ep_eth_dev_ops = {
	.dev_configure		= otx_ep_dev_configure,
	.rx_queue_setup		= otx_ep_rx_queue_setup,
	.rx_queue_release	= otx_ep_rx_queue_release,
	.dev_infos_get		= otx_ep_dev_info_get,
};

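/* Only the ops above are implemented at this point; for anything else the
 * ethdev layer falls back to its defaults (typically -ENOTSUP).
 */
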
static int
otx_epdev_exit(struct rte_eth_dev *eth_dev)
{
	struct otx_ep_device *otx_epvf;
	uint32_t num_queues, q;

	otx_ep_info("%s:\n", __func__);
	otx_epvf = OTX_EP_DEV(eth_dev);
	num_queues = otx_epvf->nb_rx_queues;
	for (q = 0; q < num_queues; q++) {
		if (otx_ep_delete_oqs(otx_epvf, q)) {
			otx_ep_err("Failed to delete OQ:%d\n", q);
			return -EINVAL;
		}
	}
	otx_ep_info("Num OQs:%d freed\n", otx_epvf->nb_rx_queues);

	return 0;
}

static int
otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	otx_epdev_exit(eth_dev);
	eth_dev->dev_ops = NULL;
	return 0;
}

static int
otx_ep_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
	struct rte_ether_addr vf_mac_addr;

	/* Single process support */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	otx_epvf->eth_dev = eth_dev;
	otx_epvf->port_id = eth_dev->data->port_id;
	eth_dev->dev_ops = &otx_ep_eth_dev_ops;
	eth_dev->data->mac_addrs = rte_zmalloc("otx_ep", RTE_ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		otx_ep_err("MAC addresses memory allocation failed\n");
		eth_dev->dev_ops = NULL;
		return -ENOMEM;
	}
	rte_eth_random_addr(vf_mac_addr.addr_bytes);
	rte_ether_addr_copy(&vf_mac_addr, eth_dev->data->mac_addrs);
	otx_epvf->hw_addr = pdev->mem_resource[0].addr;
	otx_epvf->pdev = pdev;

	otx_epdev_init(otx_epvf);
	if (pdev->id.device_id == PCI_DEVID_OCTEONTX2_EP_NET_VF)
		otx_epvf->pkind = SDP_OTX2_PKIND;
	else
		otx_epvf->pkind = SDP_PKIND;
	otx_ep_info("using pkind %d\n", otx_epvf->pkind);

	return 0;
}

static int
otx_ep_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			 struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct otx_ep_device),
					     otx_ep_eth_dev_init);
}

static int
otx_ep_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev,
					      otx_ep_eth_dev_uninit);
}

/* Set of PCI devices this driver supports */
static const struct rte_pci_id pci_id_otx_ep_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX_EP_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_EP_NET_VF) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN98XX_EP_NET_VF) },
	{ .vendor_id = 0, /* sentinel */ }
};

static struct rte_pci_driver rte_otx_ep_pmd = {
	.id_table	= pci_id_otx_ep_map,
	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING,
	.probe		= otx_ep_eth_dev_pci_probe,
	.remove		= otx_ep_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_otx_ep, rte_otx_ep_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_otx_ep, pci_id_otx_ep_map);
RTE_PMD_REGISTER_KMOD_DEP(net_otx_ep, "* igb_uio | vfio-pci");
RTE_LOG_REGISTER(otx_net_ep_logtype, pmd.net.octeontx_ep, NOTICE);
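
/*
 * For reference: the VF must be bound to one of the kernel modules named
 * above before the PMD can claim it, e.g. (hypothetical BDF):
 *
 *	usertools/dpdk-devbind.py --bind=vfio-pci 0002:02:00.1
 */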