/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_alarm.h>

#include "lio_logs.h"
#include "lio_23xx_vf.h"
#include "lio_ethdev.h"
#include "lio_rxtx.h"
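
/* Software population count (SWAR): each masked add folds adjacent
 * 1-, 2-, 4-, 8-, 16- and 32-bit fields together until the low byte
 * holds the number of set bits. Used below to count the queues the
 * firmware grants in its iq/oq bit masks.
 */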
static uint64_t
lio_hweight64(uint64_t w)
{
	uint64_t res = w - ((w >> 1) & 0x5555555555555555ul);

	res =
	    (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
	res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
	res = res + (res >> 8);
	res = res + (res >> 16);

	return (res + (res >> 32)) & 0x00000000000000FFul;
}

/**
 * Setup our receive queue/ringbuffer. This is the
 * queue the Octeon uses to send us packets and
 * responses. We are given a memory pool for our
 * packet buffers that are used to populate the receive
 * queue.
 *
 * @param eth_dev
 *    Pointer to the structure rte_eth_dev
 * @param q_no
 *    Queue number
 * @param num_rx_descs
 *    Number of entries in the queue
 * @param socket_id
 *    Where to allocate memory
 * @param rx_conf
 *    Pointer to the structure rte_eth_rxconf
 * @param mp
 *    Pointer to the packet pool
 *
 * @return
 *    - On success, return 0
 *    - On failure, return -1
 */
static int
lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_rx_descs, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf __rte_unused,
		       struct rte_mempool *mp)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint32_t fw_mapped_oq;
	uint16_t buf_size;

	if (q_no >= lio_dev->nb_rx_queues) {
		lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no);
		return -EINVAL;
	}

	lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no);

	fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;

	if ((lio_dev->droq[fw_mapped_oq]) &&
	    (num_rx_descs != lio_dev->droq[fw_mapped_oq]->max_count)) {
		lio_dev_err(lio_dev,
			    "Reconfiguring Rx descs not supported. Configure descs to same value %u or restart application\n",
			    lio_dev->droq[fw_mapped_oq]->max_count);
		return -ENOTSUP;
	}

	/* Usable buffer size is the mempool data room minus the headroom */
	mbp_priv = rte_mempool_get_priv(mp);
	buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

	if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp,
			   socket_id)) {
		lio_dev_err(lio_dev, "droq allocation failed\n");
		return -1;
	}

	eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq];

	return 0;
}

/**
 * Release the receive queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param rxq
 *    Opaque pointer to the receive queue to release
 *
 * @return
 *    - nothing
 */
static void
lio_dev_rx_queue_release(void *rxq)
{
	struct lio_droq *droq = rxq;
	struct lio_device *lio_dev = droq->lio_dev;
	int oq_no = droq->q_no;

	/* Run time queue deletion not supported */
	if (lio_dev->port_configured)
		return;

	lio_delete_droq_queue(droq->lio_dev, oq_no);
}

/**
 * Allocate and initialize SW ring. Initialize associated HW registers.
 *
 * @param eth_dev
 *    Pointer to structure rte_eth_dev
 *
 * @param q_no
 *    Queue number
 *
 * @param num_tx_descs
 *    Number of ringbuffer descriptors
 *
 * @param socket_id
 *    NUMA socket id, used for memory allocations
 *
 * @param tx_conf
 *    Pointer to the structure rte_eth_txconf
 *
 * @return
 *    - On success, return 0
 *    - On failure, return -errno value
 */
static int
lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_tx_descs, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	int fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no;
	int retval;

	if (q_no >= lio_dev->nb_tx_queues) {
		lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no);
		return -EINVAL;
	}

	lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);

	if ((lio_dev->instr_queue[fw_mapped_iq] != NULL) &&
	    (num_tx_descs != lio_dev->instr_queue[fw_mapped_iq]->max_count)) {
		lio_dev_err(lio_dev,
			    "Reconfiguring Tx descs not supported. Configure descs to same value %u or restart application\n",
			    lio_dev->instr_queue[fw_mapped_iq]->max_count);
		return -ENOTSUP;
	}

	retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
			      num_tx_descs, lio_dev, socket_id);
	if (retval) {
		lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n");
		return retval;
	}

	retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
				lio_dev->instr_queue[fw_mapped_iq]->max_count,
				socket_id);
	if (retval) {
		lio_delete_instruction_queue(lio_dev, fw_mapped_iq);
		return retval;
	}

	eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq];

	return 0;
}

/**
 * Release the transmit queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param txq
 *    Opaque pointer to the transmit queue to release
 *
 * @return
 *    - nothing
 */
static void
lio_dev_tx_queue_release(void *txq)
{
	struct lio_instr_queue *tq = txq;
	struct lio_device *lio_dev = tq->lio_dev;
	uint32_t fw_mapped_iq_no;

	/* Run time queue deletion not supported */
	if (lio_dev->port_configured)
		return;

	/* Free the scatter/gather lists, then the instruction queue itself */
	lio_delete_sglist(tq);

	fw_mapped_iq_no = tq->txpciq.s.q_no;
	lio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no);
}
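
/* Negotiate the interface configuration with the firmware: request
 * iq/oq counts via a soft command, poll the completion word until the
 * response arrives or LIO_MAX_CMD_TIMEOUT expires, then record the
 * queue mappings, link info and permanent MAC address returned.
 */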
static int
lio_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	int retval, num_iqueues, num_oqueues;
	uint8_t mac[ETHER_ADDR_LEN], i;
	struct lio_if_cfg_resp *resp;
	struct lio_soft_command *sc;
	union lio_if_cfg if_cfg;
	uint32_t resp_size;

	PMD_INIT_FUNC_TRACE();

	/* Re-configuring firmware not supported.
	 * Can't change tx/rx queues per port from initial value.
	 */
	if (lio_dev->port_configured) {
		if ((lio_dev->nb_rx_queues != eth_dev->data->nb_rx_queues) ||
		    (lio_dev->nb_tx_queues != eth_dev->data->nb_tx_queues)) {
			lio_dev_err(lio_dev,
				    "rxq/txq re-conf not supported. Restart application with new value.\n");
			return -ENOTSUP;
		}
		return 0;
	}

	lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues;
	lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues;

	resp_size = sizeof(struct lio_if_cfg_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
	if (sc == NULL)
		return -ENOMEM;

	resp = (struct lio_if_cfg_resp *)sc->virtrptr;

	/* Firmware doesn't have the capability to reconfigure queues:
	 * claim all queues, and use as many as required.
	 */
	if_cfg.if_cfg64 = 0;
	if_cfg.s.num_iqueues = lio_dev->nb_tx_queues;
	if_cfg.s.num_oqueues = lio_dev->nb_rx_queues;
	if_cfg.s.base_queue = 0;
	if_cfg.s.gmx_port_id = lio_dev->pf_num;

	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_IF_CFG, 0,
				 if_cfg.if_cfg64, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
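
	/* Post the command to the instruction queue; completion is
	 * detected below by polling the response status word.
	 */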
	retval = lio_send_soft_command(lio_dev, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_err(lio_dev, "iq/oq config failed status: %x\n",
			    retval);
		/* Soft instr is freed by driver in case of failure. */
		goto nic_config_fail;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed-out.
	 */
	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
		lio_process_ordered_list(lio_dev);
		rte_delay_ms(1);
	}

	retval = resp->status;
	if (retval) {
		lio_dev_err(lio_dev, "iq/oq config failed\n");
		goto nic_config_fail;
	}

	lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
			 sizeof(struct octeon_if_cfg_info) >> 3);

	num_iqueues = lio_hweight64(resp->cfg_info.iqmask);
	num_oqueues = lio_hweight64(resp->cfg_info.oqmask);

	if (!(num_iqueues) || !(num_oqueues)) {
		lio_dev_err(lio_dev,
			    "Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\n",
			    (unsigned long)resp->cfg_info.iqmask,
			    (unsigned long)resp->cfg_info.oqmask);
		goto nic_config_fail;
	}

	lio_dev_dbg(lio_dev,
		    "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n",
		    eth_dev->data->port_id,
		    (unsigned long)resp->cfg_info.iqmask,
		    (unsigned long)resp->cfg_info.oqmask,
		    num_iqueues, num_oqueues);

	lio_dev->linfo.num_rxpciq = num_oqueues;
	lio_dev->linfo.num_txpciq = num_iqueues;

	for (i = 0; i < num_oqueues; i++) {
		lio_dev->linfo.rxpciq[i].rxpciq64 =
		    resp->cfg_info.linfo.rxpciq[i].rxpciq64;
		lio_dev_dbg(lio_dev, "index %d OQ %d\n",
			    i, lio_dev->linfo.rxpciq[i].s.q_no);
	}

	for (i = 0; i < num_iqueues; i++) {
		lio_dev->linfo.txpciq[i].txpciq64 =
		    resp->cfg_info.linfo.txpciq[i].txpciq64;
		lio_dev_dbg(lio_dev, "index %d IQ %d\n",
			    i, lio_dev->linfo.txpciq[i].s.q_no);
	}

	lio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
	lio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport;
	lio_dev->linfo.link.link_status64 =
			resp->cfg_info.linfo.link.link_status64;

	/* 64-bit swap required on LE machines */
	lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +
				       2 + i));

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)mac, &eth_dev->data->mac_addrs[0]);

	lio_dev->glist_lock =
	    rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0);
	if (lio_dev->glist_lock == NULL)
		return -ENOMEM;

	lio_dev->glist_head =
		rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues,
			    0);
	if (lio_dev->glist_head == NULL) {
		rte_free(lio_dev->glist_lock);
		lio_dev->glist_lock = NULL;
		return -ENOMEM;
	}

	lio_dev->port_configured = 1;

	lio_free_soft_command(sc);

	/* Disable iq_0 for reconf */
	lio_dev->fn_list.disable_io_queues(lio_dev);

	/* Reset ioq regs */
	lio_dev->fn_list.setup_device_regs(lio_dev);

	/* Free iq_0 used during init */
	lio_free_instr_queue0(lio_dev);

	return 0;

nic_config_fail:
	lio_dev_err(lio_dev, "Failed retval %d\n", retval);
	lio_free_soft_command(sc);
	lio_free_instr_queue0(lio_dev);

	return -ENODEV;
}

/* Define our ethernet device operations */
static const struct eth_dev_ops liovf_eth_dev_ops = {
	.dev_configure		= lio_dev_configure,
	.rx_queue_setup		= lio_dev_rx_queue_setup,
	.rx_queue_release	= lio_dev_rx_queue_release,
	.tx_queue_setup		= lio_dev_tx_queue_setup,
	.tx_queue_release	= lio_dev_tx_queue_release,
};
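
/* PF/VF handshake poller: cn23xx_vf_handle_mbox() processes any pending
 * mailbox message, and the callback re-arms itself through a 1us EAL
 * alarm until the PF fills in pfvf_hsword (coproc_tics_per_us != 0).
 */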
static void
lio_check_pf_hs_response(void *lio_dev)
{
	struct lio_device *dev = lio_dev;

	/* Stop polling once the PF's response has arrived */
	if (dev->pfvf_hsword.coproc_tics_per_us)
		return;

	cn23xx_vf_handle_mbox(dev);

	rte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev);
}

/**
 * \brief Identify the LIO device and map the BAR address space
 * @param lio_dev lio device
 */
static int
lio_chip_specific_setup(struct lio_device *lio_dev)
{
	struct rte_pci_device *pdev = lio_dev->pci_dev;
	uint32_t dev_id = pdev->id.device_id;
	const char *s;
	int ret = 1;

	switch (dev_id) {
	case LIO_CN23XX_VF_VID:
		lio_dev->chip_id = LIO_CN23XX_VF_VID;
		ret = cn23xx_vf_setup_device(lio_dev);
		s = "CN23XX VF";
		break;
	default:
		s = "?";
		lio_dev_err(lio_dev, "Unsupported Chip\n");
	}

	if (!ret)
		lio_dev_info(lio_dev, "DEVICE : %s\n", s);

	return ret;
}

static int
lio_first_time_init(struct lio_device *lio_dev,
		    struct rte_pci_device *pdev)
{
	int dpdk_queues;

	PMD_INIT_FUNC_TRACE();

	/* set dpdk specific pci device pointer */
	lio_dev->pci_dev = pdev;

	/* Identify the LIO type and set device ops */
	if (lio_chip_specific_setup(lio_dev)) {
		lio_dev_err(lio_dev, "Chip specific setup failed\n");
		return -1;
	}

	/* Initialize soft command buffer pool */
	if (lio_setup_sc_buffer_pool(lio_dev)) {
		lio_dev_err(lio_dev, "sc buffer pool allocation failed\n");
		return -1;
	}

	/* Initialize lists to manage the requests of different types that
	 * arrive from applications for this lio device.
	 */
	lio_setup_response_list(lio_dev);

	if (lio_dev->fn_list.setup_mbox(lio_dev)) {
		lio_dev_err(lio_dev, "Mailbox setup failed\n");
		goto error;
	}

	/* Check PF response */
	lio_check_pf_hs_response((void *)lio_dev);

	/* Do handshake and exit if incompatible PF driver */
	if (cn23xx_pfvf_handshake(lio_dev))
		goto error;

	cn23xx_vf_ask_pf_to_do_flr(lio_dev);
	/* Wait for FLR for 100ms per SRIOV specification */
	rte_delay_ms(100);

	if (cn23xx_vf_set_io_queues_off(lio_dev)) {
		lio_dev_err(lio_dev, "Setting io queues off failed\n");
		goto error;
	}

	if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
		lio_dev_err(lio_dev, "Failed to configure device registers\n");
		goto error;
	}

	if (lio_setup_instr_queue0(lio_dev)) {
		lio_dev_err(lio_dev, "Failed to setup instruction queue 0\n");
		goto error;
	}

	dpdk_queues = (int)lio_dev->sriov_info.rings_per_vf;
	lio_dev->max_tx_queues = dpdk_queues;
	lio_dev->max_rx_queues = dpdk_queues;

	/* Enable input and output queues for this device */
	if (lio_dev->fn_list.enable_io_queues(lio_dev))
		goto error;

	return 0;

error:
	lio_free_sc_buffer_pool(lio_dev);
	if (lio_dev->mbox[0])
		lio_dev->fn_list.free_mbox(lio_dev);
	if (lio_dev->instr_queue[0])
		lio_free_instr_queue0(lio_dev);
	return -1;
}

static int
lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	lio_free_sc_buffer_pool(lio_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}

static int
lio_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	PMD_INIT_FUNC_TRACE();

	eth_dev->rx_pkt_burst = &lio_dev_recv_pkts;
	eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts;

	/* Primary does the initialization. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pdev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	if (pdev->mem_resource[0].addr) {
		lio_dev->hw_addr = pdev->mem_resource[0].addr;
	} else {
		PMD_INIT_LOG(ERR, "ERROR: Failed to map BAR0\n");
		return -ENODEV;
	}

	lio_dev->eth_dev = eth_dev;
	/* set lio device print string */
	snprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string),
		 "%s[%02x:%02x.%x]", pdev->driver->driver.name,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);

	lio_dev->port_id = eth_dev->data->port_id;

	if (lio_first_time_init(lio_dev, pdev)) {
		lio_dev_err(lio_dev, "Device init failed\n");
		return -EINVAL;
	}

	eth_dev->dev_ops = &liovf_eth_dev_ops;
	eth_dev->data->mac_addrs = rte_zmalloc("lio", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		lio_dev_err(lio_dev,
			    "MAC addresses memory allocation failed\n");
		eth_dev->dev_ops = NULL;
		eth_dev->rx_pkt_burst = NULL;
		eth_dev->tx_pkt_burst = NULL;
		return -ENOMEM;
	}

	rte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING);

	lio_dev->port_configured = 0;
	/* Always allow unicast packets */
	lio_dev->ifflags |= LIO_IFFLAG_UNICAST;

	return 0;
}

/* Set of PCI devices this driver supports */
static const struct rte_pci_id pci_id_liovf_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) },
	{ .vendor_id = 0, /* sentinel */ }
};

static struct eth_driver rte_liovf_pmd = {
	.pci_drv = {
		.id_table	= pci_id_liovf_map,
		.drv_flags	= RTE_PCI_DRV_NEED_MAPPING,
		.probe		= rte_eth_dev_pci_probe,
		.remove		= rte_eth_dev_pci_remove,
	},
	.eth_dev_init		= lio_eth_dev_init,
	.eth_dev_uninit		= lio_eth_dev_uninit,
	.dev_private_size	= sizeof(struct lio_device),
};
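
/* Register the PMD with the PCI bus, export its device-id table, and
 * declare the kernel-module dependency (igb_uio or vfio) required to
 * bind the VF.
 */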
RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio");