 * Copyright (c) 2017 Cavium, Inc. All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of Cavium, Inc. nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_alarm.h>

#include "lio_logs.h"
#include "lio_23xx_vf.h"
#include "lio_ethdev.h"
#include "lio_rxtx.h"
/* Wait for control command to reach NIC. */
lio_wait_for_ctrl_cmd(struct lio_device *lio_dev,
		      struct lio_dev_ctrl_cmd *ctrl_cmd)
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
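	/* Busy-poll: keep flushing instruction queue 0 so completions are
	 * processed until the control command's completion callback sets
	 * ctrl_cmd->cond or the timeout expires.
	 */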
	while ((ctrl_cmd->cond == 0) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

 * \brief Send Rx control command
 * @param eth_dev Pointer to the structure rte_eth_dev
 * @param start_stop whether to start or stop
lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL;
	ctrl_pkt.ncmd.s.param1 = start_stop;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send RX Control message\n");

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "RX Control command timed out\n");
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 * - Pointer to the structure rte_eth_dev to read from.
 * - Pointer to the buffer to be saved with the link status.
 * - On success, zero.
 * - On failure, negative value.
lio_dev_atomic_write_link_status(struct rte_eth_dev *eth_dev,
				 struct rte_eth_link *link)
	struct rte_eth_link *dst = &eth_dev->data->dev_link;
	struct rte_eth_link *src = link;
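	/* struct rte_eth_link packs into 64 bits here, so a single 64-bit
	 * compare-and-set publishes the new link status atomically.
	 */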
	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
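
/* Software population count (SWAR): counts the set bits in a 64-bit word.
 * Used below to count how many input/output queues the firmware enabled in
 * the iq/oq masks it returned.
 */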
lio_hweight64(uint64_t w)
	uint64_t res = w - ((w >> 1) & 0x5555555555555555ul);

	res =
		(res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
	res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
	res = res + (res >> 8);
	res = res + (res >> 16);

	return (res + (res >> 32)) & 0x00000000000000FFul;
lio_dev_link_update(struct rte_eth_dev *eth_dev,
		    int wait_to_complete __rte_unused)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_eth_link link, old;
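	/* Start from a link-down report; it is overwritten below when the
	 * firmware indicates the link is up.
	 */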
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = ETH_SPEED_NUM_NONE;
	link.link_duplex = ETH_LINK_HALF_DUPLEX;
	memset(&old, 0, sizeof(old));

	/* Return what we found */
	if (lio_dev->linfo.link.s.link_up == 0) {
		/* Interface is down */
		if (lio_dev_atomic_write_link_status(eth_dev, &link))
		if (link.link_status == old.link_status)

	link.link_status = ETH_LINK_UP; /* Interface is up */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	switch (lio_dev->linfo.link.s.speed) {
	case LIO_LINK_SPEED_10000:
		link.link_speed = ETH_SPEED_NUM_10G;
		link.link_speed = ETH_SPEED_NUM_NONE;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;

	if (lio_dev_atomic_write_link_status(eth_dev, &link))
	if (link.link_status == old.link_status)
 * Setup our receive queue/ringbuffer. This is the
 * queue the Octeon uses to send us packets and
 * responses. We are given a memory pool for our
 * packet buffers that are used to populate the receive
 * queue.
 * Pointer to the structure rte_eth_dev
 * @param num_rx_descs
 * Number of entries in the queue
 * Where to allocate memory
 * Pointer to the structure rte_eth_rxconf
 * Pointer to the packet pool
 * - On success, return 0
 * - On failure, return -1
lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_rx_descs, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf __rte_unused,
		       struct rte_mempool *mp)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint32_t fw_mapped_oq;

	if (q_no >= lio_dev->nb_rx_queues) {
		lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no);

	lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no);
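	/* Map the DPDK Rx queue index to the firmware output queue (DROQ)
	 * number that was assigned during interface configuration.
	 */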
	fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;

	if ((lio_dev->droq[fw_mapped_oq]) &&
	    (num_rx_descs != lio_dev->droq[fw_mapped_oq]->max_count)) {
		lio_dev_err(lio_dev,
			    "Reconfiguring Rx descs not supported. Configure descs to same value %u or restart application\n",
			    lio_dev->droq[fw_mapped_oq]->max_count);

	mbp_priv = rte_mempool_get_priv(mp);
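	/* Usable Rx buffer size: the mbuf data room minus the reserved
	 * headroom.
	 */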
	buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

	if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp,
		lio_dev_err(lio_dev, "droq allocation failed\n");

	eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq];

 * Release the receive queue/ringbuffer. Called by
 * the upper layers.
 * Opaque pointer to the receive queue to release
lio_dev_rx_queue_release(void *rxq)
	struct lio_droq *droq = rxq;
	struct lio_device *lio_dev = droq->lio_dev;

	/* Run time queue deletion not supported */
	if (lio_dev->port_configured)

	lio_delete_droq_queue(droq->lio_dev, oq_no);
 * Allocate and initialize SW ring. Initialize associated HW registers.
 * Pointer to structure rte_eth_dev
 * @param num_tx_descs
 * Number of ringbuffer descriptors
 * NUMA socket id, used for memory allocations
 * Pointer to the structure rte_eth_txconf
 * - On success, return 0
 * - On failure, return -errno value
lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_tx_descs, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf __rte_unused)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	int fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no;

	if (q_no >= lio_dev->nb_tx_queues) {
		lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no);

	lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);

	if ((lio_dev->instr_queue[fw_mapped_iq] != NULL) &&
	    (num_tx_descs != lio_dev->instr_queue[fw_mapped_iq]->max_count)) {
		lio_dev_err(lio_dev,
			    "Reconfiguring Tx descs not supported. Configure descs to same value %u or restart application\n",
			    lio_dev->instr_queue[fw_mapped_iq]->max_count);

	retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
			      num_tx_descs, lio_dev, socket_id);
		lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n");
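	/* Allocate the scatter-gather lists used to chain multi-segment mbufs
	 * on this IQ; on failure, tear down the IQ created above.
	 */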
	retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
				   lio_dev->instr_queue[fw_mapped_iq]->max_count,
		lio_delete_instruction_queue(lio_dev, fw_mapped_iq);

	eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq];

 * Release the transmit queue/ringbuffer. Called by
 * the upper layers.
 * Opaque pointer to the transmit queue to release
lio_dev_tx_queue_release(void *txq)
	struct lio_instr_queue *tq = txq;
	struct lio_device *lio_dev = tq->lio_dev;
	uint32_t fw_mapped_iq_no;

	/* Run time queue deletion not supported */
	if (lio_dev->port_configured)

	lio_delete_sglist(tq);

	fw_mapped_iq_no = tq->txpciq.s.q_no;
	lio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no);
 * API to check link state.
lio_dev_get_link_status(struct rte_eth_dev *eth_dev)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	struct lio_link_status_resp *resp;
	union octeon_link_status *ls;
	struct lio_soft_command *sc;

	if (!lio_dev->intf_open)

	resp_size = sizeof(struct lio_link_status_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);

	resp = (struct lio_link_status_resp *)sc->virtrptr;
	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_INFO, 0, 0, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED)
		goto get_status_fail;
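	/* Poll for completion: flush the IQ until the firmware overwrites the
	 * response status word or the timeout expires.
	 */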
	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);

	goto get_status_fail;

	ls = &resp->link_info.link;

	lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3);

	if (lio_dev->linfo.link.link_status64 != ls->link_status64) {
		lio_dev->linfo.link.link_status64 = ls->link_status64;
		lio_dev_link_update(eth_dev, 0);

	lio_free_soft_command(sc);

	lio_free_soft_command(sc);
/* This function will be invoked every LIO_LSC_TIMEOUT microseconds (100 ms)
 * and will update the link state if it changes.
 */
lio_sync_link_state_check(void *eth_dev)
	struct lio_device *lio_dev =
		(((struct rte_eth_dev *)eth_dev)->data->dev_private);

	if (lio_dev->port_configured)
		lio_dev_get_link_status(eth_dev);

	/* Schedule periodic link status check.
	 * Stop the check if the interface is closed and start it again when
	 * the interface is opened.
	 */
	if (lio_dev->intf_open)
		rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check,
lio_dev_start(struct rte_eth_dev *eth_dev)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	lio_dev_info(lio_dev, "Starting port %d\n", eth_dev->data->port_id);

	if (lio_dev->fn_list.enable_io_queues(lio_dev))

	if (lio_send_rx_ctrl_cmd(eth_dev, 1))

	/* Ready for link status updates */
	lio_dev->intf_open = 1;

	/* start polling for lsc */
	ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
				lio_sync_link_state_check,

		lio_dev_err(lio_dev,
			    "link state check handler creation failed\n");
		goto dev_lsc_handle_error;

dev_lsc_handle_error:
	lio_dev->intf_open = 0;
	lio_send_rx_ctrl_cmd(eth_dev, 0);
static int lio_dev_configure(struct rte_eth_dev *eth_dev)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	int retval, num_iqueues, num_oqueues;
	uint8_t mac[ETHER_ADDR_LEN], i;
	struct lio_if_cfg_resp *resp;
	struct lio_soft_command *sc;
	union lio_if_cfg if_cfg;

	PMD_INIT_FUNC_TRACE();

	/* Re-configuring firmware not supported.
	 * Can't change tx/rx queues per port from initial value.
	 */
	if (lio_dev->port_configured) {
		if ((lio_dev->nb_rx_queues != eth_dev->data->nb_rx_queues) ||
		    (lio_dev->nb_tx_queues != eth_dev->data->nb_tx_queues)) {
			lio_dev_err(lio_dev,
				    "rxq/txq re-conf not supported. Restart application with new value.\n");

	lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues;
	lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues;

	resp_size = sizeof(struct lio_if_cfg_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);

	resp = (struct lio_if_cfg_resp *)sc->virtrptr;

	/* Firmware doesn't have the capability to reconfigure the queues;
	 * claim all queues and use only as many as required.
	 */
	if_cfg.s.num_iqueues = lio_dev->nb_tx_queues;
	if_cfg.s.num_oqueues = lio_dev->nb_rx_queues;
	if_cfg.s.base_queue = 0;

	if_cfg.s.gmx_port_id = lio_dev->pf_num;

	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_IF_CFG, 0,

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	retval = lio_send_soft_command(lio_dev, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_err(lio_dev, "iq/oq config failed status: %x\n",
		/* Soft instr is freed by driver in case of failure. */
		goto nic_config_fail;

	/* Poll till the completion word indicates that the
	 * response arrived or the command timed out.
	 */
	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
		lio_process_ordered_list(lio_dev);

	retval = resp->status;
		lio_dev_err(lio_dev, "iq/oq config failed\n");
		goto nic_config_fail;
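	/* The Octeon firmware returns the config info in big-endian; swap each
	 * 64-bit word so the fields read correctly on little-endian hosts
	 * (a no-op on big-endian hosts).
	 */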
	lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
			 sizeof(struct octeon_if_cfg_info) >> 3);

	num_iqueues = lio_hweight64(resp->cfg_info.iqmask);
	num_oqueues = lio_hweight64(resp->cfg_info.oqmask);

	if (!(num_iqueues) || !(num_oqueues)) {
		lio_dev_err(lio_dev,
			    "Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\n",
			    (unsigned long)resp->cfg_info.iqmask,
			    (unsigned long)resp->cfg_info.oqmask);
		goto nic_config_fail;

		"interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n",
		eth_dev->data->port_id,
		(unsigned long)resp->cfg_info.iqmask,
		(unsigned long)resp->cfg_info.oqmask,
		num_iqueues, num_oqueues);

	lio_dev->linfo.num_rxpciq = num_oqueues;
	lio_dev->linfo.num_txpciq = num_iqueues;

	for (i = 0; i < num_oqueues; i++) {
		lio_dev->linfo.rxpciq[i].rxpciq64 =
			resp->cfg_info.linfo.rxpciq[i].rxpciq64;
		lio_dev_dbg(lio_dev, "index %d OQ %d\n",
			    i, lio_dev->linfo.rxpciq[i].s.q_no);

	for (i = 0; i < num_iqueues; i++) {
		lio_dev->linfo.txpciq[i].txpciq64 =
			resp->cfg_info.linfo.txpciq[i].txpciq64;
		lio_dev_dbg(lio_dev, "index %d IQ %d\n",
			    i, lio_dev->linfo.txpciq[i].s.q_no);

	lio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
	lio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport;
	lio_dev->linfo.link.link_status64 =
		resp->cfg_info.linfo.link.link_status64;

	/* 64-bit swap required on LE machines */
	lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)mac, &eth_dev->data->mac_addrs[0]);
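	/* Per-IQ gather-list bookkeeping: one lock and one list head for each
	 * Tx queue granted by the firmware.
	 */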
	lio_dev->glist_lock =
		rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0);
	if (lio_dev->glist_lock == NULL)

	lio_dev->glist_head =
		rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues,
	if (lio_dev->glist_head == NULL) {
		rte_free(lio_dev->glist_lock);
		lio_dev->glist_lock = NULL;

	lio_dev_link_update(eth_dev, 0);

	lio_dev->port_configured = 1;

	lio_free_soft_command(sc);

	/* Disable iq_0 for reconf */
	lio_dev->fn_list.disable_io_queues(lio_dev);

	lio_dev->fn_list.setup_device_regs(lio_dev);

	/* Free iq_0 used during init */
	lio_free_instr_queue0(lio_dev);

	lio_dev_err(lio_dev, "Failed retval %d\n", retval);
	lio_free_soft_command(sc);
	lio_free_instr_queue0(lio_dev);
/* Define our Ethernet device operations */
static const struct eth_dev_ops liovf_eth_dev_ops = {
	.dev_configure = lio_dev_configure,
	.dev_start = lio_dev_start,
	.link_update = lio_dev_link_update,
	.rx_queue_setup = lio_dev_rx_queue_setup,
	.rx_queue_release = lio_dev_rx_queue_release,
	.tx_queue_setup = lio_dev_tx_queue_setup,
	.tx_queue_release = lio_dev_tx_queue_release,

lio_check_pf_hs_response(void *lio_dev)
	struct lio_device *dev = lio_dev;

	/* Check until the PF's handshake response arrives */
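	/* A nonzero coproc_tics_per_us means the PF has already answered the
	 * handshake, so stop re-arming this alarm.
	 */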
	if (dev->pfvf_hsword.coproc_tics_per_us)

	cn23xx_vf_handle_mbox(dev);

	rte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev);
 * \brief Identify the LIO device and map the BAR address space
 * @param lio_dev lio device
lio_chip_specific_setup(struct lio_device *lio_dev)
	struct rte_pci_device *pdev = lio_dev->pci_dev;
	uint32_t dev_id = pdev->id.device_id;

	case LIO_CN23XX_VF_VID:
		lio_dev->chip_id = LIO_CN23XX_VF_VID;
		ret = cn23xx_vf_setup_device(lio_dev);

		lio_dev_err(lio_dev, "Unsupported Chip\n");

	lio_dev_info(lio_dev, "DEVICE : %s\n", s);

lio_first_time_init(struct lio_device *lio_dev,
		    struct rte_pci_device *pdev)
	PMD_INIT_FUNC_TRACE();

	/* set DPDK-specific PCI device pointer */
	lio_dev->pci_dev = pdev;

	/* Identify the LIO type and set device ops */
	if (lio_chip_specific_setup(lio_dev)) {
		lio_dev_err(lio_dev, "Chip specific setup failed\n");

	/* Initialize soft command buffer pool */
	if (lio_setup_sc_buffer_pool(lio_dev)) {
		lio_dev_err(lio_dev, "sc buffer pool allocation failed\n");

	/* Initialize lists to manage the requests of different types that
	 * arrive from applications for this lio device.
	 */
	lio_setup_response_list(lio_dev);

	if (lio_dev->fn_list.setup_mbox(lio_dev)) {
		lio_dev_err(lio_dev, "Mailbox setup failed\n");

	/* Check PF response */
	lio_check_pf_hs_response((void *)lio_dev);

	/* Do handshake and exit if incompatible PF driver */
	if (cn23xx_pfvf_handshake(lio_dev))

	cn23xx_vf_ask_pf_to_do_flr(lio_dev);
	/* Wait for FLR for 100ms per SR-IOV specification */

	if (cn23xx_vf_set_io_queues_off(lio_dev)) {
		lio_dev_err(lio_dev, "Setting io queues off failed\n");

	if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
		lio_dev_err(lio_dev, "Failed to configure device registers\n");

	if (lio_setup_instr_queue0(lio_dev)) {
		lio_dev_err(lio_dev, "Failed to setup instruction queue 0\n");
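	/* The PF grants this VF a fixed number of rings; expose that many
	 * Tx and Rx queues to DPDK.
	 */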
	dpdk_queues = (int)lio_dev->sriov_info.rings_per_vf;

	lio_dev->max_tx_queues = dpdk_queues;
	lio_dev->max_rx_queues = dpdk_queues;

	/* Enable input and output queues for this device */
	if (lio_dev->fn_list.enable_io_queues(lio_dev))

	lio_free_sc_buffer_pool(lio_dev);
	if (lio_dev->mbox[0])
		lio_dev->fn_list.free_mbox(lio_dev);
	if (lio_dev->instr_queue[0])
		lio_free_instr_queue0(lio_dev);

lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	/* Free the soft command buffer pool */
	lio_free_sc_buffer_pool(lio_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
lio_eth_dev_init(struct rte_eth_dev *eth_dev)
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	PMD_INIT_FUNC_TRACE();

	eth_dev->rx_pkt_burst = &lio_dev_recv_pkts;
	eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts;

	/* Primary does the initialization. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)

	rte_eth_copy_pci_info(eth_dev, pdev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	if (pdev->mem_resource[0].addr) {
		lio_dev->hw_addr = pdev->mem_resource[0].addr;

	PMD_INIT_LOG(ERR, "ERROR: Failed to map BAR0\n");

	lio_dev->eth_dev = eth_dev;
	/* set lio device print string */
	snprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string),
		 "%s[%02x:%02x.%x]", pdev->driver->driver.name,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);

	lio_dev->port_id = eth_dev->data->port_id;

	if (lio_first_time_init(lio_dev, pdev)) {
		lio_dev_err(lio_dev, "Device init failed\n");

	eth_dev->dev_ops = &liovf_eth_dev_ops;
	eth_dev->data->mac_addrs = rte_zmalloc("lio", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		lio_dev_err(lio_dev,
			    "MAC addresses memory allocation failed\n");
		eth_dev->dev_ops = NULL;
		eth_dev->rx_pkt_burst = NULL;
		eth_dev->tx_pkt_burst = NULL;

	rte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING);

	lio_dev->port_configured = 0;
	/* Always allow unicast packets */
	lio_dev->ifflags |= LIO_IFFLAG_UNICAST;
/* Set of PCI devices this driver supports */
static const struct rte_pci_id pci_id_liovf_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) },
	{ .vendor_id = 0, /* sentinel */ }

static struct eth_driver rte_liovf_pmd = {
	.id_table = pci_id_liovf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = rte_eth_dev_pci_probe,
	.remove = rte_eth_dev_pci_remove,
	.eth_dev_init = lio_eth_dev_init,
	.eth_dev_uninit = lio_eth_dev_uninit,
	.dev_private_size = sizeof(struct lio_device),

RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio");