X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fliquidio%2Flio_ethdev.c;h=de234e903bc6c7a4b46c9a49ee1734bc7f814521;hb=6d13ea8e8e49ab957deae2bba5ecf4a4bfe747d1;hp=479936a52ff9a12844a2d4ded69782122c5d2ae5;hpb=dc7037ba73f84c8bc5923813e0398f3c1c28de9d;p=dpdk.git

diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
index 479936a52f..de234e903b 100644
--- a/drivers/net/liquidio/lio_ethdev.c
+++ b/drivers/net/liquidio/lio_ethdev.c
@@ -1,37 +1,9 @@
-/*
- *   BSD LICENSE
- *
- *   Copyright(c) 2017 Cavium, Inc.. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Cavium, Inc. nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Cavium, Inc
  */
 
-#include
+#include
+#include
 #include
 #include
 #include
@@ -43,6 +15,9 @@
 #include "lio_ethdev.h"
 #include "lio_rxtx.h"
 
+int lio_logtype_init;
+int lio_logtype_driver;
+
 /* Default RSS key in use */
 static uint8_t lio_rss_key[40] = {
 	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
@@ -311,7 +286,7 @@ lio_dev_xstats_reset(struct rte_eth_dev *eth_dev)
 }
 
 /* Retrieve the device statistics (# packets in/out, # bytes in/out, etc */
-static void
+static int
 lio_dev_stats_get(struct rte_eth_dev *eth_dev,
 		  struct rte_eth_stats *stats)
 {
@@ -359,6 +334,8 @@ lio_dev_stats_get(struct rte_eth_dev *eth_dev,
 	stats->ibytes = bytes;
 	stats->ipackets = pkts;
 	stats->ierrors = drop;
+
+	return 0;
 }
 
 static void
@@ -397,12 +374,14 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 
-	devinfo->pci_dev = pci_dev;
-
 	switch (pci_dev->id.subsystem_device_id) {
 	/* CN23xx 10G cards */
 	case PCI_SUBSYS_DEV_ID_CN2350_210:
 	case PCI_SUBSYS_DEV_ID_CN2360_210:
+	case PCI_SUBSYS_DEV_ID_CN2350_210SVPN3:
+	case PCI_SUBSYS_DEV_ID_CN2360_210SVPN3:
+	case PCI_SUBSYS_DEV_ID_CN2350_210SVPT:
+	case PCI_SUBSYS_DEV_ID_CN2360_210SVPT:
 		devinfo->speed_capa = ETH_LINK_SPEED_10G;
 		break;
 	/* CN23xx 25G cards */
@@ -411,8 +390,9 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
 		devinfo->speed_capa = ETH_LINK_SPEED_25G;
 		break;
 	default:
+		devinfo->speed_capa = ETH_LINK_SPEED_10G;
 		lio_dev_err(lio_dev,
-			    "Unknown CN23XX subsystem device id. Not setting speed capability.\n");
+			    "Unknown CN23XX subsystem device id. Setting 10G as default link speed.\n");
 	}
 
 	devinfo->max_rx_queues = lio_dev->max_rx_queues;
@@ -446,29 +426,66 @@ lio_dev_info_get(struct rte_eth_dev *eth_dev,
 }
 
 static int
-lio_dev_validate_vf_mtu(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
+lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 {
 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
+	uint16_t pf_mtu = lio_dev->linfo.link.s.mtu;
+	uint32_t frame_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+	struct lio_dev_ctrl_cmd ctrl_cmd;
+	struct lio_ctrl_pkt ctrl_pkt;
 
 	PMD_INIT_FUNC_TRACE();
 
 	if (!lio_dev->intf_open) {
-		lio_dev_err(lio_dev, "Port %d down, can't check MTU\n",
+		lio_dev_err(lio_dev, "Port %d down, can't set MTU\n",
 			    lio_dev->port_id);
 		return -EINVAL;
 	}
 
-	/* Limit the MTU to make sure the ethernet packets are between
-	 * ETHER_MIN_MTU bytes and PF's MTU
+	/* check if VF MTU is within allowed range.
+	 * New value should not exceed PF MTU.
 	 */
-	if ((new_mtu < ETHER_MIN_MTU) ||
-	    (new_mtu > lio_dev->linfo.link.s.mtu)) {
-		lio_dev_err(lio_dev, "Invalid MTU: %d\n", new_mtu);
-		lio_dev_err(lio_dev, "Valid range %d and %d\n",
-			    ETHER_MIN_MTU, lio_dev->linfo.link.s.mtu);
+	if ((mtu < ETHER_MIN_MTU) || (mtu > pf_mtu)) {
+		lio_dev_err(lio_dev, "VF MTU should be >= %d and <= %d\n",
+			    ETHER_MIN_MTU, pf_mtu);
 		return -EINVAL;
 	}
 
+	/* flush added to prevent cmd failure
+	 * incase the queue is full
+	 */
+	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+	ctrl_cmd.eth_dev = eth_dev;
+	ctrl_cmd.cond = 0;
+
+	ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_MTU;
+	ctrl_pkt.ncmd.s.param1 = mtu;
+	ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+		lio_dev_err(lio_dev, "Failed to send command to change MTU\n");
+		return -1;
+	}
+
+	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+		lio_dev_err(lio_dev, "Command to change MTU timed out\n");
+		return -1;
+	}
+
+	if (frame_len > ETHER_MAX_LEN)
+		eth_dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_JUMBO_FRAME;
+	else
+		eth_dev->data->dev_conf.rxmode.offloads &=
+			~DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+	eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
+	eth_dev->data->mtu = mtu;
+
 	return 0;
 }
 
@@ -888,32 +905,6 @@ lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on)
 	return 0;
 }
 
-/**
- * Atomically writes the link status information into global
- * structure rte_eth_dev.
- *
- * @param eth_dev
- *   - Pointer to the structure rte_eth_dev to read from.
- *   - Pointer to the buffer to be saved with the link status.
- *
- * @return
- *   - On success, zero.
- *   - On failure, negative value.
- */
-static inline int
-lio_dev_atomic_write_link_status(struct rte_eth_dev *eth_dev,
-				 struct rte_eth_link *link)
-{
-	struct rte_eth_link *dst = &eth_dev->data->dev_link;
-	struct rte_eth_link *src = link;
-
-	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
-				*(uint64_t *)src) == 0)
-		return -1;
-
-	return 0;
-}
-
 static uint64_t
 lio_hweight64(uint64_t w)
 {
@@ -933,22 +924,19 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev,
 		    int wait_to_complete __rte_unused)
 {
 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
-	struct rte_eth_link link, old;
+	struct rte_eth_link link;
 
 	/* Initialize */
+	memset(&link, 0, sizeof(link));
 	link.link_status = ETH_LINK_DOWN;
 	link.link_speed = ETH_SPEED_NUM_NONE;
 	link.link_duplex = ETH_LINK_HALF_DUPLEX;
-	memset(&old, 0, sizeof(old));
+	link.link_autoneg = ETH_LINK_AUTONEG;
 
 	/* Return what we found */
 	if (lio_dev->linfo.link.s.link_up == 0) {
 		/* Interface is down */
-		if (lio_dev_atomic_write_link_status(eth_dev, &link))
-			return -1;
-		if (link.link_status == old.link_status)
-			return -1;
-		return 0;
+		return rte_eth_linkstatus_set(eth_dev, &link);
 	}
 
 	link.link_status = ETH_LINK_UP; /* Interface is up */
@@ -965,13 +953,7 @@ lio_dev_link_update(struct rte_eth_dev *eth_dev,
 		link.link_duplex = ETH_LINK_HALF_DUPLEX;
 	}
 
-	if (lio_dev_atomic_write_link_status(eth_dev, &link))
-		return -1;
-
-	if (link.link_status == old.link_status)
-		return -1;
-
-	return 0;
+	return rte_eth_linkstatus_set(eth_dev, &link);
 }
 
 /**
@@ -1010,6 +992,48 @@ lio_change_dev_flag(struct rte_eth_dev *eth_dev)
 		lio_dev_err(lio_dev, "Change dev flag command timed out\n");
 }
 
+static void
+lio_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+	struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+	if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
+		lio_dev_err(lio_dev, "Require firmware version >= %s\n",
+			    LIO_VF_TRUST_MIN_VERSION);
+		return;
+	}
+
+	if (!lio_dev->intf_open) {
+		lio_dev_err(lio_dev, "Port %d down, can't enable promiscuous\n",
+			    lio_dev->port_id);
+		return;
+	}
+
+	lio_dev->ifflags |= LIO_IFFLAG_PROMISC;
+	lio_change_dev_flag(eth_dev);
+}
+
+static void
+lio_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+	struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+	if (strcmp(lio_dev->firmware_version, LIO_VF_TRUST_MIN_VERSION) < 0) {
+		lio_dev_err(lio_dev, "Require firmware version >= %s\n",
+			    LIO_VF_TRUST_MIN_VERSION);
+		return;
+	}
+
+	if (!lio_dev->intf_open) {
+		lio_dev_err(lio_dev, "Port %d down, can't disable promiscuous\n",
+			    lio_dev->port_id);
+		return;
+	}
+
+	lio_dev->ifflags &= ~LIO_IFFLAG_PROMISC;
+	lio_change_dev_flag(eth_dev);
+}
+
 static void
 lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
 {
@@ -1143,12 +1167,10 @@ lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
 
 	fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;
 
-	if ((lio_dev->droq[fw_mapped_oq]) &&
-	    (num_rx_descs != lio_dev->droq[fw_mapped_oq]->max_count)) {
-		lio_dev_err(lio_dev,
-			    "Reconfiguring Rx descs not supported. Configure descs to same value %u or restart application\n",
-			    lio_dev->droq[fw_mapped_oq]->max_count);
-		return -ENOTSUP;
+	/* Free previous allocation if any */
+	if (eth_dev->data->rx_queues[q_no] != NULL) {
+		lio_dev_rx_queue_release(eth_dev->data->rx_queues[q_no]);
+		eth_dev->data->rx_queues[q_no] = NULL;
 	}
 
 	mbp_priv = rte_mempool_get_priv(mp);
@@ -1182,10 +1204,6 @@ lio_dev_rx_queue_release(void *rxq)
 	int oq_no;
 
 	if (droq) {
-		/* Run time queue deletion not supported */
-		if (droq->lio_dev->port_configured)
-			return;
-
 		oq_no = droq->q_no;
 		lio_delete_droq_queue(droq->lio_dev, oq_no);
 	}
@@ -1229,12 +1247,10 @@ lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
 
 	lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);
 
-	if ((lio_dev->instr_queue[fw_mapped_iq] != NULL) &&
-	    (num_tx_descs != lio_dev->instr_queue[fw_mapped_iq]->max_count)) {
-		lio_dev_err(lio_dev,
-			    "Reconfiguring Tx descs not supported. Configure descs to same value %u or restart application\n",
-			    lio_dev->instr_queue[fw_mapped_iq]->max_count);
-		return -ENOTSUP;
+	/* Free previous allocation if any */
+	if (eth_dev->data->tx_queues[q_no] != NULL) {
+		lio_dev_tx_queue_release(eth_dev->data->tx_queues[q_no]);
+		eth_dev->data->tx_queues[q_no] = NULL;
 	}
 
 	retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
@@ -1246,7 +1262,7 @@ lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
 	}
 
 	retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
-				   lio_dev->instr_queue[fw_mapped_iq]->max_count,
+				   lio_dev->instr_queue[fw_mapped_iq]->nb_desc,
 				   socket_id);
 
 	if (retval) {
@@ -1277,10 +1293,6 @@ lio_dev_tx_queue_release(void *txq)
 
 
 	if (tq) {
-		/* Run time queue deletion not supported */
-		if (tq->lio_dev->port_configured)
-			return;
-
 		/* Free sg_list */
 		lio_delete_sglist(tq);
 
@@ -1333,6 +1345,11 @@ lio_dev_get_link_status(struct rte_eth_dev *eth_dev)
 	lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3);
 
 	if (lio_dev->linfo.link.link_status64 != ls->link_status64) {
+		if (ls->s.mtu < eth_dev->data->mtu) {
+			lio_dev_info(lio_dev, "Lowered VF MTU to %d as PF MTU dropped\n",
+				     ls->s.mtu);
+			eth_dev->data->mtu = ls->s.mtu;
+		}
 		lio_dev->linfo.link.link_status64 = ls->link_status64;
 		lio_dev_link_update(eth_dev, 0);
 	}
@@ -1389,6 +1406,11 @@ lio_dev_start(struct rte_eth_dev *eth_dev)
 	/* Configure RSS if device configured with multiple RX queues. */
 	lio_dev_mq_rx_configure(eth_dev);
 
+	/* Before update the link info,
+	 * must set linfo.link.link_status64 to 0.
+	 */
+	lio_dev->linfo.link.link_status64 = 0;
+
 	/* start polling for lsc */
 	ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
 				lio_sync_link_state_check,
@@ -1404,35 +1426,22 @@ lio_dev_start(struct rte_eth_dev *eth_dev)
 
 	if (lio_dev->linfo.link.link_status64 == 0) {
 		ret = -1;
-		goto dev_mtu_check_error;
+		goto dev_mtu_set_error;
 	}
 
-	if (eth_dev->data->dev_conf.rxmode.jumbo_frame == 1) {
-		if (frame_len <= ETHER_MAX_LEN ||
-		    frame_len > LIO_MAX_RX_PKTLEN) {
-			lio_dev_err(lio_dev, "max packet length should be >= %d and < %d when jumbo frame is enabled\n",
-				    ETHER_MAX_LEN, LIO_MAX_RX_PKTLEN);
-			ret = -EINVAL;
-			goto dev_mtu_check_error;
-		}
-		mtu = (uint16_t)(frame_len - ETHER_HDR_LEN - ETHER_CRC_LEN);
-	} else {
-		/* default MTU */
-		mtu = ETHER_MTU;
-		eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;
-	}
+	mtu = (uint16_t)(frame_len - ETHER_HDR_LEN - ETHER_CRC_LEN);
+	if (mtu < ETHER_MIN_MTU)
+		mtu = ETHER_MIN_MTU;
 
-	if (lio_dev->linfo.link.s.mtu != mtu) {
-		ret = lio_dev_validate_vf_mtu(eth_dev, mtu);
+	if (eth_dev->data->mtu != mtu) {
+		ret = lio_dev_mtu_set(eth_dev, mtu);
 		if (ret)
-			goto dev_mtu_check_error;
+			goto dev_mtu_set_error;
 	}
 
-	eth_dev->data->mtu = mtu;
-
 	return 0;
 
-dev_mtu_check_error:
+dev_mtu_set_error:
 	rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);
 
 dev_lsc_handle_error:
@@ -1457,6 +1466,8 @@ lio_dev_stop(struct rte_eth_dev *eth_dev)
 
 	lio_send_rx_ctrl_cmd(eth_dev, 0);
 
+	lio_wait_for_instr_fetch(lio_dev);
+
 	/* Clear recorded link status */
 	lio_dev->linfo.link.link_status64 = 0;
 }
@@ -1530,38 +1541,20 @@ static void
 lio_dev_close(struct rte_eth_dev *eth_dev)
 {
 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
-	uint32_t i;
 
 	lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id);
 
 	if (lio_dev->intf_open)
 		lio_dev_stop(eth_dev);
 
-	lio_wait_for_instr_fetch(lio_dev);
-
-	lio_dev->fn_list.disable_io_queues(lio_dev);
-
-	cn23xx_vf_set_io_queues_off(lio_dev);
-
-	/* Reset iq regs (IQ_DBELL).
-	 * Clear sli_pktx_cnts (OQ_PKTS_SENT).
-	 */
-	for (i = 0; i < lio_dev->nb_rx_queues; i++) {
-		struct lio_droq *droq = lio_dev->droq[i];
-
-		if (droq == NULL)
-			break;
-
-		uint32_t pkt_count = rte_read32(droq->pkts_sent_reg);
+	/* Reset ioq regs */
+	lio_dev->fn_list.setup_device_regs(lio_dev);
 
-		lio_dev_dbg(lio_dev,
-			    "pending oq count %u\n", pkt_count);
-		rte_write32(pkt_count, droq->pkts_sent_reg);
+	if (lio_dev->pci_dev->kdrv == RTE_KDRV_IGB_UIO) {
+		cn23xx_vf_ask_pf_to_do_flr(lio_dev);
+		rte_delay_ms(LIO_PCI_FLR_WAIT);
 	}
 
-	/* Do FLR for the VF */
-	cn23xx_vf_ask_pf_to_do_flr(lio_dev);
-
 	/* lio_free_mbox */
 	lio_dev->fn_list.free_mbox(lio_dev);
 
@@ -1645,7 +1638,76 @@ lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev)
 		lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n");
 }
 
-static int lio_dev_configure(struct rte_eth_dev *eth_dev)
+static int
+lio_send_queue_count_update(struct rte_eth_dev *eth_dev, int num_txq,
+			    int num_rxq)
+{
+	struct lio_device *lio_dev = LIO_DEV(eth_dev);
+	struct lio_dev_ctrl_cmd ctrl_cmd;
+	struct lio_ctrl_pkt ctrl_pkt;
+
+	if (strcmp(lio_dev->firmware_version, LIO_Q_RECONF_MIN_VERSION) < 0) {
+		lio_dev_err(lio_dev, "Require firmware version >= %s\n",
+			    LIO_Q_RECONF_MIN_VERSION);
+		return -ENOTSUP;
+	}
+
+	/* flush added to prevent cmd failure
+	 * incase the queue is full
+	 */
+	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+	ctrl_cmd.eth_dev = eth_dev;
+	ctrl_cmd.cond = 0;
+
+	ctrl_pkt.ncmd.s.cmd = LIO_CMD_QUEUE_COUNT_CTL;
+	ctrl_pkt.ncmd.s.param1 = num_txq;
+	ctrl_pkt.ncmd.s.param2 = num_rxq;
+	ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+		lio_dev_err(lio_dev, "Failed to send queue count control command\n");
+		return -1;
+	}
+
+	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+		lio_dev_err(lio_dev, "Queue count control command timed out\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+lio_reconf_queues(struct rte_eth_dev *eth_dev, int num_txq, int num_rxq)
+{
+	struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+	if (lio_dev->nb_rx_queues != num_rxq ||
+	    lio_dev->nb_tx_queues != num_txq) {
+		if (lio_send_queue_count_update(eth_dev, num_txq, num_rxq))
+			return -1;
+		lio_dev->nb_rx_queues = num_rxq;
+		lio_dev->nb_tx_queues = num_txq;
+	}
+
+	if (lio_dev->intf_open)
+		lio_dev_stop(eth_dev);
+
+	/* Reset ioq registers */
+	if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
+		lio_dev_err(lio_dev, "Failed to configure device registers\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+lio_dev_configure(struct rte_eth_dev *eth_dev)
 {
 	struct lio_device *lio_dev = LIO_DEV(eth_dev);
 	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
@@ -1658,22 +1720,21 @@ static int lio_dev_configure(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	/* Re-configuring firmware not supported.
-	 * Can't change tx/rx queues per port from initial value.
+	/* Inform firmware about change in number of queues to use.
+	 * Disable IO queues and reset registers for re-configuration.
 	 */
-	if (lio_dev->port_configured) {
-		if ((lio_dev->nb_rx_queues != eth_dev->data->nb_rx_queues) ||
-		    (lio_dev->nb_tx_queues != eth_dev->data->nb_tx_queues)) {
-			lio_dev_err(lio_dev,
-				    "rxq/txq re-conf not supported. Restart application with new value.\n");
-			return -ENOTSUP;
-		}
-		return 0;
-	}
+	if (lio_dev->port_configured)
+		return lio_reconf_queues(eth_dev,
+					 eth_dev->data->nb_tx_queues,
+					 eth_dev->data->nb_rx_queues);
 
 	lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues;
 	lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues;
 
+	/* Set max number of queues which can be re-configured. */
+	lio_dev->max_rx_queues = eth_dev->data->nb_rx_queues;
+	lio_dev->max_tx_queues = eth_dev->data->nb_tx_queues;
+
 	resp_size = sizeof(struct lio_if_cfg_resp);
 	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
 	if (sc == NULL)
@@ -1721,6 +1782,9 @@ static int lio_dev_configure(struct rte_eth_dev *eth_dev)
 		goto nic_config_fail;
 	}
 
+	strlcpy(lio_dev->firmware_version,
+		resp->cfg_info.lio_firmware_version, LIO_FW_VERSION_LENGTH);
+
 	lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
 			 sizeof(struct octeon_if_cfg_info) >> 3);
 
@@ -1771,7 +1835,8 @@ static int lio_dev_configure(struct rte_eth_dev *eth_dev)
 						       2 + i));
 
 	/* Copy the permanent MAC address */
-	ether_addr_copy((struct ether_addr *)mac, &eth_dev->data->mac_addrs[0]);
+	ether_addr_copy((struct rte_ether_addr *)mac,
+			&eth_dev->data->mac_addrs[0]);
 
 	/* enable firmware checksum support for tunnel packets */
 	lio_enable_hw_tunnel_rx_checksum(eth_dev);
@@ -1797,9 +1862,6 @@ static int lio_dev_configure(struct rte_eth_dev *eth_dev)
 
 	lio_free_soft_command(sc);
 
-	/* Disable iq_0 for reconf */
-	lio_dev->fn_list.disable_io_queues(lio_dev);
-
 	/* Reset ioq regs */
 	lio_dev->fn_list.setup_device_regs(lio_dev);
 
@@ -1824,6 +1886,8 @@ static const struct eth_dev_ops liovf_eth_dev_ops = {
 	.dev_set_link_up	= lio_dev_set_link_up,
 	.dev_set_link_down	= lio_dev_set_link_down,
 	.dev_close		= lio_dev_close,
+	.promiscuous_enable	= lio_dev_promiscuous_enable,
+	.promiscuous_disable	= lio_dev_promiscuous_disable,
 	.allmulticast_enable	= lio_dev_allmulticast_enable,
 	.allmulticast_disable	= lio_dev_allmulticast_disable,
 	.link_update		= lio_dev_link_update,
@@ -1844,6 +1908,7 @@ static const struct eth_dev_ops liovf_eth_dev_ops = {
 	.rss_hash_update	= lio_dev_rss_hash_update,
 	.udp_tunnel_port_add	= lio_dev_udp_tunnel_add,
 	.udp_tunnel_port_del	= lio_dev_udp_tunnel_del,
+	.mtu_set		= lio_dev_mtu_set,
 };
 
 static void
@@ -1929,14 +1994,11 @@ lio_first_time_init(struct lio_device *lio_dev,
 	if (cn23xx_pfvf_handshake(lio_dev))
 		goto error;
 
-	/* Initial reset */
-	cn23xx_vf_ask_pf_to_do_flr(lio_dev);
-	/* Wait for FLR for 100ms per SRIOV specification */
-	rte_delay_ms(100);
-
-	if (cn23xx_vf_set_io_queues_off(lio_dev)) {
-		lio_dev_err(lio_dev, "Setting io queues off failed\n");
-		goto error;
+	/* Request and wait for device reset. */
+	if (pdev->kdrv == RTE_KDRV_IGB_UIO) {
+		cn23xx_vf_ask_pf_to_do_flr(lio_dev);
+		/* FLR wait time doubled as a precaution. */
+		rte_delay_ms(LIO_PCI_FLR_WAIT * 2);
 	}
 
 	if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
@@ -1978,14 +2040,11 @@ lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)
 	PMD_INIT_FUNC_TRACE();
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return -EPERM;
+		return 0;
 
 	/* lio_free_sc_buffer_pool */
 	lio_free_sc_buffer_pool(lio_dev);
 
-	rte_free(eth_dev->data->mac_addrs);
-	eth_dev->data->mac_addrs = NULL;
-
 	eth_dev->dev_ops = NULL;
 	eth_dev->rx_pkt_burst = NULL;
 	eth_dev->tx_pkt_burst = NULL;
@@ -2009,7 +2068,6 @@ lio_eth_dev_init(struct rte_eth_dev *eth_dev)
 		return 0;
 
 	rte_eth_copy_pci_info(eth_dev, pdev);
-	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
 	if (pdev->mem_resource[0].addr) {
 		lio_dev->hw_addr = pdev->mem_resource[0].addr;
@@ -2056,19 +2114,8 @@ static int
 lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 		      struct rte_pci_device *pci_dev)
 {
-	struct rte_eth_dev *eth_dev;
-	int ret;
-
-	eth_dev = rte_eth_dev_pci_allocate(pci_dev,
-					   sizeof(struct lio_device));
-	if (eth_dev == NULL)
-		return -ENOMEM;
-
-	ret = lio_eth_dev_init(eth_dev);
-	if (ret)
-		rte_eth_dev_pci_release(eth_dev);
-
-	return ret;
+	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct lio_device),
+					     lio_eth_dev_init);
 }
 
 static int
@@ -2094,3 +2141,13 @@ static struct rte_pci_driver rte_liovf_pmd = {
 RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci");
+
+RTE_INIT(lio_init_log)
+{
+	lio_logtype_init = rte_log_register("pmd.net.liquidio.init");
+	if (lio_logtype_init >= 0)
+		rte_log_set_level(lio_logtype_init, RTE_LOG_NOTICE);
+	lio_logtype_driver = rte_log_register("pmd.net.liquidio.driver");
+	if (lio_logtype_driver >= 0)
+		rte_log_set_level(lio_logtype_driver, RTE_LOG_NOTICE);
+}