diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
index b37b31a5c0..761f6c1c4f 100644
--- a/drivers/net/avp/avp_ethdev.c
+++ b/drivers/net/avp/avp_ethdev.c
@@ -1,33 +1,5 @@
-/*
- * BSD LICENSE
- *
- * Copyright (c) 2013-2017, Wind River Systems, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1) Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2) Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3) Neither the name of Wind River Systems nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2017 Wind River Systems, Inc.
  */
 
 #include <stdint.h>
@@ -36,17 +8,19 @@
 #include <errno.h>
 #include <unistd.h>
 
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
 #include <rte_memcpy.h>
 #include <rte_string_fns.h>
-#include <rte_memzone.h>
 #include <rte_malloc.h>
 #include <rte_atomic.h>
 #include <rte_branch_prediction.h>
 #include <rte_pci.h>
+#include <rte_bus_pci.h>
 #include <rte_ether.h>
 #include <rte_common.h>
 #include <rte_cycles.h>
+#include <rte_spinlock.h>
 #include <rte_byteorder.h>
 #include <rte_dev.h>
 #include <rte_memory.h>
@@ -58,14 +32,19 @@
 
 #include "avp_logs.h"
 
+int avp_logtype_driver;
+
+static int avp_dev_create(struct rte_pci_device *pci_dev,
+			  struct rte_eth_dev *eth_dev);
 static int avp_dev_configure(struct rte_eth_dev *dev);
+static int avp_dev_start(struct rte_eth_dev *dev);
+static void avp_dev_stop(struct rte_eth_dev *dev);
+static void avp_dev_close(struct rte_eth_dev *dev);
 static void avp_dev_info_get(struct rte_eth_dev *dev,
 			     struct rte_eth_dev_info *dev_info);
-static void avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
-static int avp_dev_link_update(struct rte_eth_dev *dev,
-			       __rte_unused int wait_to_complete);
+static int avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int avp_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete);
 static void avp_dev_promiscuous_enable(struct rte_eth_dev *dev);
 static void avp_dev_promiscuous_disable(struct rte_eth_dev *dev);
@@ -101,14 +80,11 @@ static uint16_t avp_xmit_pkts(void *tx_queue,
 static void avp_dev_rx_queue_release(void *rxq);
 static void avp_dev_tx_queue_release(void *txq);
 
-static void avp_dev_stats_get(struct rte_eth_dev *dev,
+static int avp_dev_stats_get(struct rte_eth_dev *dev,
 			      struct rte_eth_stats *stats);
 static void avp_dev_stats_reset(struct rte_eth_dev *dev);
 
 
-#define AVP_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
-
-
 #define AVP_MAX_RX_BURST 64
 #define AVP_MAX_TX_BURST 64
 #define AVP_MAX_MAC_ADDRS 1
@@ -150,6 +126,9 @@ static const struct rte_pci_id pci_id_avp_map[] = {
  */
 static const struct eth_dev_ops avp_eth_dev_ops = {
 	.dev_configure       = avp_dev_configure,
+	.dev_start           = avp_dev_start,
+	.dev_stop            = avp_dev_stop,
+	.dev_close           = avp_dev_close,
 	.dev_infos_get       = avp_dev_info_get,
 	.vlan_offload_set    = avp_vlan_offload_set,
 	.stats_get           = avp_dev_stats_get,
@@ -167,6 +146,7 @@ static const struct eth_dev_ops avp_eth_dev_ops = {
 #define AVP_F_PROMISC (1 << 1)
 #define AVP_F_CONFIGURED (1 << 2)
 #define AVP_F_LINKUP (1 << 3)
+#define AVP_F_DETACHED (1 << 4)
 /**@} */
 
 /* Ethernet device validation marker */
@@ -183,7 +163,7 @@ struct avp_dev {
 	struct rte_eth_dev_data *dev_data;
 	/**< Back pointer to ethernet device data */
 	volatile uint32_t flags; /**< Device operational flags */
-	uint8_t port_id; /**< Ethernet port identifier */
+	uint16_t port_id; /**< Ethernet port identifier */
 	struct rte_mempool *pool; /**< pkt mbuf mempool */
 	unsigned int guest_mbuf_size; /**< local pool mbuf size */
 	unsigned int host_mbuf_size; /**< host mbuf size */
@@ -202,6 +182,9 @@ struct avp_dev {
 	struct rte_avp_fifo *free_q[RTE_AVP_MAX_QUEUES];
 	/**< To be freed mbufs queue */
 
+	/* mutual exclusion over the 'flag' and 'resp_q/req_q' fields */
+	rte_spinlock_t lock;
+
 	/* For request & response */
 	struct rte_avp_fifo *req_q; /**< Request queue */
 	struct rte_avp_fifo *resp_q; /**< Response queue */
@@ -314,6 +297,23 @@ done:
 	return ret;
 }
 
+static int
+avp_dev_ctrl_set_link_state(struct rte_eth_dev *eth_dev, unsigned int state)
+{
+	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	struct rte_avp_request request;
+	int ret;
+
+	/* setup a link state change request */
+	memset(&request, 0, sizeof(request));
+	request.req_id = RTE_AVP_REQ_CFG_NETWORK_IF;
+	request.if_up = state;
+
+	ret = avp_dev_process_request(avp, &request);
+
+	return ret == 0 ? request.result : ret;
+}
+
 static int
 avp_dev_ctrl_set_config(struct rte_eth_dev *eth_dev,
 			struct rte_avp_device_config *config)
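
All of the avp_dev_ctrl_*() helpers above share one request/response shape: fill in an rte_avp_request, hand it to avp_dev_process_request(), and fold the host's verdict into the return value with "ret == 0 ? request.result : ret". The body of avp_dev_process_request() lies outside this diff; purely as a sketch — assuming the avp_fifo_put()/avp_fifo_get() helpers from rte_avp_fifo.h and using an invented example_* name — the round trip over the req_q/resp_q FIFO pair reduces to:

#include <errno.h>
#include <rte_cycles.h>
#include "rte_avp_fifo.h"

/* Illustrative sketch only, not the driver's real avp_dev_process_request():
 * enqueue one request for the host, then poll the response FIFO until the
 * host writes back a result or we give up.
 */
static int
example_process_request(struct avp_dev *avp, struct rte_avp_request *req)
{
	void *msg = req;	/* real code translates this to a host buffer */
	unsigned int i;

	if (avp_fifo_put(avp->req_q, &msg, 1) != 1)
		return -EBUSY;	/* request ring is full */

	for (i = 0; i < 100; i++) {
		if (avp_fifo_get(avp->resp_q, &msg, 1) == 1)
			return 0;	/* host filled in req->result */
		rte_delay_ms(10);	/* response not ready yet */
	}
	return -ETIME;
}
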
@@ -332,6 +332,22 @@ avp_dev_ctrl_set_config(struct rte_eth_dev *eth_dev,
 	return ret == 0 ? request.result : ret;
 }
 
+static int
+avp_dev_ctrl_shutdown(struct rte_eth_dev *eth_dev)
+{
+	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	struct rte_avp_request request;
+	int ret;
+
+	/* setup a shutdown request */
+	memset(&request, 0, sizeof(request));
+	request.req_id = RTE_AVP_REQ_SHUTDOWN_DEVICE;
+
+	ret = avp_dev_process_request(avp, &request);
+
+	return ret == 0 ? request.result : ret;
+}
+
 /* translate from host mbuf virtual address to guest virtual address */
 static inline void *
 avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address)
@@ -344,9 +360,9 @@ avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address)
 /* translate from host physical address to guest virtual address */
 static void *
 avp_dev_translate_address(struct rte_eth_dev *eth_dev,
-			  phys_addr_t host_phys_addr)
+			  rte_iova_t host_phys_addr)
 {
-	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct rte_mem_resource *resource;
 	struct rte_avp_memmap_info *info;
 	struct rte_avp_memmap *map;
@@ -367,7 +383,7 @@ avp_dev_translate_address(struct rte_eth_dev *eth_dev,
 		    (host_phys_addr < (map->phys_addr + map->length))) {
 			/* address is within this segment */
 			offset += (host_phys_addr - map->phys_addr);
-			addr = RTE_PTR_ADD(addr, offset);
+			addr = RTE_PTR_ADD(addr, (uintptr_t)offset);
 
 			PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
 				    host_phys_addr, addr);
@@ -399,7 +415,7 @@ avp_dev_version_check(uint32_t version)
 static int
 avp_dev_check_regions(struct rte_eth_dev *eth_dev)
 {
-	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct rte_avp_memmap_info *memmap;
 	struct rte_avp_device_info *info;
 	struct rte_mem_resource *resource;
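
avp_dev_translate_address() above resolves a host physical address by walking the memmap segments the host exposes through the PCI BAR, accumulating an offset across segments until it finds the one containing the target address. Isolated from the driver plumbing, with example_* names standing in for the parsed BAR contents, that lookup is simply:

#include <rte_common.h>
#include <rte_memory.h>

/* Sketch of the segment walk in avp_dev_translate_address() above; 'segs'
 * and 'nb_segs' stand in for the rte_avp_memmap table read from the BAR,
 * 'base' for the guest mapping of the memory resource.
 */
static void *
example_translate(struct rte_avp_memmap *segs, unsigned int nb_segs,
		  void *base, rte_iova_t host_phys_addr)
{
	uint64_t offset = 0;
	unsigned int i;

	for (i = 0; i < nb_segs; i++) {
		if (host_phys_addr >= segs[i].phys_addr &&
		    host_phys_addr < segs[i].phys_addr + segs[i].length) {
			/* guest virtual = base + distance into the table */
			offset += host_phys_addr - segs[i].phys_addr;
			return RTE_PTR_ADD(base, (uintptr_t)offset);
		}
		offset += segs[i].length;
	}
	return NULL;	/* address not covered by any host segment */
}
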
@@ -456,6 +472,46 @@ avp_dev_check_regions(struct rte_eth_dev *eth_dev)
 	return 0;
 }
 
+static int
+avp_dev_detach(struct rte_eth_dev *eth_dev)
+{
+	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	int ret;
+
+	PMD_DRV_LOG(NOTICE, "Detaching port %u from AVP device 0x%" PRIx64 "\n",
+		    eth_dev->data->port_id, avp->device_id);
+
+	rte_spinlock_lock(&avp->lock);
+
+	if (avp->flags & AVP_F_DETACHED) {
+		PMD_DRV_LOG(NOTICE, "port %u already detached\n",
+			    eth_dev->data->port_id);
+		ret = 0;
+		goto unlock;
+	}
+
+	/* shutdown the device first so the host stops sending us packets.
+	 */
+	ret = avp_dev_ctrl_shutdown(eth_dev);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to send/recv shutdown to host, ret=%d\n",
+			    ret);
+		avp->flags &= ~AVP_F_DETACHED;
+		goto unlock;
+	}
+
+	avp->flags |= AVP_F_DETACHED;
+	rte_wmb();
+
+	/* wait for queues to acknowledge the presence of the detach flag */
+	rte_delay_ms(1);
+	ret = 0;
+
+unlock:
+	rte_spinlock_unlock(&avp->lock);
+	return ret;
+}
+
 static void
 _avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 {
@@ -495,7 +551,7 @@ _avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 static void
 _avp_set_queue_counts(struct rte_eth_dev *eth_dev)
 {
-	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	struct rte_avp_device_info *host_info;
 	void *addr;
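
avp_dev_detach() never takes a per-queue lock; it relies on ordering instead. The control path raises AVP_F_DETACHED, issues rte_wmb() so the store is visible before it starts waiting, then sleeps for longer than one burst so any receive or transmit already past its flag check can drain. Reduced to the bare flag-and-barrier pattern, with invented example_* names:

#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_cycles.h>

/* Bare sketch of the quiesce handshake used by avp_dev_detach() above. */
static volatile uint32_t example_flags;
#define EXAMPLE_F_DETACHED (1 << 4)

static void
example_quiesce(void)
{
	example_flags |= EXAMPLE_F_DETACHED;
	rte_wmb();		/* publish the flag before waiting */
	rte_delay_ms(1);	/* outlast any burst already in flight */
}

/* the data-path burst functions poll the flag and bail out early */
static uint16_t
example_burst(void)
{
	if (unlikely(example_flags & EXAMPLE_F_DETACHED))
		return 0;	/* detached: no packets processed */
	/* ... normal burst processing ... */
	return 0;
}
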
@@ -524,6 +580,239 @@ _avp_set_queue_counts(struct rte_eth_dev *eth_dev)
 		    avp->num_tx_queues, avp->num_rx_queues);
 }
 
+static int
+avp_dev_attach(struct rte_eth_dev *eth_dev)
+{
+	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	struct rte_avp_device_config config;
+	unsigned int i;
+	int ret;
+
+	PMD_DRV_LOG(NOTICE, "Attaching port %u to AVP device 0x%" PRIx64 "\n",
+		    eth_dev->data->port_id, avp->device_id);
+
+	rte_spinlock_lock(&avp->lock);
+
+	if (!(avp->flags & AVP_F_DETACHED)) {
+		PMD_DRV_LOG(NOTICE, "port %u already attached\n",
+			    eth_dev->data->port_id);
+		ret = 0;
+		goto unlock;
+	}
+
+	/*
+	 * make sure that the detached flag is set prior to reconfiguring the
+	 * queues.
+	 */
+	avp->flags |= AVP_F_DETACHED;
+	rte_wmb();
+
+	/*
+	 * re-run the device create utility which will parse the new host info
+	 * and setup the AVP device queue pointers.
+	 */
+	ret = avp_dev_create(RTE_ETH_DEV_TO_PCI(eth_dev), eth_dev);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to re-create AVP device, ret=%d\n",
+			    ret);
+		goto unlock;
+	}
+
+	if (avp->flags & AVP_F_CONFIGURED) {
+		/*
+		 * Update the receive queue mapping to handle cases where the
+		 * source and destination hosts have different queue
+		 * requirements.  As long as the DETACHED flag is asserted the
+		 * queue table should not be referenced so it should be safe to
+		 * update it.
+		 */
+		_avp_set_queue_counts(eth_dev);
+		for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+			_avp_set_rx_queue_mappings(eth_dev, i);
+
+		/*
+		 * Update the host with our config details so that it knows the
+		 * device is active.
+		 */
+		memset(&config, 0, sizeof(config));
+		config.device_id = avp->device_id;
+		config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
+		config.driver_version = AVP_DPDK_DRIVER_VERSION;
+		config.features = avp->features;
+		config.num_tx_queues = avp->num_tx_queues;
+		config.num_rx_queues = avp->num_rx_queues;
+		config.if_up = !!(avp->flags & AVP_F_LINKUP);
+
+		ret = avp_dev_ctrl_set_config(eth_dev, &config);
+		if (ret < 0) {
+			PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
+				    ret);
+			goto unlock;
+		}
+	}
+
+	rte_wmb();
+	avp->flags &= ~AVP_F_DETACHED;
+
+	ret = 0;
+
+unlock:
+	rte_spinlock_unlock(&avp->lock);
+	return ret;
+}
+
+static void
+avp_dev_interrupt_handler(void *data)
+{
+	struct rte_eth_dev *eth_dev = data;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+	uint32_t status, value;
+	int ret;
+
+	if (registers == NULL)
+		rte_panic("no mapped MMIO register space\n");
+
+	/* read the interrupt status register
+	 * note: this register clears on read so all raised interrupts must be
+	 *    handled or remembered for later processing
+	 */
+	status = AVP_READ32(
+		RTE_PTR_ADD(registers,
+			    RTE_AVP_INTERRUPT_STATUS_OFFSET));
+
+	if (status & RTE_AVP_MIGRATION_INTERRUPT_MASK) {
+		/* handle interrupt based on current status */
+		value = AVP_READ32(
+			RTE_PTR_ADD(registers,
+				    RTE_AVP_MIGRATION_STATUS_OFFSET));
+		switch (value) {
+		case RTE_AVP_MIGRATION_DETACHED:
+			ret = avp_dev_detach(eth_dev);
+			break;
+		case RTE_AVP_MIGRATION_ATTACHED:
+			ret = avp_dev_attach(eth_dev);
+			break;
+		default:
+			PMD_DRV_LOG(ERR, "unexpected migration status, status=%u\n",
+				    value);
+			ret = -EINVAL;
+		}
+
+		/* acknowledge the request by writing out our current status */
+		value = (ret == 0 ? value : RTE_AVP_MIGRATION_ERROR);
+		AVP_WRITE32(value,
+			    RTE_PTR_ADD(registers,
+					RTE_AVP_MIGRATION_ACK_OFFSET));
+
+		PMD_DRV_LOG(NOTICE, "AVP migration interrupt handled\n");
+	}
+
+	if (status & ~RTE_AVP_MIGRATION_INTERRUPT_MASK)
+		PMD_DRV_LOG(WARNING, "AVP unexpected interrupt, status=0x%08x\n",
+			    status);
+
+	/* re-enable UIO interrupt handling */
+	ret = rte_intr_enable(&pci_dev->intr_handle);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to re-enable UIO interrupts, ret=%d\n",
+			    ret);
+		/* continue */
+	}
+}
+
+static int
+avp_dev_enable_interrupts(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+	int ret;
+
+	if (registers == NULL)
+		return -EINVAL;
+
+	/* enable UIO interrupt handling */
+	ret = rte_intr_enable(&pci_dev->intr_handle);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to enable UIO interrupts, ret=%d\n",
+			    ret);
+		return ret;
+	}
+
+	/* inform the device that all interrupts are enabled */
+	AVP_WRITE32(RTE_AVP_APP_INTERRUPTS_MASK,
+		    RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));
+
+	return 0;
+}
+
+static int
+avp_dev_disable_interrupts(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+	int ret;
+
+	if (registers == NULL)
+		return 0;
+
+	/* inform the device that all interrupts are disabled */
+	AVP_WRITE32(RTE_AVP_NO_INTERRUPTS_MASK,
+		    RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));
+
+	/* enable UIO interrupt handling */
+	ret = rte_intr_disable(&pci_dev->intr_handle);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to disable UIO interrupts, ret=%d\n",
+			    ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+avp_dev_setup_interrupts(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	int ret;
+
+	/* register a callback handler with UIO for interrupt notifications */
+	ret = rte_intr_callback_register(&pci_dev->intr_handle,
+					 avp_dev_interrupt_handler,
+					 (void *)eth_dev);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to register UIO interrupt callback, ret=%d\n",
+			    ret);
+		return ret;
+	}
+
+	/* enable interrupt processing */
+	return avp_dev_enable_interrupts(eth_dev);
+}
+
+static int
+avp_dev_migration_pending(struct rte_eth_dev *eth_dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+	uint32_t value;
+
+	if (registers == NULL)
+		return 0;
+
+	value = AVP_READ32(RTE_PTR_ADD(registers,
+				       RTE_AVP_MIGRATION_STATUS_OFFSET));
+	if (value == RTE_AVP_MIGRATION_DETACHED) {
+		/* migration is in progress; ack it if we have not already */
+		AVP_WRITE32(value,
+			    RTE_PTR_ADD(registers,
+					RTE_AVP_MIGRATION_ACK_OFFSET));
+		return 1;
+	}
+	return 0;
+}
+
 /*
  * create a AVP device using the supplied device info by first translating it
  * to guest address space(s).
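
The migration handshake above is deliberately one-shot: the status register clears on read, every raised bit must be acted on, and the guest acknowledges by writing its resulting state (or RTE_AVP_MIGRATION_ERROR) to the ack register. The UIO side follows the usual DPDK pattern — the interrupt is disarmed after each event, so the callback must re-enable it itself. A generic sketch of that register/re-arm cycle, under invented example_* names:

#include <rte_interrupts.h>

/* Generic UIO interrupt pattern mirrored by avp_dev_setup_interrupts() and
 * avp_dev_interrupt_handler() above.
 */
static void
example_intr_handler(void *arg)
{
	struct rte_intr_handle *intr = arg;

	/* ... read device status and service the event ... */

	rte_intr_enable(intr);	/* re-arm; UIO interrupts are one-shot */
}

static int
example_intr_setup(struct rte_intr_handle *intr)
{
	int ret;

	ret = rte_intr_callback_register(intr, example_intr_handler, intr);
	if (ret < 0)
		return ret;
	return rte_intr_enable(intr);
}
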
@@ -576,6 +865,7 @@ avp_dev_create(struct rte_pci_device *pci_dev,
 	avp->port_id = eth_dev->data->port_id;
 	avp->host_mbuf_size = host_info->mbuf_size;
 	avp->host_features = host_info->features;
+	rte_spinlock_init(&avp->lock);
 	memcpy(&avp->ethaddr.addr_bytes[0], host_info->ethaddr, ETHER_ADDR_LEN);
 
 	/* adjust max values to not exceed our max */
@@ -665,7 +955,7 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)
 	struct rte_pci_device *pci_dev;
 	int ret;
 
-	pci_dev = AVP_DEV_TO_PCI(eth_dev);
+	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	eth_dev->dev_ops = &avp_eth_dev_ops;
 	eth_dev->rx_pkt_burst = &avp_recv_pkts;
 	eth_dev->tx_pkt_burst = &avp_xmit_pkts;
@@ -687,7 +977,11 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)
 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
 
-	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
+	/* Check current migration status */
+	if (avp_dev_migration_pending(eth_dev)) {
+		PMD_DRV_LOG(ERR, "VM live migration operation in progress\n");
+		return -EBUSY;
+	}
 
 	/* Check BAR resources */
 	ret = avp_dev_check_regions(eth_dev);
@@ -697,6 +991,13 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)
 		return ret;
 	}
 
+	/* Enable interrupts */
+	ret = avp_dev_setup_interrupts(eth_dev);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to enable interrupts, ret=%d\n", ret);
+		return ret;
+	}
+
 	/* Handle each subtype */
 	ret = avp_dev_create(pci_dev, eth_dev);
 	if (ret < 0) {
@@ -721,12 +1022,20 @@ eth_avp_dev_init(struct rte_eth_dev *eth_dev)
 static int
 eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
 {
+	int ret;
+
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return -EPERM;
 
 	if (eth_dev->data == NULL)
 		return 0;
 
+	ret = avp_dev_disable_interrupts(eth_dev);
+	if (ret != 0) {
+		PMD_DRV_LOG(ERR, "Failed to disable interrupts, ret=%d\n", ret);
+		return ret;
+	}
+
 	if (eth_dev->data->mac_addrs != NULL) {
 		rte_free(eth_dev->data->mac_addrs);
 		eth_dev->data->mac_addrs = NULL;
@@ -735,17 +1044,26 @@ eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
 	return 0;
 }
 
+static int
+eth_avp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+		  struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct avp_adapter),
+					     eth_avp_dev_init);
+}
 
-static struct eth_driver rte_avp_pmd = {
-	{
-		.id_table = pci_id_avp_map,
-		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
-		.probe = rte_eth_dev_pci_probe,
-		.remove = rte_eth_dev_pci_remove,
-	},
-	.eth_dev_init = eth_avp_dev_init,
-	.eth_dev_uninit = eth_avp_dev_uninit,
-	.dev_private_size = sizeof(struct avp_adapter),
+static int
+eth_avp_pci_remove(struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_remove(pci_dev,
+					      eth_avp_dev_uninit);
+}
+
+static struct rte_pci_driver rte_avp_pmd = {
+	.id_table = pci_id_avp_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = eth_avp_pci_probe,
+	.remove = eth_avp_pci_remove,
 };
 
 static int
@@ -1003,7 +1321,7 @@ avp_dev_copy_from_buffers(struct avp_dev *avp,
 	src_offset = 0;
 
 	if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
-		ol_flags = PKT_RX_VLAN_PKT;
+		ol_flags = PKT_RX_VLAN;
 		vlan_tci = pkt_buf->vlan_tci;
 	} else {
 		ol_flags = 0;
@@ -1088,6 +1406,11 @@ avp_recv_scattered_pkts(void *rx_queue,
 	unsigned int port_id;
 	unsigned int i;
 
+	if (unlikely(avp->flags & AVP_F_DETACHED)) {
+		/* VM live migration in progress */
+		return 0;
+	}
+
 	guest_mbuf_size = avp->guest_mbuf_size;
 	port_id = avp->port_id;
 	rx_q = avp->rx_q[rxq->queue_id];
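
The probe/remove rewrite in the hunk above follows from the removal of struct eth_driver upstream: a PMD now registers a plain rte_pci_driver and delegates allocation to the rte_eth_dev_pci_generic_probe()/remove() helpers, which size and attach the per-port private data themselves. As a minimal free-standing sketch of that boilerplate — example_* names are placeholders, not AVP code:

#include <rte_bus_pci.h>
#include <rte_ethdev_pci.h>

struct example_adapter {
	uint64_t state;		/* per-port private data */
};

static int
example_dev_init(struct rte_eth_dev *eth_dev)
{
	RTE_SET_USED(eth_dev);	/* set ops and burst functions here */
	return 0;
}

static int
example_pci_probe(struct rte_pci_driver *drv __rte_unused,
		  struct rte_pci_device *pci_dev)
{
	/* allocates the ethdev plus private data, then calls the init cb */
	return rte_eth_dev_pci_generic_probe(pci_dev,
			sizeof(struct example_adapter), example_dev_init);
}

static int
example_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static const struct rte_pci_id example_pci_ids[] = {
	{ .vendor_id = 0 },	/* sentinel */
};

static struct rte_pci_driver example_pmd = {
	.id_table = example_pci_ids,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = example_pci_probe,
	.remove = example_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_example, example_pmd);
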
@@ -1182,6 +1505,11 @@ avp_recv_pkts(void *rx_queue,
 	char *pkt_data;
 	unsigned int i;
 
+	if (unlikely(avp->flags & AVP_F_DETACHED)) {
+		/* VM live migration in progress */
+		return 0;
+	}
+
 	rx_q = avp->rx_q[rxq->queue_id];
 	free_q = avp->free_q[rxq->queue_id];
@@ -1251,7 +1579,7 @@ avp_recv_pkts(void *rx_queue,
 		m->port = avp->port_id;
 
 		if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
-			m->ol_flags = PKT_RX_VLAN_PKT;
+			m->ol_flags = PKT_RX_VLAN;
 			m->vlan_tci = pkt_buf->vlan_tci;
 		}
@@ -1389,6 +1717,13 @@ avp_xmit_scattered_pkts(void *tx_queue,
 	unsigned int i;
 
 	orig_nb_pkts = nb_pkts;
+	if (unlikely(avp->flags & AVP_F_DETACHED)) {
+		/* VM live migration in progress */
+		/* TODO ... buffer for X packets then drop? */
+		txq->errors += nb_pkts;
+		return 0;
+	}
+
 	tx_q = avp->tx_q[txq->queue_id];
 	alloc_q = avp->alloc_q[txq->queue_id];
@@ -1501,6 +1836,13 @@ avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	char *pkt_data;
 	unsigned int i;
 
+	if (unlikely(avp->flags & AVP_F_DETACHED)) {
+		/* VM live migration in progress */
+		/* TODO ... buffer for X packets then drop?! */
+		txq->errors++;
+		return 0;
+	}
+
 	tx_q = avp->tx_q[txq->queue_id];
 	alloc_q = avp->alloc_q[txq->queue_id];
@@ -1625,7 +1967,7 @@ avp_dev_tx_queue_release(void *tx_queue)
 static int
 avp_dev_configure(struct rte_eth_dev *eth_dev)
 {
-	struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	struct rte_avp_device_info *host_info;
 	struct rte_avp_device_config config;
@@ -1633,6 +1975,13 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
 	void *addr;
 	int ret;
 
+	rte_spinlock_lock(&avp->lock);
+	if (avp->flags & AVP_F_DETACHED) {
+		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+		ret = -ENOTSUP;
+		goto unlock;
+	}
+
 	addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
 	host_info = (struct rte_avp_device_info *)addr;
@@ -1642,7 +1991,12 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
 	mask = (ETH_VLAN_STRIP_MASK |
 		ETH_VLAN_FILTER_MASK |
 		ETH_VLAN_EXTEND_MASK);
-	avp_vlan_offload_set(eth_dev, mask);
+	ret = avp_vlan_offload_set(eth_dev, mask);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
+			    ret);
+		goto unlock;
+	}
 
 	/* update device config */
 	memset(&config, 0, sizeof(config));
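
avp_dev_configure() now refuses to run while AVP_F_DETACHED is set and pushes the VLAN offload state to the host before the config handshake. From an application's point of view nothing changes: the usual configure/start sequence exercises these ops. A sketch of that caller side using the generic ethdev API (queue count and ring size are arbitrary illustrative choices):

#include <string.h>
#include <rte_ethdev.h>

/* Application-side bring-up that lands in avp_dev_configure() and
 * avp_dev_start() above; one queue pair with 512-entry rings is arbitrary.
 */
static int
example_port_bringup(uint16_t port_id, struct rte_mempool *pool)
{
	struct rte_eth_conf conf;
	int ret;

	memset(&conf, 0, sizeof(conf));
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;	/* e.g. -ENOTSUP while a migration is pending */

	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id),
				     NULL, pool);
	if (ret < 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id), NULL);
	if (ret < 0)
		return ret;

	return rte_eth_dev_start(port_id);	/* invokes avp_dev_start() */
}
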
@@ -1664,9 +2018,100 @@ avp_dev_configure(struct rte_eth_dev *eth_dev)
 	ret = 0;
 
 unlock:
+	rte_spinlock_unlock(&avp->lock);
 	return ret;
 }
 
+static int
+avp_dev_start(struct rte_eth_dev *eth_dev)
+{
+	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	int ret;
+
+	rte_spinlock_lock(&avp->lock);
+	if (avp->flags & AVP_F_DETACHED) {
+		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+		ret = -ENOTSUP;
+		goto unlock;
+	}
+
+	/* update link state */
+	ret = avp_dev_ctrl_set_link_state(eth_dev, 1);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
+			    ret);
+		goto unlock;
+	}
+
+	/* remember current link state */
+	avp->flags |= AVP_F_LINKUP;
+
+	ret = 0;
+
+unlock:
+	rte_spinlock_unlock(&avp->lock);
+	return ret;
+}
+
+static void
+avp_dev_stop(struct rte_eth_dev *eth_dev)
+{
+	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	int ret;
+
+	rte_spinlock_lock(&avp->lock);
+	if (avp->flags & AVP_F_DETACHED) {
+		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+		goto unlock;
+	}
+
+	/* remember current link state */
+	avp->flags &= ~AVP_F_LINKUP;
+
+	/* update link state */
+	ret = avp_dev_ctrl_set_link_state(eth_dev, 0);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
+			    ret);
+	}
+
+unlock:
+	rte_spinlock_unlock(&avp->lock);
+}
+
+static void
+avp_dev_close(struct rte_eth_dev *eth_dev)
+{
+	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	int ret;
+
+	rte_spinlock_lock(&avp->lock);
+	if (avp->flags & AVP_F_DETACHED) {
+		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+		goto unlock;
+	}
+
+	/* remember current link state */
+	avp->flags &= ~AVP_F_LINKUP;
+	avp->flags &= ~AVP_F_CONFIGURED;
+
+	ret = avp_dev_disable_interrupts(eth_dev);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Failed to disable interrupts\n");
+		/* continue */
+	}
+
+	/* update device state */
+	ret = avp_dev_ctrl_shutdown(eth_dev);
+	if (ret < 0) {
+		PMD_DRV_LOG(ERR, "Device shutdown failed by host, ret=%d\n",
+			    ret);
+		/* continue */
+	}
+
+unlock:
+	rte_spinlock_unlock(&avp->lock);
+}
+
 static int
 avp_dev_link_update(struct rte_eth_dev *eth_dev,
@@ -1687,11 +2132,13 @@ avp_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
 {
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 
+	rte_spinlock_lock(&avp->lock);
 	if ((avp->flags & AVP_F_PROMISC) == 0) {
 		avp->flags |= AVP_F_PROMISC;
 		PMD_DRV_LOG(DEBUG, "Promiscuous mode enabled on %u\n",
 			    eth_dev->data->port_id);
 	}
+	rte_spinlock_unlock(&avp->lock);
 }
 
 static void
@@ -1699,11 +2146,13 @@ avp_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
 {
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 
+	rte_spinlock_lock(&avp->lock);
 	if ((avp->flags & AVP_F_PROMISC) != 0) {
 		avp->flags &= ~AVP_F_PROMISC;
 		PMD_DRV_LOG(DEBUG, "Promiscuous mode disabled on %u\n",
 			    eth_dev->data->port_id);
 	}
+	rte_spinlock_unlock(&avp->lock);
 }
 
 static void
@@ -1712,8 +2161,6 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
 {
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 
-	dev_info->driver_name = "rte_avp_pmd";
-	dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
 	dev_info->max_rx_queues = avp->max_rx_queues;
 	dev_info->max_tx_queues = avp->max_tx_queues;
 	dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
@@ -1723,16 +2170,19 @@ avp_dev_info_get(struct rte_eth_dev *eth_dev,
 		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
 		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
 	}
+	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_CRC_STRIP;
 }
 
-static void
+static int
 avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 {
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
+	uint64_t offloads = dev_conf->rxmode.offloads;
 
 	if (mask & ETH_VLAN_STRIP_MASK) {
 		if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
-			if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+			if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 				avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
 			else
 				avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
@@ -1742,17 +2192,19 @@ avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	}
 
 	if (mask & ETH_VLAN_FILTER_MASK) {
-		if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 			PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
 	}
 
 	if (mask & ETH_VLAN_EXTEND_MASK) {
-		if (eth_dev->data->dev_conf.rxmode.hw_vlan_extend)
+		if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
 			PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
 	}
+
+	return 0;
 }
 
-static void
+static int
 avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
 {
 	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
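
The hw_vlan_strip/hw_vlan_filter/hw_vlan_extend bitfields consulted by the old code were folded into the single rxmode.offloads mask that avp_vlan_offload_set() now reads. An application requests the offload either statically in rte_eth_conf or at runtime; the runtime path is what invokes the op above:

#include <rte_ethdev.h>

/* Runtime VLAN-strip toggle: rte_eth_dev_set_vlan_offload() computes the
 * mask of changed bits and calls the PMD's vlan_offload_set() shown above.
 */
static int
example_enable_vlan_strip(uint16_t port_id)
{
	return rte_eth_dev_set_vlan_offload(port_id, ETH_VLAN_STRIP_OFFLOAD);
}
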
@@ -1785,6 +2237,8 @@ avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
 			stats->q_errors[i] += txq->errors;
 		}
 	}
+
+	return 0;
 }
 
 static void
@@ -1814,5 +2268,12 @@ avp_dev_stats_reset(struct rte_eth_dev *eth_dev)
 	}
 }
 
-RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);
+
+RTE_INIT(avp_init_log)
+{
+	avp_logtype_driver = rte_log_register("pmd.net.avp.driver");
+	if (avp_logtype_driver >= 0)
+		rte_log_set_level(avp_logtype_driver, RTE_LOG_NOTICE);
+}
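
The rte_log_register() call above replaces static logging with a dynamic log type. Presumably avp_logs.h routes PMD_DRV_LOG() through avp_logtype_driver; the consuming macro would look roughly like the hypothetical one below, after which verbosity can be raised at runtime without rebuilding the driver:

#include <rte_log.h>

extern int avp_logtype_driver;	/* registered in avp_init_log() above */

/* Hypothetical shape of the consuming macro (the real one lives in
 * avp_logs.h, outside this diff):
 */
#define EXAMPLE_DRV_LOG(level, fmt, args...)			\
	rte_log(RTE_LOG_ ## level, avp_logtype_driver,		\
		"%s(): " fmt, __func__, ## args)

/* raise verbosity at runtime, e.g. from a debug hook:
 *	rte_log_set_level(avp_logtype_driver, RTE_LOG_DEBUG);
 * or via the EAL --log-level option (exact syntax varies by release).
 */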