-/*
- * BSD LICENSE
- *
- * Copyright (c) 2013-2017, Wind River Systems, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1) Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2) Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3) Neither the name of Wind River Systems nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2013-2017 Wind River Systems, Inc.
*/
#include <stdint.h>
#include <errno.h>
#include <unistd.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include "avp_logs.h"
+int avp_logtype_driver;
static int avp_dev_create(struct rte_pci_device *pci_dev,
struct rte_eth_dev *eth_dev);
#define AVP_MAX_RX_BURST 64
#define AVP_MAX_TX_BURST 64
#define AVP_MAX_MAC_ADDRS 1
-#define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN
+#define AVP_MIN_RX_BUFSIZE RTE_ETHER_MIN_LEN
/*
struct avp_dev {
uint32_t magic; /**< Memory validation marker */
uint64_t device_id; /**< Unique system identifier */
- struct ether_addr ethaddr; /**< Host specified MAC address */
+ struct rte_ether_addr ethaddr; /**< Host specified MAC address */
struct rte_eth_dev_data *dev_data;
/**< Back pointer to ethernet device data */
volatile uint32_t flags; /**< Device operational flags */
(host_phys_addr < (map->phys_addr + map->length))) {
/* address is within this segment */
offset += (host_phys_addr - map->phys_addr);
- addr = RTE_PTR_ADD(addr, offset);
+ addr = RTE_PTR_ADD(addr, (uintptr_t)offset);
PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
host_phys_addr, addr);
avp->host_features = host_info->features;
rte_spinlock_init(&avp->lock);
memcpy(&avp->ethaddr.addr_bytes[0],
- host_info->ethaddr, ETHER_ADDR_LEN);
+ host_info->ethaddr, RTE_ETHER_ADDR_LEN);
/* adjust max values to not exceed our max */
avp->max_tx_queues =
RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);
eth_dev->dev_ops = &avp_eth_dev_ops;
eth_dev->rx_pkt_burst = &avp_recv_pkts;
eth_dev->tx_pkt_burst = &avp_xmit_pkts;
+ /* Let rte_eth_dev_close() release the port resources */
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
/*
}
/* Allocate memory for storing MAC addresses */
- eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev", ETHER_ADDR_LEN, 0);
+ eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev",
+ RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
- ETHER_ADDR_LEN);
+ RTE_ETHER_ADDR_LEN);
return -ENOMEM;
}
/* Get a mac from device config */
- ether_addr_copy(&avp->ethaddr, ð_dev->data->mac_addrs[0]);
+ rte_ether_addr_copy(&avp->ethaddr, ð_dev->data->mac_addrs[0]);
return 0;
}
static int
eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
{
-	int ret;
-
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;
	if (eth_dev->data == NULL)
		return 0;
+	/* Teardown is now delegated to avp_dev_close().
+	 * NOTE(review): assumes avp_dev_close() performs the interrupt
+	 * disable and MAC-address storage cleanup previously done inline
+	 * here -- confirm against the avp_dev_close() implementation.
+	 */
-	ret = avp_dev_disable_interrupts(eth_dev);
-	if (ret != 0) {
-		PMD_DRV_LOG(ERR, "Failed to disable interrupts, ret=%d\n", ret);
-		return ret;
-	}
-
-	if (eth_dev->data->mac_addrs != NULL) {
-		rte_free(eth_dev->data->mac_addrs);
-		eth_dev->data->mac_addrs = NULL;
-	}
+	avp_dev_close(eth_dev);
	return 0;
}
eth_avp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
-	struct rte_eth_dev *eth_dev;
-	int ret;
-
-	eth_dev = rte_eth_dev_pci_allocate(pci_dev,
-					   sizeof(struct avp_adapter));
-	if (eth_dev == NULL)
-		return -ENOMEM;
-
-	ret = eth_avp_dev_init(eth_dev);
-	if (ret)
-		rte_eth_dev_pci_release(eth_dev);
-
-	return ret;
+	/* The generic helper mirrors the removed open-coded logic:
+	 * allocate the ethdev with avp_adapter-sized private data, run
+	 * eth_avp_dev_init, and release the port on init failure.
+	 */
+	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct avp_adapter),
+			eth_avp_dev_init);
}
static int
}
static inline int
-_avp_cmp_ether_addr(struct ether_addr *a, struct ether_addr *b)
+_avp_cmp_ether_addr(struct rte_ether_addr *a, struct rte_ether_addr *b)
{
uint16_t *_a = (uint16_t *)&a->addr_bytes[0];
uint16_t *_b = (uint16_t *)&b->addr_bytes[0];
static inline int
_avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m)
{
- struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
+ struct rte_ether_hdr *eth = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
if (likely(_avp_cmp_ether_addr(&avp->ethaddr, ð->d_addr) == 0)) {
/* allow all packets destined to our address */
return 0;
}
- if (likely(is_broadcast_ether_addr(ð->d_addr))) {
+ if (likely(rte_is_broadcast_ether_addr(ð->d_addr))) {
/* allow all broadcast packets */
return 0;
}
- if (likely(is_multicast_ether_addr(ð->d_addr))) {
+ if (likely(rte_is_multicast_ether_addr(ð->d_addr))) {
/* allow all multicast packets */
return 0;
}
unsigned int i;
for (i = 0; i < avp->num_rx_queues; i++) {
- if (data->rx_queues[i] == rxq)
+ if (data->rx_queues[i] == rxq) {
+ rte_free(data->rx_queues[i]);
data->rx_queues[i] = NULL;
+ }
+ }
+}
+
+/* Free the dynamically allocated structure of every rx queue and NULL
+ * the slot so a stale pointer cannot be dereferenced after close.
+ */
+static void
+avp_dev_rx_queue_release_all(struct rte_eth_dev *eth_dev)
+{
+	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	struct rte_eth_dev_data *data = avp->dev_data;
+	unsigned int i;
+
+	for (i = 0; i < avp->num_rx_queues; i++) {
+		if (data->rx_queues[i]) {
+			rte_free(data->rx_queues[i]);
+			data->rx_queues[i] = NULL;
+		}
	}
}
unsigned int i;
for (i = 0; i < avp->num_tx_queues; i++) {
- if (data->tx_queues[i] == txq)
+ if (data->tx_queues[i] == txq) {
+ rte_free(data->tx_queues[i]);
data->tx_queues[i] = NULL;
+ }
+ }
+}
+
+/* Free the dynamically allocated structure of every tx queue and NULL
+ * the slot so a stale pointer cannot be dereferenced after close.
+ */
+static void
+avp_dev_tx_queue_release_all(struct rte_eth_dev *eth_dev)
+{
+	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+	struct rte_eth_dev_data *data = avp->dev_data;
+	unsigned int i;
+
+	for (i = 0; i < avp->num_tx_queues; i++) {
+		if (data->tx_queues[i]) {
+			rte_free(data->tx_queues[i]);
+			data->tx_queues[i] = NULL;
+		}
	}
}
goto unlock;
}
- /* disable features that we do not support */
- eth_dev->data->dev_conf.rxmode.hw_ip_checksum = 0;
- eth_dev->data->dev_conf.rxmode.hw_vlan_filter = 0;
- eth_dev->data->dev_conf.rxmode.hw_vlan_extend = 0;
- eth_dev->data->dev_conf.rxmode.hw_strip_crc = 0;
-
/* update link state */
ret = avp_dev_ctrl_set_link_state(eth_dev, 1);
if (ret < 0) {
/* continue */
}
+ /* release dynamic storage for rx/tx queues */
+ avp_dev_rx_queue_release_all(eth_dev);
+ avp_dev_tx_queue_release_all(eth_dev);
+
unlock:
rte_spinlock_unlock(&avp->lock);
}
{
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
dev_info->max_rx_queues = avp->max_rx_queues;
dev_info->max_tx_queues = avp->max_tx_queues;
dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_eth_conf *dev_conf = ð_dev->data->dev_conf;
+ uint64_t offloads = dev_conf->rxmode.offloads;
if (mask & ETH_VLAN_STRIP_MASK) {
if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
- if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
else
avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
}
if (mask & ETH_VLAN_FILTER_MASK) {
- if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (eth_dev->data->dev_conf.rxmode.hw_vlan_extend)
+ if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
}
stats->q_opackets[i] += txq->packets;
stats->q_obytes[i] += txq->bytes;
- stats->q_errors[i] += txq->errors;
}
}
RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);
+
+/* Shared-object constructor: register the driver's dynamic log type. */
+RTE_INIT(avp_init_log)
+{
+	avp_logtype_driver = rte_log_register("pmd.net.avp.driver");
+	/* Only set a default level when registration returned a valid id. */
+	if (avp_logtype_driver >= 0)
+		rte_log_set_level(avp_logtype_driver, RTE_LOG_NOTICE);
+}