/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2019 NXP
+ * Copyright 2018-2019 NXP
*/
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
#include <rte_kvargs.h>
-#include <rte_ethdev_vdev.h>
+#include <ethdev_vdev.h>
#include <rte_bus_vdev.h>
+#include <rte_ether.h>
#include <dpaa_of.h>
#include "pfe_logs.h"
#include "pfe_mod.h"
-#define PFE_MAX_MACS 1 /*we can support upto 4 MACs per IF*/
+#define PFE_MAX_MACS 1 /* we can support up to 4 MACs per IF */
#define PFE_VDEV_GEM_ID_ARG "intf"
struct pfe_vdev_init_params {
int8_t gem_id;
};
static struct pfe *g_pfe;
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup =
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
/* TODO: make pfe_svr a runtime option.
* Driver should be able to get the SVR
static void *cbus_emac_base[3];
static void *cbus_gpi_base[3];
-int pfe_logtype_pmd;
-
/* pfe_gemac_init
*/
static int
return 0;
}
+static uint16_t
+pfe_recv_pkts_on_intr(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct hif_client_rx_queue *queue = rxq;
+ struct pfe_eth_priv_s *priv = queue->priv;
+ struct epoll_event epoll_ev;
+ uint64_t ticks = 1; /* 1 msec */
+ int ret;
+ int have_something, work_done;
+
+#define RESET_STATUS (HIF_INT | HIF_RXPKT_INT)
+
+	/* TODO: can we remove this cleanup from here? */
+ pfe_tx_do_cleanup(priv->pfe);
+ have_something = pfe_hif_rx_process(priv->pfe, nb_pkts);
+ work_done = hif_lib_receive_pkt(rxq, priv->pfe->hif.shm->pool,
+ rx_pkts, nb_pkts);
+
+ if (!have_something || !work_done) {
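+		/* Nothing left to process: acknowledge and re-enable the HIF
+		 * Rx interrupt, then wait up to 1 msec for the next event.
+		 */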
+ writel(RESET_STATUS, HIF_INT_SRC);
+ writel(readl(HIF_INT_ENABLE) | HIF_RXPKT_INT, HIF_INT_ENABLE);
+ ret = epoll_wait(priv->pfe->hif.epoll_fd, &epoll_ev, 1, ticks);
+ if (ret < 0 && errno != EINTR)
+			PFE_PMD_ERR("epoll_wait failed with %d\n", errno);
+ }
+
+ return work_done;
+}
+
+static uint16_t
+pfe_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct hif_client_rx_queue *queue = rxq;
+ struct pfe_eth_priv_s *priv = queue->priv;
+ struct rte_mempool *pool;
+
+	/* TODO: can we remove this cleanup from here? */
+ pfe_tx_do_cleanup(priv->pfe);
+ pfe_hif_rx_process(priv->pfe, nb_pkts);
+ pool = priv->pfe->hif.shm->pool;
+
+ return hif_lib_receive_pkt(rxq, pool, rx_pkts, nb_pkts);
+}
+
+static uint16_t
+pfe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct hif_client_tx_queue *queue = tx_queue;
+ struct pfe_eth_priv_s *priv = queue->priv;
+ struct rte_eth_stats *stats = &priv->stats;
+ int i;
+
+ for (i = 0; i < nb_pkts; i++) {
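+		/* Scattered mbufs are handed to HIF one segment at a time:
+		 * the first segment is flagged HIF_FIRST_BUFFER and the last
+		 * one HIF_LAST_BUFFER | HIF_DATA_VALID.
+		 */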
+ if (tx_pkts[i]->nb_segs > 1) {
+ struct rte_mbuf *mbuf;
+ int j;
+
+ hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+ (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
+ tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
+ tx_pkts[i]->data_len, 0x0, HIF_FIRST_BUFFER,
+ tx_pkts[i]);
+
+ mbuf = tx_pkts[i]->next;
+ for (j = 0; j < (tx_pkts[i]->nb_segs - 2); j++) {
+ hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+ (void *)(size_t)rte_pktmbuf_iova(mbuf),
+ mbuf->buf_addr + mbuf->data_off,
+ mbuf->data_len,
+ 0x0, 0x0, mbuf);
+ mbuf = mbuf->next;
+ }
+
+ hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+ (void *)(size_t)rte_pktmbuf_iova(mbuf),
+ mbuf->buf_addr + mbuf->data_off,
+ mbuf->data_len,
+ 0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
+ mbuf);
+ } else {
+ hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+ (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
+ tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
+ tx_pkts[i]->pkt_len, 0 /*ctrl*/,
+ HIF_FIRST_BUFFER | HIF_LAST_BUFFER |
+ HIF_DATA_VALID,
+ tx_pkts[i]);
+ }
+ stats->obytes += tx_pkts[i]->pkt_len;
+ hif_tx_dma_start();
+ }
+ stats->opackets += nb_pkts;
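+	/* Clean up already-completed Tx descriptors */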
+ pfe_tx_do_cleanup(priv->pfe);
+
+ return nb_pkts;
+}
+
+static uint16_t
+pfe_dummy_xmit_pkts(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+static uint16_t
+pfe_dummy_recv_pkts(__rte_unused void *rxq,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
static int
pfe_eth_open(struct rte_eth_dev *dev)
{
" failed", client->id);
goto err0;
}
+ } else {
+		/* Free any packets already sitting in the Rx queue */
+ int ret = 0;
+ struct rte_mbuf *rx_pkts[32];
+ /* TODO multiqueue support */
+ ret = hif_lib_receive_pkt(&client->rx_q[0],
+ hif_shm->pool, rx_pkts, 32);
+ while (ret) {
+ int i;
+ for (i = 0; i < ret; i++)
+ rte_pktmbuf_free(rx_pkts[i]);
+ ret = hif_lib_receive_pkt(&client->rx_q[0],
+ hif_shm->pool,
+ rx_pkts, 32);
+ }
}
} else {
/* Register client driver with HIF */
}
}
rc = pfe_eth_start(priv);
+ dev->rx_pkt_burst = &pfe_recv_pkts;
+ dev->tx_pkt_burst = &pfe_xmit_pkts;
+	/* Switch to interrupt-driven Rx if PFE_INTR_SUPPORT is set in the environment */
+ if (getenv("PFE_INTR_SUPPORT")) {
+ dev->rx_pkt_burst = &pfe_recv_pkts_on_intr;
+ PFE_PMD_INFO("PFE INTERRUPT Mode enabled");
+ }
+
err0:
return rc;
}
}
-static void
+static int
pfe_eth_stop(struct rte_eth_dev *dev/*, int wake*/)
{
struct pfe_eth_priv_s *priv = dev->data->dev_private;
+ dev->data->dev_started = 0;
+
gemac_disable(priv->EMAC_baseaddr);
gpi_disable(priv->GPI_baseaddr);
-}
-static void
-pfe_eth_exit(struct rte_eth_dev *dev, struct pfe *pfe)
-{
- PMD_INIT_FUNC_TRACE();
-
- pfe_eth_stop(dev);
- /* Close the device file for link status */
- pfe_eth_close_cdev(dev->data->dev_private);
+ dev->rx_pkt_burst = &pfe_dummy_recv_pkts;
+ dev->tx_pkt_burst = &pfe_dummy_xmit_pkts;
- rte_free(dev->data->mac_addrs);
- rte_eth_dev_release_port(dev);
- pfe->nb_devs--;
+ return 0;
}
-static void
+static int
pfe_eth_close(struct rte_eth_dev *dev)
{
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+
if (!dev)
- return;
+ return -1;
if (!g_pfe)
- return;
+ return -1;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
- pfe_eth_exit(dev, g_pfe);
+ ret = pfe_eth_stop(dev);
+ /* Close the device file for link status */
+ pfe_eth_close_cdev(dev->data->dev_private);
+
+ munmap(g_pfe->cbus_baseaddr, g_pfe->cbus_size);
+ g_pfe->nb_devs--;
if (g_pfe->nb_devs == 0) {
pfe_hif_exit(g_pfe);
rte_free(g_pfe);
g_pfe = NULL;
}
+
+ return ret;
}
static int
pfe_eth_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
- struct pfe_eth_priv_s *internals = dev->data->dev_private;
-
- dev_info->if_index = internals->id;
dev_info->max_mac_addrs = PFE_MAX_MACS;
dev_info->max_rx_queues = dev->data->nb_rx_queues;
dev_info->max_tx_queues = dev->data->nb_tx_queues;
dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE;
- if (pfe_svr == SVR_LS1012A_REV1)
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+ dev_info->rx_offload_capa = dev_rx_offloads_sup;
+ dev_info->tx_offload_capa = dev_tx_offloads_sup;
+ if (pfe_svr == SVR_LS1012A_REV1) {
dev_info->max_rx_pktlen = MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD;
- else
+ dev_info->max_mtu = MAX_MTU_ON_REV1;
+ } else {
dev_info->max_rx_pktlen = JUMBO_FRAME_SIZE;
+ dev_info->max_mtu = JUMBO_FRAME_SIZE - PFE_ETH_OVERHEAD;
+ }
+
+ return 0;
+}
+
+/* Only the first mb_pool passed on the first call of this API is used for
+ * the whole system; nb_rx_desc and rx_conf are unused parameters.
+ */
+static int
+pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ __rte_unused uint16_t nb_rx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ int rc = 0;
+ struct pfe *pfe;
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+ pfe = priv->pfe;
+
+ if (queue_idx >= EMAC_RXQ_CNT) {
+ PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
+ queue_idx, EMAC_RXQ_CNT);
+ return -1;
+ }
+
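+	/* HIF is shared by all interfaces; initialise it only once, using
+	 * the mempool supplied on the first queue setup.
+	 */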
+ if (!pfe->hif.setuped) {
+ rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool);
+ if (rc) {
+ PFE_PMD_ERR("Could not allocate buffer descriptors");
+ return -1;
+ }
+
+ pfe->hif.shm->pool = mb_pool;
+ if (pfe_hif_init_buffers(&pfe->hif)) {
+ PFE_PMD_ERR("Could not initialize buffer descriptors");
+ return -1;
+ }
+ hif_init();
+ hif_rx_enable();
+ hif_tx_enable();
+ pfe->hif.setuped = 1;
+ }
+ dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx];
+ priv->client.rx_q[queue_idx].queue_id = queue_idx;
+
+ return 0;
+}
+
+static void
+pfe_rx_queue_release(void *q __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static void
+pfe_tx_queue_release(void *q __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static int
+pfe_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ __rte_unused uint16_t nb_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+ if (queue_idx >= emac_txq_cnt) {
+ PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
+ queue_idx, emac_txq_cnt);
+ return -1;
+ }
+ dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx];
+ priv->client.tx_q[queue_idx].queue_id = queue_idx;
+ return 0;
+}
+
+static const uint32_t *
+pfe_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+		/* TODO: add more types */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_L4_SCTP,
+		RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == pfe_recv_pkts ||
+ dev->rx_pkt_burst == pfe_recv_pkts_on_intr)
+ return ptypes;
+ return NULL;
+}
+
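+/* Atomically read the device's currently recorded link status into *link */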
+static inline int
+pfe_eth_atomic_read_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = link;
+ struct rte_eth_link *src = &dev->data->dev_link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
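+/* Atomically update the device's recorded link status from *link */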
+static inline int
+pfe_eth_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &dev->data->dev_link;
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+static int
+pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
+{
+	int ret, ioctl_cmd = 0;
+	struct pfe_eth_priv_s *priv;
+	struct rte_eth_link link, old;
+	unsigned int lstatus = 1;
+
+	if (dev == NULL) {
+		PFE_PMD_ERR("Invalid device in link_update.\n");
+		return 0;
+	}
+
+	priv = dev->data->dev_private;
+
+ memset(&old, 0, sizeof(old));
+ memset(&link, 0, sizeof(struct rte_eth_link));
+
+ pfe_eth_atomic_read_link_status(dev, &old);
+
+ /* Read from PFE CDEV, status of link, if file was successfully
+ * opened.
+ */
+ if (priv->link_fd != PFE_CDEV_INVALID_FD) {
+ if (priv->id == 0)
+ ioctl_cmd = PFE_CDEV_ETH0_STATE_GET;
+ if (priv->id == 1)
+ ioctl_cmd = PFE_CDEV_ETH1_STATE_GET;
+
+ ret = ioctl(priv->link_fd, ioctl_cmd, &lstatus);
+ if (ret != 0) {
+ PFE_PMD_ERR("Unable to fetch link status (ioctl)\n");
+ /* use dummy link value */
+ link.link_status = 1;
+ }
+ PFE_PMD_DEBUG("Fetched link state (%d) for dev %d.\n",
+ lstatus, priv->id);
+ }
+
+ if (old.link_status == lstatus) {
+ /* no change in status */
+ PFE_PMD_DEBUG("No change in link status; Not updating.\n");
+ return -1;
+ }
+
+ link.link_status = lstatus;
+	link.link_speed = ETH_SPEED_NUM_1G;
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ link.link_autoneg = ETH_LINK_AUTONEG;
+
+ pfe_eth_atomic_write_link_status(dev, &link);
+
+ PFE_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
+ link.link_status ? "up" : "down");
+
+ return 0;
+}
+
+static int
+pfe_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+ priv->promisc = 1;
+ dev->data->promiscuous = 1;
+ gemac_enable_copy_all(priv->EMAC_baseaddr);
+
+ return 0;
+}
+
+static int
+pfe_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+ priv->promisc = 0;
+ dev->data->promiscuous = 0;
+ gemac_disable_copy_all(priv->EMAC_baseaddr);
+
+ return 0;
+}
+
+static int
+pfe_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+ struct pfe_mac_addr hash_addr; /* hash register structure */
+
+ /* Set the hash to rx all multicast frames */
+ hash_addr.bottom = 0xFFFFFFFF;
+ hash_addr.top = 0xFFFFFFFF;
+ gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
+ dev->data->all_multicast = 1;
+
+ return 0;
+}
+
+static int
+pfe_link_down(struct rte_eth_dev *dev)
+{
+ return pfe_eth_stop(dev);
+}
+
+static int
+pfe_link_up(struct rte_eth_dev *dev)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+ pfe_eth_start(priv);
+ return 0;
+}
+
+static int
+pfe_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ int ret;
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+ uint16_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+	/* TODO: Support VLAN */
+ ret = gemac_set_rx(priv->EMAC_baseaddr, frame_size);
+ if (!ret)
+ dev->data->mtu = mtu;
+
+ return ret;
+}
+
+/* pfe_eth_enet_addr_byte_mac
+ * Pack a 6-byte MAC address into the bottom (first four bytes) and
+ * top (last two bytes) words of struct pfe_mac_addr.
+ */
+static int
+pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
+ struct pfe_mac_addr *enet_addr)
+{
+ if (!enet_byte_addr || !enet_addr) {
+ return -1;
+
+ } else {
+ enet_addr->bottom = enet_byte_addr[0] |
+ (enet_byte_addr[1] << 8) |
+ (enet_byte_addr[2] << 16) |
+ (enet_byte_addr[3] << 24);
+ enet_addr->top = enet_byte_addr[4] |
+ (enet_byte_addr[5] << 8);
+ return 0;
+ }
+}
+
+static int
+pfe_dev_set_mac_addr(struct rte_eth_dev *dev,
+ struct rte_ether_addr *addr)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+ struct pfe_mac_addr spec_addr;
+ int ret;
+
+ ret = pfe_eth_enet_addr_byte_mac(addr->addr_bytes, &spec_addr);
+ if (ret)
+ return ret;
+
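+	/* Program the new MAC address into the EMAC */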
+ gemac_set_laddrN(priv->EMAC_baseaddr,
+ (struct pfe_mac_addr *)&spec_addr, 1);
+ rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
+ return 0;
+}
+
+static int
+pfe_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+ struct rte_eth_stats *eth_stats = &priv->stats;
+
+ if (stats == NULL)
+ return -1;
+
+ memset(stats, 0, sizeof(struct rte_eth_stats));
+
+ stats->ipackets = eth_stats->ipackets;
+ stats->ibytes = eth_stats->ibytes;
+ stats->opackets = eth_stats->opackets;
+ stats->obytes = eth_stats->obytes;
return 0;
}
.dev_close = pfe_eth_close,
.dev_configure = pfe_eth_configure,
.dev_infos_get = pfe_eth_info,
+ .rx_queue_setup = pfe_rx_queue_setup,
+ .rx_queue_release = pfe_rx_queue_release,
+ .tx_queue_setup = pfe_tx_queue_setup,
+ .tx_queue_release = pfe_tx_queue_release,
+ .dev_supported_ptypes_get = pfe_supported_ptypes_get,
+ .link_update = pfe_eth_link_update,
+ .promiscuous_enable = pfe_promiscuous_enable,
+ .promiscuous_disable = pfe_promiscuous_disable,
+ .allmulticast_enable = pfe_allmulticast_enable,
+ .dev_set_link_down = pfe_link_down,
+ .dev_set_link_up = pfe_link_up,
+ .mtu_set = pfe_mtu_set,
+ .mac_addr_set = pfe_dev_set_mac_addr,
+ .stats_get = pfe_stats_get,
};
static int
struct pfe_eth_priv_s *priv = NULL;
struct ls1012a_eth_platform_data *einfo;
struct ls1012a_pfe_platform_data *pfe_info;
+ struct rte_ether_addr addr;
int err;
eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*priv));
goto err0;
}
+	memcpy(addr.addr_bytes, priv->einfo->mac_addr, ETH_ALEN);
+
+ pfe_dev_set_mac_addr(eth_dev, &addr);
+	rte_ether_addr_copy(&addr, &eth_dev->data->mac_addrs[0]);
+
eth_dev->data->mtu = 1500;
eth_dev->dev_ops = &ops;
- pfe_eth_stop(eth_dev);
+ err = pfe_eth_stop(eth_dev);
+ if (err != 0)
+ goto err0;
pfe_gemac_init(priv);
eth_dev->data->nb_rx_queues = 1;
eth_dev->data->nb_tx_queues = 1;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
/* For link status, open the PFE CDEV; Error from this function
* is silently ignored; In case of error, the link status will not
* be available.
if (rc < 0)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Initializing pmd_pfe for %s Given gem-id %d\n",
+ PFE_PMD_LOG(INFO, "Initializing pmd_pfe for %s Given gem-id %d",
name, init_params.gem_id);
if (g_pfe) {
else
gem_id = init_params.gem_id;
- RTE_LOG(INFO, PMD, "Init pmd_pfe for %s gem-id %d(given =%d)\n",
+ PFE_PMD_LOG(INFO, "Init pmd_pfe for %s gem-id %d(given =%d)",
name, gem_id, init_params.gem_id);
rc = pfe_eth_init(vdev, g_pfe, gem_id);
{
const char *name;
struct rte_eth_dev *eth_dev = NULL;
+ int ret = 0;
name = rte_vdev_device_name(vdev);
if (name == NULL)
return 0;
eth_dev = rte_eth_dev_allocated(name);
- if (eth_dev == NULL)
- return -ENODEV;
-
- pfe_eth_exit(eth_dev, g_pfe);
- munmap(g_pfe->cbus_baseaddr, g_pfe->cbus_size);
-
- if (g_pfe->nb_devs == 0) {
- pfe_hif_exit(g_pfe);
- pfe_hif_lib_exit(g_pfe);
- rte_free(g_pfe);
- g_pfe = NULL;
+ if (eth_dev) {
+ pfe_eth_close(eth_dev);
+ ret = rte_eth_dev_release_port(eth_dev);
}
- return 0;
+
+ return ret;
}
static
RTE_PMD_REGISTER_VDEV(PFE_NAME_PMD, pmd_pfe_drv);
RTE_PMD_REGISTER_PARAM_STRING(PFE_NAME_PMD, PFE_VDEV_GEM_ID_ARG "=<int> ");
-
-RTE_INIT(pfe_pmd_init_log)
-{
- pfe_logtype_pmd = rte_log_register("pmd.net.pfe");
- if (pfe_logtype_pmd >= 0)
- rte_log_set_level(pfe_logtype_pmd, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(pfe_logtype_pmd, pmd.net.pfe, NOTICE);