#include <unistd.h>
#include <rte_string_fns.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_vdev.h>
+#include <ethdev_driver.h>
+#include <ethdev_vdev.h>
#include <rte_kni.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
+/*
+ * Per-queue traffic counters, embedded in struct pmd_queue.
+ * err_pkts is removed below: packets the KNI burst could not
+ * accept are not errors, so they are no longer counted here.
+ */
struct pmd_queue_stats {
uint64_t pkts;
uint64_t bytes;
- uint64_t err_pkts;
};
struct pmd_queue {
struct pmd_internals {
struct rte_kni *kni;
+ uint16_t port_id;
int is_kni_started;
pthread_t thread;
};
static int is_kni_initialized;
-static int eth_kni_logtype;
+RTE_LOG_REGISTER(eth_kni_logtype, pmd.net.kni, NOTICE);
#define PMD_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, eth_kni_logtype, \
struct pmd_queue *kni_q = q;
struct rte_kni *kni = kni_q->internals->kni;
uint16_t nb_pkts;
+ int i;
nb_pkts = rte_kni_rx_burst(kni, bufs, nb_bufs);
+ for (i = 0; i < nb_pkts; i++)
+ bufs[i]->port = kni_q->internals->port_id;
kni_q->rx.pkts += nb_pkts;
- kni_q->rx.err_pkts += nb_bufs - nb_pkts;
return nb_pkts;
}
nb_pkts = rte_kni_tx_burst(kni, bufs, nb_bufs);
kni_q->tx.pkts += nb_pkts;
- kni_q->tx.err_pkts += nb_bufs - nb_pkts;
return nb_pkts;
}
}
if (internals->no_request_thread == 0) {
+ internals->stop_thread = 0;
+
ret = rte_ctrl_thread_create(&internals->thread,
"kni_handle_req", NULL,
kni_handle_request, internals);
return 0;
}
+/*
+ * Stop the device: cancel and join the KNI request-handling thread if
+ * one is running, then mark the link down.  Returns int (0 on success)
+ * to match the updated ethdev dev_stop callback signature.
+ */
-static void
+static int
eth_kni_dev_stop(struct rte_eth_dev *dev)
{
struct pmd_internals *internals = dev->data->dev_private;
int ret;
+ /* stop_thread stays set after stopping, making this idempotent so
+ * dev_close can call it again safely */
- if (internals->no_request_thread == 0) {
+ if (internals->no_request_thread == 0 && internals->stop_thread == 0) {
internals->stop_thread = 1;
+ /* NOTE(review): pthread_cancel()'s result is discarded -- ret is
+ * immediately overwritten by pthread_join(); confirm intentional */
ret = pthread_cancel(internals->thread);
ret = pthread_join(internals->thread, NULL);
if (ret)
PMD_LOG(ERR, "Can't join the thread");
-
- internals->stop_thread = 0;
}
dev->data->dev_link.link_status = 0;
+ dev->data->dev_started = 0;
+
+ return 0;
+}
+
+/*
+ * Release the port: stop the device, then release the underlying KNI
+ * interface.  Only the primary process owns the KNI, so secondaries
+ * return immediately.  Returns the rte_kni_release() result.
+ */
+static int
+eth_kni_close(struct rte_eth_dev *eth_dev)
+{
+ struct pmd_internals *internals;
+ int ret;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ /* Check the stop result instead of silently overwriting it below. */
+ ret = eth_kni_dev_stop(eth_dev);
+ if (ret)
+ PMD_LOG(WARNING, "Not able to stop kni for %s",
+ eth_dev->data->name);
+
+ /* mac_addrs must not be freed alone because part of dev_private */
+ eth_dev->data->mac_addrs = NULL;
+
+ internals = eth_dev->data->dev_private;
+ ret = rte_kni_release(internals->kni);
+ if (ret)
+ PMD_LOG(WARNING, "Not able to release kni for %s",
+ eth_dev->data->name);
+
+ return ret;
}
static int
return 0;
}
+/*
+ * Report device capabilities: one KNI queue pair limit per direction,
+ * no minimum RX buffer size.  Returns int (0) to match the updated
+ * ethdev dev_infos_get callback signature.
+ */
-static void
+static int
eth_kni_dev_info(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_dev_info *dev_info)
{
dev_info->max_rx_queues = KNI_MAX_QUEUE_PER_PORT;
dev_info->max_tx_queues = KNI_MAX_QUEUE_PER_PORT;
dev_info->min_rx_bufsize = 0;
+
+ return 0;
}
static int
unsigned long rx_packets_total = 0, rx_bytes_total = 0;
unsigned long tx_packets_total = 0, tx_bytes_total = 0;
struct rte_eth_dev_data *data = dev->data;
- unsigned long tx_packets_err_total = 0;
unsigned int i, num_stats;
struct pmd_queue *q;
q = data->tx_queues[i];
stats->q_opackets[i] = q->tx.pkts;
stats->q_obytes[i] = q->tx.bytes;
- stats->q_errors[i] = q->tx.err_pkts;
tx_packets_total += stats->q_opackets[i];
tx_bytes_total += stats->q_obytes[i];
- tx_packets_err_total += stats->q_errors[i];
}
stats->ipackets = rx_packets_total;
stats->ibytes = rx_bytes_total;
stats->opackets = tx_packets_total;
stats->obytes = tx_bytes_total;
- stats->oerrors = tx_packets_err_total;
return 0;
}
-static void
+static int
eth_kni_stats_reset(struct rte_eth_dev *dev)
{
struct rte_eth_dev_data *data = dev->data;
q = data->tx_queues[i];
q->tx.pkts = 0;
q->tx.bytes = 0;
- q->tx.err_pkts = 0;
}
+
+ return 0;
}
static const struct eth_dev_ops eth_kni_ops = {
.dev_start = eth_kni_dev_start,
.dev_stop = eth_kni_dev_stop,
+ .dev_close = eth_kni_close,
.dev_configure = eth_kni_dev_configure,
.dev_infos_get = eth_kni_dev_info,
.rx_queue_setup = eth_kni_rx_queue_setup,
return NULL;
internals = eth_dev->data->dev_private;
+ internals->port_id = eth_dev->data->port_id;
data = eth_dev->data;
data->nb_rx_queues = 1;
data->nb_tx_queues = 1;
data->dev_link = pmd_link;
data->mac_addrs = &internals->eth_addr;
+ data->promiscuous = 1;
+ data->all_multicast = 1;
+ data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
rte_eth_random_addr(internals->eth_addr.addr_bytes);
eth_kni_remove(struct rte_vdev_device *vdev)
{
struct rte_eth_dev *eth_dev;
- struct pmd_internals *internals;
const char *name;
int ret;
/* find the ethdev entry */
eth_dev = rte_eth_dev_allocated(name);
- if (eth_dev == NULL)
- return -1;
-
- /* mac_addrs must not be freed alone because part of dev_private */
- eth_dev->data->mac_addrs = NULL;
-
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return rte_eth_dev_release_port(eth_dev);
-
- eth_kni_dev_stop(eth_dev);
-
- internals = eth_dev->data->dev_private;
- ret = rte_kni_release(internals->kni);
- if (ret)
- PMD_LOG(WARNING, "Not able to release kni for %s", name);
-
- rte_eth_dev_release_port(eth_dev);
+ if (eth_dev != NULL) {
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ ret = eth_kni_dev_stop(eth_dev);
+ if (ret != 0)
+ return ret;
+ return rte_eth_dev_release_port(eth_dev);
+ }
+ eth_kni_close(eth_dev);
+ rte_eth_dev_release_port(eth_dev);
+ }
is_kni_initialized--;
if (is_kni_initialized == 0)
RTE_PMD_REGISTER_VDEV(net_kni, eth_kni_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_kni, ETH_KNI_NO_REQUEST_THREAD_ARG "=<int>");
-
-RTE_INIT(eth_kni_init_log)
-{
- eth_kni_logtype = rte_log_register("pmd.net.kni");
- if (eth_kni_logtype >= 0)
- rte_log_set_level(eth_kni_logtype, RTE_LOG_NOTICE);
-}