-/*-
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Corporation
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright(c) 2010-2014 Intel Corporation.
*/
/*
/* typedef for rx function */
typedef void (*kni_net_rx_t)(struct kni_dev *kni);
-static int kni_net_tx(struct sk_buff *skb, struct net_device *dev);
static void kni_net_rx_normal(struct kni_dev *kni);
-static void kni_net_rx_lo_fifo(struct kni_dev *kni);
-static void kni_net_rx_lo_fifo_skb(struct kni_dev *kni);
-static int kni_net_process_request(struct kni_dev *kni,
- struct rte_kni_request *req);
/* kni rx function pointer, with default to normal rx */
static kni_net_rx_t kni_net_rx_func = kni_net_rx_normal;
return pa;
}
+/*
+ * Send a request to user space and wait for its response.
+ */
+static int
+kni_net_process_request(struct kni_dev *kni, struct rte_kni_request *req)
+{
+ int ret = -1;
+ void *resp_va;
+ uint32_t num;
+ int ret_val;
+
+ if (!kni || !req) {
+ pr_err("No kni instance or request\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&kni->sync_lock);
+
+ /* Copy the request into the sync buffer and pass it to user space */
+ memcpy(kni->sync_kva, req, sizeof(struct rte_kni_request));
+ num = kni_fifo_put(kni->req_q, &kni->sync_va, 1);
+ if (num < 1) {
+ pr_err("Cannot send to req_q\n");
+ ret = -EBUSY;
+ goto fail;
+ }
+
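+ /* Wait (up to 3 seconds) for user space to post a response */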
+ ret_val = wait_event_interruptible_timeout(kni->wq,
+ kni_fifo_count(kni->resp_q), 3 * HZ);
+ if (signal_pending(current) || ret_val <= 0) {
+ ret = -ETIME;
+ goto fail;
+ }
+ num = kni_fifo_get(kni->resp_q, (void **)&resp_va, 1);
+ if (num != 1 || resp_va != kni->sync_va) {
+ /* This should never happen */
+ pr_err("No data in resp_q\n");
+ ret = -ENODATA;
+ goto fail;
+ }
+
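+ /* Copy the response written by user space back to the caller */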
+ memcpy(req, kni->sync_kva, sizeof(struct rte_kni_request));
+ ret = 0;
+
+fail:
+ mutex_unlock(&kni->sync_lock);
+ return ret;
+}
+
/*
* Open and close
*/
return 0;
}
+/*
+ * Transmit a packet (called by the kernel)
+ */
+static int
+kni_net_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ int len = 0;
+ uint32_t ret;
+ struct kni_dev *kni = netdev_priv(dev);
+ struct rte_kni_mbuf *pkt_kva = NULL;
+ void *pkt_pa = NULL;
+ void *pkt_va = NULL;
+
+ /* save the timestamp */
+#ifdef HAVE_TRANS_START_HELPER
+ netif_trans_update(dev);
+#else
+ dev->trans_start = jiffies;
+#endif
+
+ /* Drop the packet if it does not fit in an mbuf */
+ if (skb->len > kni->mbuf_size)
+ goto drop;
+
+ /*
+ * Check that there is at least one free entry in tx_q and
+ * at least one entry in alloc_q.
+ */
+ if (kni_fifo_free_count(kni->tx_q) == 0 ||
+ kni_fifo_count(kni->alloc_q) == 0) {
+ /* No free entry in tx_q or no entry in alloc_q: drop the skb */
+ goto drop;
+ }
+
+ /* dequeue a mbuf from alloc_q */
+ ret = kni_fifo_get(kni->alloc_q, &pkt_pa, 1);
+ if (likely(ret == 1)) {
+ void *data_kva;
+
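+ /* Translate the mbuf physical address into kernel/user virtual addresses */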
+ pkt_kva = pa2kva(pkt_pa);
+ data_kva = kva2data_kva(pkt_kva);
+ pkt_va = pa2va(pkt_pa, pkt_kva);
+
+ len = skb->len;
+ memcpy(data_kva, skb->data, len);
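+ /* Pad short frames to the minimum Ethernet frame length */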
+ if (unlikely(len < ETH_ZLEN)) {
+ memset(data_kva + len, 0, ETH_ZLEN - len);
+ len = ETH_ZLEN;
+ }
+ pkt_kva->pkt_len = len;
+ pkt_kva->data_len = len;
+
+ /* enqueue mbuf into tx_q */
+ ret = kni_fifo_put(kni->tx_q, &pkt_va, 1);
+ if (unlikely(ret != 1)) {
+ /* Failing should not happen */
+ pr_err("Fail to enqueue mbuf into tx_q\n");
+ goto drop;
+ }
+ } else {
+ /* Failing should not happen */
+ pr_err("Fail to dequeue mbuf from alloc_q\n");
+ goto drop;
+ }
+
+ /* Free skb and update statistics */
+ dev_kfree_skb(skb);
+ kni->stats.tx_bytes += len;
+ kni->stats.tx_packets++;
+
+ return NETDEV_TX_OK;
+
+drop:
+ /* Free skb and update statistics */
+ dev_kfree_skb(skb);
+ kni->stats.tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+
/*
* RX: normal working mode
*/
static void
kni_net_rx_normal(struct kni_dev *kni)
{
- unsigned ret;
+ uint32_t ret;
uint32_t len;
- unsigned i, num_rx, num_fq;
+ uint32_t i, num_rx, num_fq;
struct rte_kni_mbuf *kva;
void *data_kva;
struct sk_buff *skb;
}
/* Calculate the number of entries to dequeue from rx_q */
- num_rx = min(num_fq, (unsigned)MBUF_BURST_SZ);
+ num_rx = min_t(uint32_t, num_fq, MBUF_BURST_SZ);
/* Burst dequeue from rx_q */
num_rx = kni_fifo_get(kni->rx_q, kni->pa, num_rx);
skb = dev_alloc_skb(len + 2);
if (!skb) {
- KNI_ERR("Out of mem, dropping pkts\n");
/* Update statistics */
kni->stats.rx_dropped++;
continue;
ret = kni_fifo_put(kni->free_q, kni->va, num_rx);
if (ret != num_rx)
/* Failing should not happen */
- KNI_ERR("Fail to enqueue entries into free_q\n");
+ pr_err("Fail to enqueue entries into free_q\n");
}
/*
static void
kni_net_rx_lo_fifo(struct kni_dev *kni)
{
- unsigned ret;
+ uint32_t ret;
uint32_t len;
- unsigned i, num, num_rq, num_tq, num_aq, num_fq;
+ uint32_t i, num, num_rq, num_tq, num_aq, num_fq;
struct rte_kni_mbuf *kva;
- void * data_kva;
+ void *data_kva;
struct rte_kni_mbuf *alloc_kva;
void *alloc_data_kva;
num = min(num_rq, num_tq);
num = min(num, num_aq);
num = min(num, num_fq);
- num = min(num, (unsigned)MBUF_BURST_SZ);
+ num = min_t(uint32_t, num, MBUF_BURST_SZ);
/* Return if no entry to dequeue from rx_q */
if (num == 0)
ret = kni_fifo_put(kni->tx_q, kni->alloc_va, num);
if (ret != num)
/* Failing should not happen */
- KNI_ERR("Fail to enqueue mbufs into tx_q\n");
+ pr_err("Fail to enqueue mbufs into tx_q\n");
}
/* Burst enqueue mbufs into free_q */
ret = kni_fifo_put(kni->free_q, kni->va, num);
if (ret != num)
/* Failing should not happen */
- KNI_ERR("Fail to enqueue mbufs into free_q\n");
+ pr_err("Fail to enqueue mbufs into free_q\n");
/**
* Update statistic, and enqueue/dequeue failure is impossible,
static void
kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
{
- unsigned ret;
+ uint32_t ret;
uint32_t len;
- unsigned i, num_rq, num_fq, num;
+ uint32_t i, num_rq, num_fq, num;
struct rte_kni_mbuf *kva;
void *data_kva;
struct sk_buff *skb;
/* Calculate the number of entries to dequeue from rx_q */
num = min(num_rq, num_fq);
- num = min(num, (unsigned)MBUF_BURST_SZ);
+ num = min_t(uint32_t, num, MBUF_BURST_SZ);
/* Return if no entry to dequeue from rx_q */
if (num == 0)
kni->va[i] = pa2va(kni->pa[i], kva);
skb = dev_alloc_skb(len + 2);
- if (skb == NULL)
- KNI_ERR("Out of mem, dropping pkts\n");
- else {
+ if (skb) {
/* Align IP on 16B boundary */
skb_reserve(skb, 2);
memcpy(skb_put(skb, len), data_kva, len);
/* Simulate real usage, allocate/copy skb twice */
skb = dev_alloc_skb(len + 2);
if (skb == NULL) {
- KNI_ERR("Out of mem, dropping pkts\n");
kni->stats.rx_dropped++;
continue;
}
ret = kni_fifo_put(kni->free_q, kni->va, num);
if (ret != num)
/* Failing should not happen */
- KNI_ERR("Fail to enqueue mbufs into free_q\n");
+ pr_err("Fail to enqueue mbufs into free_q\n");
}
/* rx interface */
(*kni_net_rx_func)(kni);
}
-/*
- * Transmit a packet (called by the kernel)
- */
-#ifdef RTE_KNI_VHOST
-static int
-kni_net_tx(struct sk_buff *skb, struct net_device *dev)
-{
- struct kni_dev *kni = netdev_priv(dev);
-
- dev_kfree_skb(skb);
- kni->stats.tx_dropped++;
-
- return NETDEV_TX_OK;
-}
-#else
-static int
-kni_net_tx(struct sk_buff *skb, struct net_device *dev)
-{
- int len = 0;
- unsigned ret;
- struct kni_dev *kni = netdev_priv(dev);
- struct rte_kni_mbuf *pkt_kva = NULL;
- void *pkt_pa = NULL;
- void *pkt_va = NULL;
-
- /* save the timestamp */
-#ifdef HAVE_TRANS_START_HELPER
- netif_trans_update(dev);
-#else
- dev->trans_start = jiffies;
-#endif
-
- /* Check if the length of skb is less than mbuf size */
- if (skb->len > kni->mbuf_size)
- goto drop;
-
- /**
- * Check if it has at least one free entry in tx_q and
- * one entry in alloc_q.
- */
- if (kni_fifo_free_count(kni->tx_q) == 0 ||
- kni_fifo_count(kni->alloc_q) == 0) {
- /**
- * If no free entry in tx_q or no entry in alloc_q,
- * drops skb and goes out.
- */
- goto drop;
- }
-
- /* dequeue a mbuf from alloc_q */
- ret = kni_fifo_get(kni->alloc_q, &pkt_pa, 1);
- if (likely(ret == 1)) {
- void *data_kva;
-
- pkt_kva = pa2kva(pkt_pa);
- data_kva = kva2data_kva(pkt_kva);
- pkt_va = pa2va(pkt_pa, pkt_kva);
-
- len = skb->len;
- memcpy(data_kva, skb->data, len);
- if (unlikely(len < ETH_ZLEN)) {
- memset(data_kva + len, 0, ETH_ZLEN - len);
- len = ETH_ZLEN;
- }
- pkt_kva->pkt_len = len;
- pkt_kva->data_len = len;
-
- /* enqueue mbuf into tx_q */
- ret = kni_fifo_put(kni->tx_q, &pkt_va, 1);
- if (unlikely(ret != 1)) {
- /* Failing should not happen */
- KNI_ERR("Fail to enqueue mbuf into tx_q\n");
- goto drop;
- }
- } else {
- /* Failing should not happen */
- KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
- goto drop;
- }
-
- /* Free skb and update statistics */
- dev_kfree_skb(skb);
- kni->stats.tx_bytes += len;
- kni->stats.tx_packets++;
-
- return NETDEV_TX_OK;
-
-drop:
- /* Free skb and update statistics */
- dev_kfree_skb(skb);
- kni->stats.tx_dropped++;
-
- return NETDEV_TX_OK;
-}
-#endif
-
/*
* Deal with a transmit timeout.
*/
static void
-kni_net_tx_timeout (struct net_device *dev)
+kni_net_tx_timeout(struct net_device *dev)
{
struct kni_dev *kni = netdev_priv(dev);
- KNI_DBG("Transmit timeout at %ld, latency %ld\n", jiffies,
+ pr_debug("Transmit timeout at %ld, latency %ld\n", jiffies,
jiffies - dev_trans_start(dev));
kni->stats.tx_errors++;
netif_wake_queue(dev);
- return;
}
/*
static int
kni_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- KNI_DBG("kni_net_ioctl %d\n",
- ((struct kni_dev *)netdev_priv(dev))->group_id);
+ pr_debug("kni_net_ioctl group:%d cmd:%d\n",
+ ((struct kni_dev *)netdev_priv(dev))->group_id, cmd);
return 0;
}
struct rte_kni_request req;
struct kni_dev *kni = netdev_priv(dev);
- KNI_DBG("kni_net_change_mtu new mtu %d to be set\n", new_mtu);
+ pr_debug("kni_net_change_mtu new mtu %d to be set\n", new_mtu);
memset(&req, 0, sizeof(req));
req.req_id = RTE_KNI_REQ_CHANGE_MTU;
wake_up_interruptible(&kni->wq);
}
-/*
- * It can be called to process the request.
- */
-static int
-kni_net_process_request(struct kni_dev *kni, struct rte_kni_request *req)
-{
- int ret = -1;
- void *resp_va;
- unsigned num;
- int ret_val;
-
- if (!kni || !req) {
- KNI_ERR("No kni instance or request\n");
- return -EINVAL;
- }
-
- mutex_lock(&kni->sync_lock);
-
- /* Construct data */
- memcpy(kni->sync_kva, req, sizeof(struct rte_kni_request));
- num = kni_fifo_put(kni->req_q, &kni->sync_va, 1);
- if (num < 1) {
- KNI_ERR("Cannot send to req_q\n");
- ret = -EBUSY;
- goto fail;
- }
-
- ret_val = wait_event_interruptible_timeout(kni->wq,
- kni_fifo_count(kni->resp_q), 3 * HZ);
- if (signal_pending(current) || ret_val <= 0) {
- ret = -ETIME;
- goto fail;
- }
- num = kni_fifo_get(kni->resp_q, (void **)&resp_va, 1);
- if (num != 1 || resp_va != kni->sync_va) {
- /* This should never happen */
- KNI_ERR("No data in resp_q\n");
- ret = -ENODATA;
- goto fail;
- }
-
- memcpy(req, kni->sync_kva, sizeof(struct rte_kni_request));
- ret = 0;
-
-fail:
- mutex_unlock(&kni->sync_lock);
- return ret;
-}
-
/*
* Return statistics to the caller
*/
kni_net_stats(struct net_device *dev)
{
struct kni_dev *kni = netdev_priv(dev);
+
return &kni->stats;
}
static int
kni_net_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
- const void *saddr, unsigned int len)
+ const void *saddr, uint32_t len)
{
struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
return dev->hard_header_len;
}
-
/*
* Re-fill the eth header
*/
*
* Returns 0 on success, negative on failure
**/
-static int kni_net_set_mac(struct net_device *netdev, void *p)
+static int
+kni_net_set_mac(struct net_device *netdev, void *p)
{
+ int ret;
+ struct rte_kni_request req;
+ struct kni_dev *kni;
struct sockaddr *addr = p;
+
+ memset(&req, 0, sizeof(req));
+ req.req_id = RTE_KNI_REQ_CHANGE_MAC_ADDR;
+
if (!is_valid_ether_addr((unsigned char *)(addr->sa_data)))
return -EADDRNOTAVAIL;
+
+ memcpy(req.mac_addr, addr->sa_data, netdev->addr_len);
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- return 0;
+
+ kni = netdev_priv(netdev);
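+ /* Forward the request to user space and wait for the result */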
+ ret = kni_net_process_request(kni, &req);
+
+ return (ret == 0 ? req.result : ret);
}
#ifdef HAVE_CHANGE_CARRIER_CB
-static int kni_net_change_carrier(struct net_device *dev, bool new_carrier)
+static int
+kni_net_change_carrier(struct net_device *dev, bool new_carrier)
{
if (new_carrier)
netif_carrier_on(dev);
{
struct kni_dev *kni = netdev_priv(dev);
- KNI_DBG("kni_net_init\n");
-
init_waitqueue_head(&kni->wq);
mutex_init(&kni->sync_lock);
kni_net_config_lo_mode(char *lo_str)
{
if (!lo_str) {
- KNI_PRINT("loopback disabled");
+ pr_debug("loopback disabled");
return;
}
if (!strcmp(lo_str, "lo_mode_none"))
- KNI_PRINT("loopback disabled");
+ pr_debug("loopback disabled");
else if (!strcmp(lo_str, "lo_mode_fifo")) {
- KNI_PRINT("loopback mode=lo_mode_fifo enabled");
+ pr_debug("loopback mode=lo_mode_fifo enabled");
kni_net_rx_func = kni_net_rx_lo_fifo;
} else if (!strcmp(lo_str, "lo_mode_fifo_skb")) {
- KNI_PRINT("loopback mode=lo_mode_fifo_skb enabled");
+ pr_debug("loopback mode=lo_mode_fifo_skb enabled");
kni_net_rx_func = kni_net_rx_lo_fifo_skb;
} else
- KNI_PRINT("Incognizant parameter, loopback disabled");
+ pr_debug("Incognizant parameter, loopback disabled");
}