X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eal%2Flinuxapp%2Fkni%2Fkni_net.c;h=db9f4898910a0ec21eab427c073c1b670350d687;hb=98a7ea332ba3da0f74ec951595d36a616165b255;hp=ed62c3d4910bc7eb11e5aa7b3d3a602921644a3e;hpb=b23ffbaa821bc5676e7f4d60c717f445b85f990d;p=dpdk.git diff --git a/lib/librte_eal/linuxapp/kni/kni_net.c b/lib/librte_eal/linuxapp/kni/kni_net.c index ed62c3d491..db9f489891 100644 --- a/lib/librte_eal/linuxapp/kni/kni_net.c +++ b/lib/librte_eal/linuxapp/kni/kni_net.c @@ -1,23 +1,23 @@ /*- * GPL LICENSE SUMMARY - * - * Copyright(c) 2010-2013 Intel Corporation. All rights reserved. - * + * + * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. + * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. - * + * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. - * + * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. - * + * * Contact Information: * Intel Corporation */ @@ -36,30 +36,111 @@ #include #include -#include #include #include + +#include "compat.h" #include "kni_dev.h" #define WD_TIMEOUT 5 /*jiffies */ -#define MBUF_BURST_SZ 32 - #define KNI_WAIT_RESPONSE_TIMEOUT 300 /* 3 seconds */ /* typedef for rx function */ typedef void (*kni_net_rx_t)(struct kni_dev *kni); -static int kni_net_tx(struct sk_buff *skb, struct net_device *dev); static void kni_net_rx_normal(struct kni_dev *kni); -static void kni_net_rx_lo_fifo(struct kni_dev *kni); -static void kni_net_rx_lo_fifo_skb(struct kni_dev *kni); -static int kni_net_process_request(struct kni_dev *kni, - struct rte_kni_request *req); /* kni rx function pointer, with default to normal rx */ static kni_net_rx_t kni_net_rx_func = kni_net_rx_normal; +/* physical address to kernel virtual address */ +static void * +pa2kva(void *pa) +{ + return phys_to_virt((unsigned long)pa); +} + +/* physical address to virtual address */ +static void * +pa2va(void *pa, struct rte_kni_mbuf *m) +{ + void *va; + + va = (void *)((unsigned long)pa + + (unsigned long)m->buf_addr - + (unsigned long)m->buf_physaddr); + return va; +} + +/* mbuf data kernel virtual address from mbuf kernel virtual address */ +static void * +kva2data_kva(struct rte_kni_mbuf *m) +{ + return phys_to_virt(m->buf_physaddr + m->data_off); +} + +/* virtual address to physical address */ +static void * +va2pa(void *va, struct rte_kni_mbuf *m) +{ + void *pa; + + pa = (void *)((unsigned long)va - + ((unsigned long)m->buf_addr - + (unsigned long)m->buf_physaddr)); + return pa; +} + +/* + * It can be called to process the request. 
+ */ +static int +kni_net_process_request(struct kni_dev *kni, struct rte_kni_request *req) +{ + int ret = -1; + void *resp_va; + uint32_t num; + int ret_val; + + if (!kni || !req) { + pr_err("No kni instance or request\n"); + return -EINVAL; + } + + mutex_lock(&kni->sync_lock); + + /* Construct data */ + memcpy(kni->sync_kva, req, sizeof(struct rte_kni_request)); + num = kni_fifo_put(kni->req_q, &kni->sync_va, 1); + if (num < 1) { + pr_err("Cannot send to req_q\n"); + ret = -EBUSY; + goto fail; + } + + ret_val = wait_event_interruptible_timeout(kni->wq, + kni_fifo_count(kni->resp_q), 3 * HZ); + if (signal_pending(current) || ret_val <= 0) { + ret = -ETIME; + goto fail; + } + num = kni_fifo_get(kni->resp_q, (void **)&resp_va, 1); + if (num != 1 || resp_va != kni->sync_va) { + /* This should never happen */ + pr_err("No data in resp_q\n"); + ret = -ENODATA; + goto fail; + } + + memcpy(req, kni->sync_kva, sizeof(struct rte_kni_request)); + ret = 0; + +fail: + mutex_unlock(&kni->sync_lock); + return ret; +} + /* * Open and close */ @@ -70,15 +151,6 @@ kni_net_open(struct net_device *dev) struct rte_kni_request req; struct kni_dev *kni = netdev_priv(dev); - if (kni->lad_dev) - memcpy(dev->dev_addr, kni->lad_dev->dev_addr, ETH_ALEN); - else - /* - * Generate random mac address. eth_random_addr() is the newer - * version of generating mac address in linux kernel. - */ - random_ether_addr(dev->dev_addr); - netif_start_queue(dev); memset(&req, 0, sizeof(req)); @@ -88,7 +160,7 @@ kni_net_open(struct net_device *dev) req.if_up = 1; ret = kni_net_process_request(kni, &req); - return (ret == 0 ? req.result : ret); + return (ret == 0) ? req.result : ret; } static int @@ -107,7 +179,7 @@ kni_net_release(struct net_device *dev) req.if_up = 0; ret = kni_net_process_request(kni, &req); - return (ret == 0 ? req.result : ret); + return (ret == 0) ? req.result : ret; } /* @@ -123,75 +195,170 @@ kni_net_config(struct net_device *dev, struct ifmap *map) return 0; } +/* + * Transmit a packet (called by the kernel) + */ +static int +kni_net_tx(struct sk_buff *skb, struct net_device *dev) +{ + int len = 0; + uint32_t ret; + struct kni_dev *kni = netdev_priv(dev); + struct rte_kni_mbuf *pkt_kva = NULL; + void *pkt_pa = NULL; + void *pkt_va = NULL; + + /* save the timestamp */ +#ifdef HAVE_TRANS_START_HELPER + netif_trans_update(dev); +#else + dev->trans_start = jiffies; +#endif + + /* Check if the length of skb is less than mbuf size */ + if (skb->len > kni->mbuf_size) + goto drop; + + /** + * Check if it has at least one free entry in tx_q and + * one entry in alloc_q. + */ + if (kni_fifo_free_count(kni->tx_q) == 0 || + kni_fifo_count(kni->alloc_q) == 0) { + /** + * If no free entry in tx_q or no entry in alloc_q, + * drops skb and goes out. 
+ */ + goto drop; + } + + /* dequeue a mbuf from alloc_q */ + ret = kni_fifo_get(kni->alloc_q, &pkt_pa, 1); + if (likely(ret == 1)) { + void *data_kva; + + pkt_kva = pa2kva(pkt_pa); + data_kva = kva2data_kva(pkt_kva); + pkt_va = pa2va(pkt_pa, pkt_kva); + + len = skb->len; + memcpy(data_kva, skb->data, len); + if (unlikely(len < ETH_ZLEN)) { + memset(data_kva + len, 0, ETH_ZLEN - len); + len = ETH_ZLEN; + } + pkt_kva->pkt_len = len; + pkt_kva->data_len = len; + + /* enqueue mbuf into tx_q */ + ret = kni_fifo_put(kni->tx_q, &pkt_va, 1); + if (unlikely(ret != 1)) { + /* Failing should not happen */ + pr_err("Fail to enqueue mbuf into tx_q\n"); + goto drop; + } + } else { + /* Failing should not happen */ + pr_err("Fail to dequeue mbuf from alloc_q\n"); + goto drop; + } + + /* Free skb and update statistics */ + dev_kfree_skb(skb); + kni->stats.tx_bytes += len; + kni->stats.tx_packets++; + + return NETDEV_TX_OK; + +drop: + /* Free skb and update statistics */ + dev_kfree_skb(skb); + kni->stats.tx_dropped++; + + return NETDEV_TX_OK; +} + /* * RX: normal working mode */ static void kni_net_rx_normal(struct kni_dev *kni) { - unsigned ret; + uint32_t ret; uint32_t len; - unsigned i, num, num_rq, num_fq; + uint32_t i, num_rx, num_fq; struct rte_kni_mbuf *kva; - struct rte_kni_mbuf *va[MBUF_BURST_SZ]; - void * data_kva; - + void *data_kva; struct sk_buff *skb; struct net_device *dev = kni->net_dev; - /* Get the number of entries in rx_q */ - num_rq = kni_fifo_count(kni->rx_q); - /* Get the number of free entries in free_q */ num_fq = kni_fifo_free_count(kni->free_q); - - /* Calculate the number of entries to dequeue in rx_q */ - num = min(num_rq, num_fq); - num = min(num, (unsigned)MBUF_BURST_SZ); - - /* Return if no entry in rx_q and no free entry in free_q */ - if (num == 0) + if (num_fq == 0) { + /* No room on the free_q, bail out */ return; + } + + /* Calculate the number of entries to dequeue from rx_q */ + num_rx = min_t(uint32_t, num_fq, MBUF_BURST_SZ); /* Burst dequeue from rx_q */ - ret = kni_fifo_get(kni->rx_q, (void **)va, num); - if (ret == 0) - return; /* Failing should not happen */ + num_rx = kni_fifo_get(kni->rx_q, kni->pa, num_rx); + if (num_rx == 0) + return; /* Transfer received packets to netif */ - for (i = 0; i < num; i++) { - kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva; - len = kva->data_len; - data_kva = kva->data - kni->mbuf_va + kni->mbuf_kva; + for (i = 0; i < num_rx; i++) { + kva = pa2kva(kni->pa[i]); + len = kva->pkt_len; + data_kva = kva2data_kva(kva); + kni->va[i] = pa2va(kni->pa[i], kva); skb = dev_alloc_skb(len + 2); if (!skb) { - KNI_ERR("Out of mem, dropping pkts\n"); /* Update statistics */ kni->stats.rx_dropped++; + continue; } - else { - /* Align IP on 16B boundary */ - skb_reserve(skb, 2); + + /* Align IP on 16B boundary */ + skb_reserve(skb, 2); + + if (kva->nb_segs == 1) { memcpy(skb_put(skb, len), data_kva, len); - skb->dev = dev; - skb->protocol = eth_type_trans(skb, dev); - skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + int nb_segs; + int kva_nb_segs = kva->nb_segs; - /* Call netif interface */ - netif_receive_skb(skb); + for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) { + memcpy(skb_put(skb, kva->data_len), + data_kva, kva->data_len); - /* Update statistics */ - kni->stats.rx_bytes += len; - kni->stats.rx_packets++; + if (!kva->next) + break; + + kva = pa2kva(va2pa(kva->next, kva)); + data_kva = kva2data_kva(kva); + } } + + skb->dev = dev; + skb->protocol = eth_type_trans(skb, dev); + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* Call 
netif interface */ + netif_rx_ni(skb); + + /* Update statistics */ + kni->stats.rx_bytes += len; + kni->stats.rx_packets++; } /* Burst enqueue mbufs into free_q */ - ret = kni_fifo_put(kni->free_q, (void **)va, num); - if (ret != num) + ret = kni_fifo_put(kni->free_q, kni->va, num_rx); + if (ret != num_rx) /* Failing should not happen */ - KNI_ERR("Fail to enqueue entries into free_q\n"); + pr_err("Fail to enqueue entries into free_q\n"); } /* @@ -200,15 +367,12 @@ kni_net_rx_normal(struct kni_dev *kni) static void kni_net_rx_lo_fifo(struct kni_dev *kni) { - unsigned ret; + uint32_t ret; uint32_t len; - unsigned i, num, num_rq, num_tq, num_aq, num_fq; + uint32_t i, num, num_rq, num_tq, num_aq, num_fq; struct rte_kni_mbuf *kva; - struct rte_kni_mbuf *va[MBUF_BURST_SZ]; - void * data_kva; - + void *data_kva; struct rte_kni_mbuf *alloc_kva; - struct rte_kni_mbuf *alloc_va[MBUF_BURST_SZ]; void *alloc_data_kva; /* Get the number of entries in rx_q */ @@ -227,32 +391,32 @@ kni_net_rx_lo_fifo(struct kni_dev *kni) num = min(num_rq, num_tq); num = min(num, num_aq); num = min(num, num_fq); - num = min(num, (unsigned)MBUF_BURST_SZ); + num = min_t(uint32_t, num, MBUF_BURST_SZ); /* Return if no entry to dequeue from rx_q */ if (num == 0) return; /* Burst dequeue from rx_q */ - ret = kni_fifo_get(kni->rx_q, (void **)va, num); + ret = kni_fifo_get(kni->rx_q, kni->pa, num); if (ret == 0) return; /* Failing should not happen */ /* Dequeue entries from alloc_q */ - ret = kni_fifo_get(kni->alloc_q, (void **)alloc_va, num); + ret = kni_fifo_get(kni->alloc_q, kni->alloc_pa, num); if (ret) { num = ret; /* Copy mbufs */ for (i = 0; i < num; i++) { - kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva; + kva = pa2kva(kni->pa[i]); len = kva->pkt_len; - data_kva = kva->data - kni->mbuf_va + - kni->mbuf_kva; + data_kva = kva2data_kva(kva); + kni->va[i] = pa2va(kni->pa[i], kva); + + alloc_kva = pa2kva(kni->alloc_pa[i]); + alloc_data_kva = kva2data_kva(alloc_kva); + kni->alloc_va[i] = pa2va(kni->alloc_pa[i], alloc_kva); - alloc_kva = (void *)alloc_va[i] - kni->mbuf_va + - kni->mbuf_kva; - alloc_data_kva = alloc_kva->data - kni->mbuf_va + - kni->mbuf_kva; memcpy(alloc_data_kva, data_kva, len); alloc_kva->pkt_len = len; alloc_kva->data_len = len; @@ -262,17 +426,17 @@ kni_net_rx_lo_fifo(struct kni_dev *kni) } /* Burst enqueue mbufs into tx_q */ - ret = kni_fifo_put(kni->tx_q, (void **)alloc_va, num); + ret = kni_fifo_put(kni->tx_q, kni->alloc_va, num); if (ret != num) /* Failing should not happen */ - KNI_ERR("Fail to enqueue mbufs into tx_q\n"); + pr_err("Fail to enqueue mbufs into tx_q\n"); } /* Burst enqueue mbufs into free_q */ - ret = kni_fifo_put(kni->free_q, (void **)va, num); + ret = kni_fifo_put(kni->free_q, kni->va, num); if (ret != num) /* Failing should not happen */ - KNI_ERR("Fail to enqueue mbufs into free_q\n"); + pr_err("Fail to enqueue mbufs into free_q\n"); /** * Update statistic, and enqueue/dequeue failure is impossible, @@ -288,13 +452,11 @@ kni_net_rx_lo_fifo(struct kni_dev *kni) static void kni_net_rx_lo_fifo_skb(struct kni_dev *kni) { - unsigned ret; + uint32_t ret; uint32_t len; - unsigned i, num_rq, num_fq, num; + uint32_t i, num_rq, num_fq, num; struct rte_kni_mbuf *kva; - struct rte_kni_mbuf *va[MBUF_BURST_SZ]; - void * data_kva; - + void *data_kva; struct sk_buff *skb; struct net_device *dev = kni->net_dev; @@ -306,32 +468,30 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni) /* Calculate the number of entries to dequeue from rx_q */ num = min(num_rq, num_fq); - num = min(num, 
(unsigned)MBUF_BURST_SZ); + num = min_t(uint32_t, num, MBUF_BURST_SZ); /* Return if no entry to dequeue from rx_q */ if (num == 0) return; /* Burst dequeue mbufs from rx_q */ - ret = kni_fifo_get(kni->rx_q, (void **)va, num); + ret = kni_fifo_get(kni->rx_q, kni->pa, num); if (ret == 0) return; /* Copy mbufs to sk buffer and then call tx interface */ for (i = 0; i < num; i++) { - kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva; - len = kva->data_len; - data_kva = kva->data - kni->mbuf_va + kni->mbuf_kva; + kva = pa2kva(kni->pa[i]); + len = kva->pkt_len; + data_kva = kva2data_kva(kva); + kni->va[i] = pa2va(kni->pa[i], kva); skb = dev_alloc_skb(len + 2); - if (skb == NULL) - KNI_ERR("Out of mem, dropping pkts\n"); - else { + if (skb) { /* Align IP on 16B boundary */ skb_reserve(skb, 2); memcpy(skb_put(skb, len), data_kva, len); skb->dev = dev; - skb->protocol = eth_type_trans(skb, dev); skb->ip_summed = CHECKSUM_UNNECESSARY; dev_kfree_skb(skb); } @@ -339,30 +499,46 @@ kni_net_rx_lo_fifo_skb(struct kni_dev *kni) /* Simulate real usage, allocate/copy skb twice */ skb = dev_alloc_skb(len + 2); if (skb == NULL) { - KNI_ERR("Out of mem, dropping pkts\n"); kni->stats.rx_dropped++; + continue; } - else { - /* Align IP on 16B boundary */ - skb_reserve(skb, 2); + + /* Align IP on 16B boundary */ + skb_reserve(skb, 2); + + if (kva->nb_segs == 1) { memcpy(skb_put(skb, len), data_kva, len); - skb->dev = dev; - skb->protocol = eth_type_trans(skb, dev); - skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + int nb_segs; + int kva_nb_segs = kva->nb_segs; - kni->stats.rx_bytes += len; - kni->stats.rx_packets++; + for (nb_segs = 0; nb_segs < kva_nb_segs; nb_segs++) { + memcpy(skb_put(skb, kva->data_len), + data_kva, kva->data_len); - /* call tx interface */ - kni_net_tx(skb, dev); + if (!kva->next) + break; + + kva = pa2kva(va2pa(kva->next, kva)); + data_kva = kva2data_kva(kva); + } } + + skb->dev = dev; + skb->ip_summed = CHECKSUM_UNNECESSARY; + + kni->stats.rx_bytes += len; + kni->stats.rx_packets++; + + /* call tx interface */ + kni_net_tx(skb, dev); } /* enqueue all the mbufs from rx_q into free_q */ - ret = kni_fifo_put(kni->free_q, (void **)&va, num); + ret = kni_fifo_put(kni->free_q, kni->va, num); if (ret != num) /* Failing should not happen */ - KNI_ERR("Fail to enqueue mbufs into free_q\n"); + pr_err("Fail to enqueue mbufs into free_q\n"); } /* rx interface */ @@ -376,109 +552,19 @@ kni_net_rx(struct kni_dev *kni) (*kni_net_rx_func)(kni); } -/* - * Transmit a packet (called by the kernel) - */ -#ifdef RTE_KNI_VHOST -static int -kni_net_tx(struct sk_buff *skb, struct net_device *dev) -{ - struct kni_dev *kni = netdev_priv(dev); - - dev_kfree_skb(skb); - kni->stats.tx_dropped++; - - return NETDEV_TX_OK; -} -#else -static int -kni_net_tx(struct sk_buff *skb, struct net_device *dev) -{ - int len = 0; - unsigned ret; - struct kni_dev *kni = netdev_priv(dev); - struct rte_kni_mbuf *pkt_kva = NULL; - struct rte_kni_mbuf *pkt_va = NULL; - - dev->trans_start = jiffies; /* save the timestamp */ - - /* Check if the length of skb is less than mbuf size */ - if (skb->len > kni->mbuf_size) - goto drop; - - /** - * Check if it has at least one free entry in tx_q and - * one entry in alloc_q. - */ - if (kni_fifo_free_count(kni->tx_q) == 0 || - kni_fifo_count(kni->alloc_q) == 0) { - /** - * If no free entry in tx_q or no entry in alloc_q, - * drops skb and goes out. 
- */ - goto drop; - } - - /* dequeue a mbuf from alloc_q */ - ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1); - if (likely(ret == 1)) { - void *data_kva; - - pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva; - data_kva = pkt_kva->data - kni->mbuf_va + kni->mbuf_kva; - - len = skb->len; - memcpy(data_kva, skb->data, len); - if (unlikely(len < ETH_ZLEN)) { - memset(data_kva + len, 0, ETH_ZLEN - len); - len = ETH_ZLEN; - } - pkt_kva->pkt_len = len; - pkt_kva->data_len = len; - - /* enqueue mbuf into tx_q */ - ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1); - if (unlikely(ret != 1)) { - /* Failing should not happen */ - KNI_ERR("Fail to enqueue mbuf into tx_q\n"); - goto drop; - } - } else { - /* Failing should not happen */ - KNI_ERR("Fail to dequeue mbuf from alloc_q\n"); - goto drop; - } - - /* Free skb and update statistics */ - dev_kfree_skb(skb); - kni->stats.tx_bytes += len; - kni->stats.tx_packets++; - - return NETDEV_TX_OK; - -drop: - /* Free skb and update statistics */ - dev_kfree_skb(skb); - kni->stats.tx_dropped++; - - return NETDEV_TX_OK; -} -#endif - /* * Deal with a transmit timeout. */ static void -kni_net_tx_timeout (struct net_device *dev) +kni_net_tx_timeout(struct net_device *dev) { struct kni_dev *kni = netdev_priv(dev); - KNI_DBG("Transmit timeout at %ld, latency %ld\n", jiffies, - jiffies - dev->trans_start); + pr_debug("Transmit timeout at %ld, latency %ld\n", jiffies, + jiffies - dev_trans_start(dev)); kni->stats.tx_errors++; netif_wake_queue(dev); - return; } /* @@ -487,12 +573,17 @@ kni_net_tx_timeout (struct net_device *dev) static int kni_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { - KNI_DBG("kni_net_ioctl %d\n", - ((struct kni_dev *)netdev_priv(dev))->group_id); + pr_debug("kni_net_ioctl group:%d cmd:%d\n", + ((struct kni_dev *)netdev_priv(dev))->group_id, cmd); return 0; } +static void +kni_net_set_rx_mode(struct net_device *dev) +{ +} + static int kni_net_change_mtu(struct net_device *dev, int new_mtu) { @@ -500,7 +591,7 @@ kni_net_change_mtu(struct net_device *dev, int new_mtu) struct rte_kni_request req; struct kni_dev *kni = netdev_priv(dev); - KNI_DBG("kni_net_change_mtu new mtu %d to be set\n", new_mtu); + pr_debug("kni_net_change_mtu new mtu %d to be set\n", new_mtu); memset(&req, 0, sizeof(req)); req.req_id = RTE_KNI_REQ_CHANGE_MTU; @@ -509,7 +600,7 @@ kni_net_change_mtu(struct net_device *dev, int new_mtu) if (ret == 0 && req.result == 0) dev->mtu = new_mtu; - return (ret == 0 ? req.result : ret); + return (ret == 0) ? req.result : ret; } /* @@ -522,55 +613,6 @@ kni_net_poll_resp(struct kni_dev *kni) wake_up_interruptible(&kni->wq); } -/* - * It can be called to process the request. 
- */ -static int -kni_net_process_request(struct kni_dev *kni, struct rte_kni_request *req) -{ - int ret = -1; - void *resp_va; - unsigned num; - int ret_val; - - if (!kni || !req) { - KNI_ERR("No kni instance or request\n"); - return -EINVAL; - } - - mutex_lock(&kni->sync_lock); - - /* Construct data */ - memcpy(kni->sync_kva, req, sizeof(struct rte_kni_request)); - num = kni_fifo_put(kni->req_q, &kni->sync_va, 1); - if (num < 1) { - KNI_ERR("Cannot send to req_q\n"); - ret = -EBUSY; - goto fail; - } - - ret_val = wait_event_interruptible_timeout(kni->wq, - kni_fifo_count(kni->resp_q), 3 * HZ); - if (signal_pending(current) || ret_val <= 0) { - ret = -ETIME; - goto fail; - } - num = kni_fifo_get(kni->resp_q, (void **)&resp_va, 1); - if (num != 1 || resp_va != kni->sync_va) { - /* This should never happen */ - KNI_ERR("No data in resp_q\n"); - ret = -ENODATA; - goto fail; - } - - memcpy(req, kni->sync_kva, sizeof(struct rte_kni_request)); - ret = 0; - -fail: - mutex_unlock(&kni->sync_lock); - return ret; -} - /* * Return statistics to the caller */ @@ -578,6 +620,7 @@ static struct net_device_stats * kni_net_stats(struct net_device *dev) { struct kni_dev *kni = netdev_priv(dev); + return &kni->stats; } @@ -587,7 +630,7 @@ kni_net_stats(struct net_device *dev) static int kni_net_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, - const void *saddr, unsigned int len) + const void *saddr, uint32_t len) { struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN); @@ -595,13 +638,13 @@ kni_net_header(struct sk_buff *skb, struct net_device *dev, memcpy(eth->h_dest, daddr ? daddr : dev->dev_addr, dev->addr_len); eth->h_proto = htons(type); - return (dev->hard_header_len); + return dev->hard_header_len; } - /* * Re-fill the eth header */ +#ifdef HAVE_REBUILD_HEADER static int kni_net_rebuild_header(struct sk_buff *skb) { @@ -613,11 +656,43 @@ kni_net_rebuild_header(struct sk_buff *skb) return 0; } +#endif /* < 4.1.0 */ + +/** + * kni_net_set_mac - Change the Ethernet Address of the KNI NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int +kni_net_set_mac(struct net_device *netdev, void *p) +{ + struct sockaddr *addr = p; + if (!is_valid_ether_addr((unsigned char *)(addr->sa_data))) + return -EADDRNOTAVAIL; + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + return 0; +} + +#ifdef HAVE_CHANGE_CARRIER_CB +static int +kni_net_change_carrier(struct net_device *dev, bool new_carrier) +{ + if (new_carrier) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + return 0; +} +#endif static const struct header_ops kni_net_header_ops = { .create = kni_net_header, +#ifdef HAVE_REBUILD_HEADER .rebuild = kni_net_rebuild_header, +#endif /* < 4.1.0 */ .cache = NULL, /* disable caching */ }; @@ -628,8 +703,13 @@ static const struct net_device_ops kni_net_netdev_ops = { .ndo_start_xmit = kni_net_tx, .ndo_change_mtu = kni_net_change_mtu, .ndo_do_ioctl = kni_net_ioctl, + .ndo_set_rx_mode = kni_net_set_rx_mode, .ndo_get_stats = kni_net_stats, .ndo_tx_timeout = kni_net_tx_timeout, + .ndo_set_mac_address = kni_net_set_mac, +#ifdef HAVE_CHANGE_CARRIER_CB + .ndo_change_carrier = kni_net_change_carrier, +#endif }; void @@ -637,8 +717,6 @@ kni_net_init(struct net_device *dev) { struct kni_dev *kni = netdev_priv(dev); - KNI_DBG("kni_net_init\n"); - init_waitqueue_head(&kni->wq); mutex_init(&kni->sync_lock); @@ -652,18 +730,18 @@ void 
kni_net_config_lo_mode(char *lo_str) { if (!lo_str) { - KNI_PRINT("loopback disabled"); + pr_debug("loopback disabled"); return; } if (!strcmp(lo_str, "lo_mode_none")) - KNI_PRINT("loopback disabled"); + pr_debug("loopback disabled"); else if (!strcmp(lo_str, "lo_mode_fifo")) { - KNI_PRINT("loopback mode=lo_mode_fifo enabled"); + pr_debug("loopback mode=lo_mode_fifo enabled"); kni_net_rx_func = kni_net_rx_lo_fifo; } else if (!strcmp(lo_str, "lo_mode_fifo_skb")) { - KNI_PRINT("loopback mode=lo_mode_fifo_skb enabled"); + pr_debug("loopback mode=lo_mode_fifo_skb enabled"); kni_net_rx_func = kni_net_rx_lo_fifo_skb; } else - KNI_PRINT("Incognizant parameter, loopback disabled"); + pr_debug("Incognizant parameter, loopback disabled"); }
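
The central change in this patch is that the exchange FIFOs (rx_q, tx_q, alloc_q, free_q) now carry mbuf physical addresses, and the new helpers pa2kva(), pa2va(), va2pa() and kva2data_kva() translate between the physical address, the kernel virtual address and the userspace virtual address recorded in the mbuf. Below is a minimal sketch of the round-trip invariant those helpers rely on, assuming the struct rte_kni_mbuf layout used above (buf_addr holds the userspace virtual address of the buffer, buf_physaddr its physical address); the function is illustrative only and is not part of the patch.

/*
 * Illustrative only: round-trip check for the PA/VA translation helpers
 * introduced by this patch. Would only compile inside kni_net.c, where
 * pa2kva(), pa2va(), va2pa() and kva2data_kva() are defined.
 */
static inline void
kni_addr_translation_check(void *pkt_pa)
{
	struct rte_kni_mbuf *kva;
	void *data_kva;
	void *pkt_va;

	kva = pa2kva(pkt_pa);           /* PA -> kernel VA (phys_to_virt) */
	data_kva = kva2data_kva(kva);   /* packet data, kernel VA */
	pkt_va = pa2va(pkt_pa, kva);    /* PA -> userspace VA, using the
	                                 * buf_addr - buf_physaddr offset
	                                 * stored in the mbuf itself */

	/* va2pa() must undo pa2va() for the same mbuf */
	WARN_ON(va2pa(pkt_va, kva) != pkt_pa);
	(void)data_kva;
}

The same offset trick is what lets the new receive path walk a multi-segment chain with pa2kva(va2pa(kva->next, kva)): kva->next stores a userspace virtual address written by the DPDK application, so it is first converted back to a physical address and then to a kernel virtual address before the next segment is copied.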
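The new #include "compat.h" and the HAVE_* guards (HAVE_TRANS_START_HELPER, HAVE_REBUILD_HEADER, HAVE_CHANGE_CARRIER_CB) move kernel-version differences out of the driver logic. A hedged sketch of how such guards are commonly derived from the running kernel version follows; the actual compat.h shipped with DPDK may define them differently. The 4.1.0 bound for HAVE_REBUILD_HEADER is taken from the comment in the patch, while the other thresholds are assumptions based on when netif_trans_update() and ndo_change_carrier appeared upstream.

/* Sketch only; thresholds other than 4.1.0 are assumptions, not taken
 * from this patch. */
#include <linux/version.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 0)
#define HAVE_REBUILD_HEADER        /* header_ops->rebuild still exists */
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
#define HAVE_TRANS_START_HELPER    /* netif_trans_update() is available */
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
#define HAVE_CHANGE_CARRIER_CB     /* ndo_change_carrier in net_device_ops */
#endif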