/*-
 * GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *   General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *   The full GNU General Public License is included in this distribution
 *   in the file called LICENSE.GPL.
 *
 *   Contact Information:
 *   Intel Corporation
 */
/*
 * This code is inspired by the book "Linux Device Drivers" by
 * Alessandro Rubini and Jonathan Corbet, published by O'Reilly & Associates.
 */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h> /* eth_type_trans */
#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/delay.h>

#include <rte_config.h>
#include <exec-env/rte_kni_common.h>
#include <kni_dev.h>
#define WD_TIMEOUT 5 /* jiffies */

#define MBUF_BURST_SZ 32

#define KNI_WAIT_RESPONSE_TIMEOUT 300 /* 3 seconds */
/* typedef for rx function */
typedef void (*kni_net_rx_t)(struct kni_dev *kni);

static int kni_net_tx(struct sk_buff *skb, struct net_device *dev);
static void kni_net_rx_normal(struct kni_dev *kni);
static void kni_net_rx_lo_fifo(struct kni_dev *kni);
static void kni_net_rx_lo_fifo_skb(struct kni_dev *kni);
static int kni_net_process_request(struct kni_dev *kni,
                struct rte_kni_request *req);

/* kni rx function pointer, with default to normal rx */
static kni_net_rx_t kni_net_rx_func = kni_net_rx_normal;
/**
 * Adds num elements into the fifo. Returns the number actually written.
 */
static inline unsigned
kni_fifo_put(struct rte_kni_fifo *fifo, void **data, unsigned num)
{
        unsigned i = 0;
        unsigned fifo_write = fifo->write;
        unsigned fifo_read = fifo->read;
        unsigned new_write = fifo_write;

        for (i = 0; i < num; i++) {
                new_write = (new_write + 1) & (fifo->len - 1);

                if (new_write == fifo_read)
                        break;
                fifo->buffer[fifo_write] = data[i];
                fifo_write = new_write;
        }
        fifo->write = fifo_write;

        return i;
}
/**
 * Gets up to num elements from the fifo. Returns the number actually read.
 */
static inline unsigned
kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
{
        unsigned i = 0;
        unsigned new_read = fifo->read;
        unsigned fifo_write = fifo->write;

        for (i = 0; i < num; i++) {
                if (new_read == fifo_write)
                        break;
                data[i] = fifo->buffer[new_read];
                new_read = (new_read + 1) & (fifo->len - 1);
        }
        fifo->read = new_read;

        return i;
}
/**
 * Gets the number of elements in the fifo.
 */
static inline unsigned
kni_fifo_count(struct rte_kni_fifo *fifo)
{
        return (fifo->len + fifo->write - fifo->read) & (fifo->len - 1);
}
/**
 * Gets the number of free slots available in the fifo.
 */
static inline unsigned
kni_fifo_free_count(struct rte_kni_fifo *fifo)
{
        return (fifo->read - fifo->write - 1) & (fifo->len - 1);
}
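
/*
 * Note on the fifo arithmetic above: fifo->len is a power of two, so
 * "& (fifo->len - 1)" implements wrap-around without a modulo. One slot
 * is always left empty so that read == write unambiguously means
 * "empty"; the usable capacity is therefore len - 1. For example, with
 * len = 8, read = 3 and write = 2, free_count = (3 - 2 - 1) & 7 = 0,
 * i.e. the fifo is full with its 7 elements.
 */
#if 0
/*
 * Minimal usage sketch (hypothetical helper, not part of the driver):
 * drain a fifo in bursts, checking the return value since fewer
 * elements than requested may be available.
 */
static void
kni_fifo_drain_example(struct rte_kni_fifo *fifo)
{
        void *buf[MBUF_BURST_SZ];
        unsigned n;

        while ((n = kni_fifo_get(fifo, buf, MBUF_BURST_SZ)) > 0) {
                /* process buf[0..n-1] here */
        }
}
#endif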
/*
 * Open and close
 */
static int
kni_net_open(struct net_device *dev)
{
        int ret;
        struct rte_kni_request req;
        struct kni_dev *kni = netdev_priv(dev);

        if (kni->lad_dev)
                memcpy(dev->dev_addr, kni->lad_dev->dev_addr, ETH_ALEN);
        else
                /*
                 * Generate a random MAC address; eth_random_addr() is
                 * the newer kernel API for this.
                 */
                random_ether_addr(dev->dev_addr);

        netif_start_queue(dev);

        memset(&req, 0, sizeof(req));
        req.req_id = RTE_KNI_REQ_CFG_NETWORK_IF;

        /* Setting if_up to non-zero means up */
        req.if_up = 1;
        ret = kni_net_process_request(kni, &req);

        return (ret == 0 ? req.result : ret);
}
static int
kni_net_release(struct net_device *dev)
{
        int ret;
        struct rte_kni_request req;
        struct kni_dev *kni = netdev_priv(dev);

        netif_stop_queue(dev); /* can't transmit any more */

        memset(&req, 0, sizeof(req));
        req.req_id = RTE_KNI_REQ_CFG_NETWORK_IF;

        /* Setting if_up to 0 means down */
        req.if_up = 0;
        ret = kni_net_process_request(kni, &req);

        return (ret == 0 ? req.result : ret);
}
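
/*
 * Note: kni_net_open() and kni_net_release() reuse the same
 * RTE_KNI_REQ_CFG_NETWORK_IF request and differ only in req.if_up, so
 * the actual up/down work is done by the userspace application that
 * services the request queue.
 */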
/*
 * Configuration changes (passed on by ifconfig)
 */
static int
kni_net_config(struct net_device *dev, struct ifmap *map)
{
        if (dev->flags & IFF_UP) /* can't act on a running interface */
                return -EBUSY;

        /* Ignore other fields */
        return 0;
}
/*
 * RX: normal working mode
 */
static void
kni_net_rx_normal(struct kni_dev *kni)
{
        unsigned ret;
        uint32_t len;
        unsigned i, num, num_rq, num_fq;
        struct rte_kni_mbuf *kva;
        struct rte_kni_mbuf *va[MBUF_BURST_SZ];
        void *data_kva;

        struct sk_buff *skb;
        struct net_device *dev = kni->net_dev;

        /* Get the number of entries in rx_q */
        num_rq = kni_fifo_count(kni->rx_q);

        /* Get the number of free entries in free_q */
        num_fq = kni_fifo_free_count(kni->free_q);

        /* Calculate the number of entries to dequeue from rx_q */
        num = min(num_rq, num_fq);
        num = min(num, (unsigned)MBUF_BURST_SZ);

        /* Return if no entry in rx_q or no free entry in free_q */
        if (num == 0)
                return;

        /* Burst dequeue from rx_q */
        ret = kni_fifo_get(kni->rx_q, (void **)va, num);
        if (ret == 0)
                return; /* Failing should not happen */

        /* Transfer received packets to netif */
        for (i = 0; i < num; i++) {
                kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
                len = kva->data_len;
                data_kva = kva->data - kni->mbuf_va + kni->mbuf_kva;

                skb = dev_alloc_skb(len + 2);
                if (!skb) {
                        KNI_ERR("Out of mem, dropping pkts\n");
                        /* Update statistics */
                        kni->stats.rx_dropped++;
                } else {
                        /* Align IP on 16B boundary */
                        skb_reserve(skb, 2);
                        memcpy(skb_put(skb, len), data_kva, len);
                        skb->dev = dev;
                        skb->protocol = eth_type_trans(skb, dev);
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                        /* Call netif interface */
                        netif_receive_skb(skb);

                        /* Update statistics */
                        kni->stats.rx_bytes += len;
                        kni->stats.rx_packets++;
                }
        }

        /* Burst enqueue mbufs into free_q */
        ret = kni_fifo_put(kni->free_q, (void **)va, num);
        if (ret != num)
                /* Failing should not happen */
                KNI_ERR("Fail to enqueue entries into free_q\n");
}
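
/*
 * Note on the address arithmetic used throughout the rx/tx paths: the
 * mbuf pointers carried through the fifos are userspace virtual
 * addresses. The mbuf memory region is also mapped into the kernel, so
 * a userspace address va is converted with a constant offset:
 *
 *     kva = (void *)va - kni->mbuf_va + kni->mbuf_kva;
 *
 * and the same translation is applied to the data pointer stored
 * inside each mbuf.
 */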
/*
 * RX: loopback with enqueue/dequeue fifos.
 */
static void
kni_net_rx_lo_fifo(struct kni_dev *kni)
{
        unsigned ret;
        uint32_t len;
        unsigned i, num, num_rq, num_tq, num_aq, num_fq;
        struct rte_kni_mbuf *kva;
        struct rte_kni_mbuf *va[MBUF_BURST_SZ];
        void *data_kva;

        struct rte_kni_mbuf *alloc_kva;
        struct rte_kni_mbuf *alloc_va[MBUF_BURST_SZ];
        void *alloc_data_kva;

        /* Get the number of entries in rx_q */
        num_rq = kni_fifo_count(kni->rx_q);

        /* Get the number of free entries in tx_q */
        num_tq = kni_fifo_free_count(kni->tx_q);

        /* Get the number of entries in alloc_q */
        num_aq = kni_fifo_count(kni->alloc_q);

        /* Get the number of free entries in free_q */
        num_fq = kni_fifo_free_count(kni->free_q);

        /* Calculate the number of entries to be dequeued from rx_q */
        num = min(num_rq, num_tq);
        num = min(num, num_aq);
        num = min(num, num_fq);
        num = min(num, (unsigned)MBUF_BURST_SZ);

        /* Return if no entry to dequeue from rx_q */
        if (num == 0)
                return;

        /* Burst dequeue from rx_q */
        ret = kni_fifo_get(kni->rx_q, (void **)va, num);
        if (ret == 0)
                return; /* Failing should not happen */

        /* Dequeue entries from alloc_q */
        ret = kni_fifo_get(kni->alloc_q, (void **)alloc_va, num);
        if (ret) {
                num = ret;
                /* Copy mbufs */
                for (i = 0; i < num; i++) {
                        kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
                        len = kva->pkt_len;
                        data_kva = kva->data - kni->mbuf_va +
                                        kni->mbuf_kva;

                        alloc_kva = (void *)alloc_va[i] - kni->mbuf_va +
                                        kni->mbuf_kva;
                        alloc_data_kva = alloc_kva->data - kni->mbuf_va +
                                        kni->mbuf_kva;
                        memcpy(alloc_data_kva, data_kva, len);
                        alloc_kva->pkt_len = len;
                        alloc_kva->data_len = len;

                        kni->stats.tx_bytes += len;
                        kni->stats.rx_bytes += len;
                }

                /* Burst enqueue mbufs into tx_q */
                ret = kni_fifo_put(kni->tx_q, (void **)alloc_va, num);
                if (ret != num)
                        /* Failing should not happen */
                        KNI_ERR("Fail to enqueue mbufs into tx_q\n");
        }

        /* Burst enqueue mbufs into free_q */
        ret = kni_fifo_put(kni->free_q, (void **)va, num);
        if (ret != num)
                /* Failing should not happen */
                KNI_ERR("Fail to enqueue mbufs into free_q\n");

        /**
         * Update statistics; enqueue/dequeue failure should not happen
         * here, as all queue sizes were checked first.
         */
        kni->stats.tx_packets += num;
        kni->stats.rx_packets += num;
}
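
/*
 * Note: the fifo loopback mode above never touches the kernel network
 * stack; it copies each received mbuf into a fresh mbuf from alloc_q
 * and echoes it straight back on tx_q, which makes it useful for
 * benchmarking the fifo and copy path in isolation.
 */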
/*
 * RX: loopback with enqueue/dequeue fifos and sk buffer copies.
 */
static void
kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
{
        unsigned ret;
        uint32_t len;
        unsigned i, num_rq, num_fq, num;
        struct rte_kni_mbuf *kva;
        struct rte_kni_mbuf *va[MBUF_BURST_SZ];
        void *data_kva;

        struct sk_buff *skb;
        struct net_device *dev = kni->net_dev;

        /* Get the number of entries in rx_q */
        num_rq = kni_fifo_count(kni->rx_q);

        /* Get the number of free entries in free_q */
        num_fq = kni_fifo_free_count(kni->free_q);

        /* Calculate the number of entries to dequeue from rx_q */
        num = min(num_rq, num_fq);
        num = min(num, (unsigned)MBUF_BURST_SZ);

        /* Return if no entry to dequeue from rx_q */
        if (num == 0)
                return;

        /* Burst dequeue mbufs from rx_q */
        ret = kni_fifo_get(kni->rx_q, (void **)va, num);
        if (ret == 0)
                return;

        /* Copy mbufs to sk buffer and then call tx interface */
        for (i = 0; i < num; i++) {
                kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
                len = kva->data_len;
                data_kva = kva->data - kni->mbuf_va + kni->mbuf_kva;

                skb = dev_alloc_skb(len + 2);
                if (skb == NULL)
                        KNI_ERR("Out of mem, dropping pkts\n");
                else {
                        /* Align IP on 16B boundary */
                        skb_reserve(skb, 2);
                        memcpy(skb_put(skb, len), data_kva, len);
                        skb->dev = dev;
                        skb->protocol = eth_type_trans(skb, dev);
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        dev_kfree_skb(skb);
                }

                /* Simulate real usage: allocate/copy the skb twice */
                skb = dev_alloc_skb(len + 2);
                if (skb == NULL) {
                        KNI_ERR("Out of mem, dropping pkts\n");
                        kni->stats.rx_dropped++;
                } else {
                        /* Align IP on 16B boundary */
                        skb_reserve(skb, 2);
                        memcpy(skb_put(skb, len), data_kva, len);
                        skb->dev = dev;
                        skb->protocol = eth_type_trans(skb, dev);
                        skb->ip_summed = CHECKSUM_UNNECESSARY;

                        kni->stats.rx_bytes += len;
                        kni->stats.rx_packets++;

                        /* Call tx interface */
                        kni_net_tx(skb, dev);
                }
        }

        /* Enqueue all the mbufs from rx_q into free_q */
        ret = kni_fifo_put(kni->free_q, (void **)&va, num);
        if (ret != num)
                /* Failing should not happen */
                KNI_ERR("Fail to enqueue mbufs into free_q\n");
}
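
/*
 * Note: the first skb above is allocated, filled and immediately freed
 * only to account for the cost that netif delivery would add; the
 * second copy is the one actually echoed back through kni_net_tx().
 */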
/* rx interface */
void
kni_net_rx(struct kni_dev *kni)
{
        /* No NULL check needed: kni_net_rx_func has a default value */
        (*kni_net_rx_func)(kni);
}
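
/*
 * kni_net_rx() is the entry point for the receive path (presumably
 * driven from the kni kthread in kni_misc.c); which of the three rx
 * implementations runs is selected once at module load time by
 * kni_net_config_lo_mode() below.
 */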
/*
 * Transmit a packet (called by the kernel)
 */
static int
kni_net_tx(struct sk_buff *skb, struct net_device *dev)
{
        int len = 0;
        unsigned ret;
        struct kni_dev *kni = netdev_priv(dev);
        struct rte_kni_mbuf *pkt_kva = NULL;
        struct rte_kni_mbuf *pkt_va = NULL;

        dev->trans_start = jiffies; /* save the timestamp */

        /* Check if the length of skb is less than mbuf size */
        if (skb->len > kni->mbuf_size)
                goto drop;

        /**
         * Check if it has at least one free entry in tx_q and
         * one entry in alloc_q.
         */
        if (kni_fifo_free_count(kni->tx_q) == 0 ||
                        kni_fifo_count(kni->alloc_q) == 0) {
                /**
                 * If no free entry in tx_q or no entry in alloc_q,
                 * drop the skb and go out.
                 */
                goto drop;
        }

        /* Dequeue a mbuf from alloc_q */
        ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
        if (likely(ret == 1)) {
                void *data_kva;

                pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
                data_kva = pkt_kva->data - kni->mbuf_va + kni->mbuf_kva;

                len = skb->len;
                memcpy(data_kva, skb->data, len);
                if (unlikely(len < ETH_ZLEN)) {
                        memset(data_kva + len, 0, ETH_ZLEN - len);
                        len = ETH_ZLEN;
                }
                pkt_kva->pkt_len = len;
                pkt_kva->data_len = len;

                /* Enqueue mbuf into tx_q */
                ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
                if (unlikely(ret != 1)) {
                        /* Failing should not happen */
                        KNI_ERR("Fail to enqueue mbuf into tx_q\n");
                        goto drop;
                }
        } else {
                /* Failing should not happen */
                KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
                goto drop;
        }

        /* Free skb and update statistics */
        dev_kfree_skb(skb);
        kni->stats.tx_bytes += len;
        kni->stats.tx_packets++;

        return NETDEV_TX_OK;

drop:
        /* Free skb and update statistics */
        dev_kfree_skb(skb);
        kni->stats.tx_dropped++;

        return NETDEV_TX_OK;
}
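
/*
 * Note on the ETH_ZLEN padding above: there is no real NIC on this path
 * to pad runt frames in hardware, so frames shorter than the 60-byte
 * Ethernet minimum are zero-padded in software before being handed to
 * userspace.
 */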
/*
 * Deal with a transmit timeout.
 */
static void
kni_net_tx_timeout(struct net_device *dev)
{
        struct kni_dev *kni = netdev_priv(dev);

        KNI_DBG("Transmit timeout at %ld, latency %ld\n", jiffies,
                        jiffies - dev->trans_start);

        kni->stats.tx_errors++;
        netif_wake_queue(dev);
}
static int
kni_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct kni_dev *kni = netdev_priv(dev);

        KNI_DBG("kni_net_ioctl %d\n", kni->port_id);
        return 0;
}
static int
kni_net_change_mtu(struct net_device *dev, int new_mtu)
{
        int ret;
        struct rte_kni_request req;
        struct kni_dev *kni = netdev_priv(dev);

        KNI_DBG("kni_net_change_mtu new mtu %d to be set\n", new_mtu);

        memset(&req, 0, sizeof(req));
        req.req_id = RTE_KNI_REQ_CHANGE_MTU;
        req.new_mtu = new_mtu;
        ret = kni_net_process_request(kni, &req);
        if (ret == 0 && req.result == 0)
                dev->mtu = new_mtu;

        return (ret == 0 ? req.result : ret);
}
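
/*
 * Note: the MTU change is only applied to the net_device after the
 * userspace application has both received the request (ret == 0) and
 * reported success (req.result == 0), keeping the kernel and DPDK
 * views of the MTU consistent.
 */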
/* Checks if the userspace application has provided the resp message */
void
kni_net_poll_resp(struct kni_dev *kni)
{
        if (kni_fifo_count(kni->resp_q))
                wake_up_interruptible(&kni->wq);
}
/**
 * Called to process a request passed to userspace.
 */
static int
kni_net_process_request(struct kni_dev *kni, struct rte_kni_request *req)
{
        int ret = -1;
        int ret_val;
        void *resp_va;
        unsigned num;

        if (!kni || !req) {
                KNI_ERR("No kni instance or request\n");
                return -EINVAL;
        }

        mutex_lock(&kni->sync_lock);

        /* Construct data */
        memcpy(kni->sync_kva, req, sizeof(struct rte_kni_request));
        num = kni_fifo_put(kni->req_q, &kni->sync_va, 1);
        if (num < 1) {
                KNI_ERR("Cannot send to req_q\n");
                ret = -EBUSY;
                goto fail;
        }

        ret_val = wait_event_interruptible_timeout(kni->wq,
                        kni_fifo_count(kni->resp_q), 3 * HZ);
        if (signal_pending(current) || ret_val <= 0) {
                ret = -ETIME;
                goto fail;
        }

        num = kni_fifo_get(kni->resp_q, (void **)&resp_va, 1);
        if (num != 1 || resp_va != kni->sync_va) {
                /* This should never happen */
                KNI_ERR("No data in resp_q\n");
                ret = -ENODATA;
                goto fail;
        }

        memcpy(req, kni->sync_kva, sizeof(struct rte_kni_request));
        ret = 0;

fail:
        mutex_unlock(&kni->sync_lock);
        return ret;
}
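
/*
 * The request/response handshake used above, for reference: the request
 * is copied into the shared sync buffer, and its userspace address
 * (sync_va) is enqueued on req_q as a token. Userspace services the
 * request, writes the result back into the same buffer, and enqueues
 * the token on resp_q; the kernel side then copies the buffer back into
 * *req. The sync_lock mutex serializes requests, since there is only
 * one sync buffer per device.
 */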
/*
 * Return statistics to the caller
 */
static struct net_device_stats *
kni_net_stats(struct net_device *dev)
{
        struct kni_dev *kni = netdev_priv(dev);
        return &kni->stats;
}
/*
 * Fill the eth header
 */
static int
kni_net_header(struct sk_buff *skb, struct net_device *dev,
                unsigned short type, const void *daddr,
                const void *saddr, unsigned int len)
{
        struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);

        memcpy(eth->h_source, saddr ? saddr : dev->dev_addr, dev->addr_len);
        memcpy(eth->h_dest, daddr ? daddr : dev->dev_addr, dev->addr_len);
        eth->h_proto = htons(type);

        return dev->hard_header_len;
}
/*
 * Re-fill the eth header
 */
static int
kni_net_rebuild_header(struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        struct ethhdr *eth = (struct ethhdr *) skb->data;

        memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
        memcpy(eth->h_dest, dev->dev_addr, dev->addr_len);
        return 0;
}
static const struct header_ops kni_net_header_ops = {
        .create  = kni_net_header,
        .rebuild = kni_net_rebuild_header,
        .cache   = NULL,  /* disable caching */
};
static const struct net_device_ops kni_net_netdev_ops = {
        .ndo_open = kni_net_open,
        .ndo_stop = kni_net_release,
        .ndo_set_config = kni_net_config,
        .ndo_start_xmit = kni_net_tx,
        .ndo_change_mtu = kni_net_change_mtu,
        .ndo_do_ioctl = kni_net_ioctl,
        .ndo_get_stats = kni_net_stats,
        .ndo_tx_timeout = kni_net_tx_timeout,
};
void
kni_net_init(struct net_device *dev)
{
        struct kni_dev *kni = netdev_priv(dev);

        KNI_DBG("kni_net_init\n");

        init_waitqueue_head(&kni->wq);
        mutex_init(&kni->sync_lock);

        ether_setup(dev); /* assign some of the fields */
        dev->netdev_ops = &kni_net_netdev_ops;
        dev->header_ops = &kni_net_header_ops;
        dev->watchdog_timeo = WD_TIMEOUT;
}
void
kni_net_config_lo_mode(char *lo_str)
{
        if (!lo_str) {
                KNI_PRINT("loopback disabled");
                return;
        }

        if (!strcmp(lo_str, "lo_mode_none"))
                KNI_PRINT("loopback disabled");
        else if (!strcmp(lo_str, "lo_mode_fifo")) {
                KNI_PRINT("loopback mode=lo_mode_fifo enabled");
                kni_net_rx_func = kni_net_rx_lo_fifo;
        } else if (!strcmp(lo_str, "lo_mode_fifo_skb")) {
                KNI_PRINT("loopback mode=lo_mode_fifo_skb enabled");
                kni_net_rx_func = kni_net_rx_lo_fifo_skb;
        } else
                KNI_PRINT("Unknown loopback parameter, loopback disabled");
}
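
/*
 * Usage sketch (assuming the lo_mode module parameter defined in
 * kni_misc.c): the loopback mode is chosen once at module load time,
 * e.g.
 *
 *     insmod rte_kni.ko lo_mode=lo_mode_fifo
 *
 * Any other value (or none) leaves the default kni_net_rx_normal path
 * in place.
 */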