struct net_device_stats stats;
int status;
uint16_t group_id; /* Group ID of a group of KNI devices */
- unsigned int core_id; /* Core ID to bind */
+ uint32_t core_id; /* Core ID to bind */
char name[RTE_KNI_NAMESIZE]; /* Network device name */
struct task_struct *pthread;
void *mbuf_va;
/* mbuf size */
- unsigned int mbuf_size;
+ uint32_t mbuf_size;
/* synchro for request processing */
unsigned long synchro;
};
#ifdef RTE_KNI_VHOST
-unsigned int
+uint32_t
kni_poll(struct file *file, struct socket *sock, poll_table * wait);
int kni_chk_vhost_rx(struct kni_dev *kni);
int kni_vhost_init(struct kni_dev *kni);
int vnet_hdr_sz;
struct kni_dev *kni;
int sockfd;
- unsigned int flags;
+ uint32_t flags;
struct sk_buff *cache;
struct rte_kni_fifo *fifo;
};
/**
* Adds num elements into the fifo. Return the number actually written
*/
-static inline unsigned int
-kni_fifo_put(struct rte_kni_fifo *fifo, void **data, unsigned int num)
+static inline uint32_t
+kni_fifo_put(struct rte_kni_fifo *fifo, void **data, uint32_t num)
{
- unsigned int i = 0;
- unsigned int fifo_write = fifo->write;
- unsigned int fifo_read = fifo->read;
- unsigned int new_write = fifo_write;
+ uint32_t i = 0;
+ uint32_t fifo_write = fifo->write;
+ uint32_t fifo_read = fifo->read;
+ uint32_t new_write = fifo_write;
for (i = 0; i < num; i++) {
new_write = (new_write + 1) & (fifo->len - 1);
/**
* Get up to num elements from the fifo. Return the number actually read
*/
-static inline unsigned int
-kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned int num)
+static inline uint32_t
+kni_fifo_get(struct rte_kni_fifo *fifo, void **data, uint32_t num)
{
- unsigned int i = 0;
- unsigned int new_read = fifo->read;
- unsigned int fifo_write = fifo->write;
+ uint32_t i = 0;
+ uint32_t new_read = fifo->read;
+ uint32_t fifo_write = fifo->write;
for (i = 0; i < num; i++) {
if (new_read == fifo_write)
/**
* Get the num of elements in the fifo
*/
-static inline unsigned int
+static inline uint32_t
kni_fifo_count(struct rte_kni_fifo *fifo)
{
return (fifo->len + fifo->write - fifo->read) & (fifo->len - 1);
/**
* Get the num of available elements in the fifo
*/
-static inline unsigned int
+static inline uint32_t
kni_fifo_free_count(struct rte_kni_fifo *fifo)
{
return (fifo->read - fifo->write - 1) & (fifo->len - 1);
* Initializes the kni fifo structure
*/
static inline void
-kni_fifo_init(struct rte_kni_fifo *fifo, unsigned int size)
+kni_fifo_init(struct rte_kni_fifo *fifo, uint32_t size)
{
fifo->write = 0;
fifo->read = 0;
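For reference, the masking in kni_fifo_count()/kni_fifo_free_count() above only works because fifo->len is a power of two. A minimal userspace sketch of the same wrap-around arithmetic (toy_fifo and the sample values are hypothetical, not part of this patch):

#include <stdint.h>
#include <stdio.h>

/* Toy ring mirroring the read/write/len fields above; len must be a power of two. */
struct toy_fifo {
        uint32_t write;
        uint32_t read;
        uint32_t len;
};

/* Same expression as kni_fifo_count(): occupied slots, modulo len */
static inline uint32_t
toy_fifo_count(const struct toy_fifo *f)
{
        return (f->len + f->write - f->read) & (f->len - 1);
}

/* Same expression as kni_fifo_free_count(): one slot always stays unused */
static inline uint32_t
toy_fifo_free_count(const struct toy_fifo *f)
{
        return (f->read - f->write - 1) & (f->len - 1);
}

int main(void)
{
        /* write has wrapped past read: 16 + 3 - 14 = 5 entries in use */
        struct toy_fifo f = { .write = 3, .read = 14, .len = 16 };

        printf("count=%u free=%u\n", toy_fifo_count(&f), toy_fifo_free_count(&f));
        /* prints: count=5 free=10 */
        return 0;
}

Keeping one slot unused (the "- 1" in the free count) is what lets read == write unambiguously mean "empty" rather than "full".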
/* Kernel thread mode */
static char *kthread_mode;
-static unsigned int multiple_kthread_on;
+static uint32_t multiple_kthread_on;
#define KNI_DEV_IN_USE_BIT_NUM 0 /* Bit number for device in use */
}
static int
-kni_ioctl_create(struct net *net,
- unsigned int ioctl_num, unsigned long ioctl_param)
+kni_ioctl_create(struct net *net, uint32_t ioctl_num,
+ unsigned long ioctl_param)
{
struct kni_net *knet = net_generic(net, kni_net_id);
int ret;
}
static int
-kni_ioctl_release(struct net *net,
- unsigned int ioctl_num, unsigned long ioctl_param)
+kni_ioctl_release(struct net *net, uint32_t ioctl_num,
+ unsigned long ioctl_param)
{
struct kni_net *knet = net_generic(net, kni_net_id);
int ret = -EINVAL;
}
static int
-kni_ioctl(struct inode *inode,
- unsigned int ioctl_num,
- unsigned long ioctl_param)
+kni_ioctl(struct inode *inode, uint32_t ioctl_num, unsigned long ioctl_param)
{
int ret = -EINVAL;
struct net *net = current->nsproxy->net_ns;
}
static int
-kni_compat_ioctl(struct inode *inode,
- unsigned int ioctl_num,
+kni_compat_ioctl(struct inode *inode, uint32_t ioctl_num,
unsigned long ioctl_param)
{
/* 32-bit app on 64-bit OS to be supported later */
{
int ret = -1;
void *resp_va;
- unsigned int num;
+ uint32_t num;
int ret_val;
if (!kni || !req) {
kni_net_tx(struct sk_buff *skb, struct net_device *dev)
{
int len = 0;
- unsigned int ret;
+ uint32_t ret;
struct kni_dev *kni = netdev_priv(dev);
struct rte_kni_mbuf *pkt_kva = NULL;
void *pkt_pa = NULL;
static void
kni_net_rx_normal(struct kni_dev *kni)
{
- unsigned int ret;
+ uint32_t ret;
uint32_t len;
- unsigned int i, num_rx, num_fq;
+ uint32_t i, num_rx, num_fq;
struct rte_kni_mbuf *kva;
void *data_kva;
struct sk_buff *skb;
}
/* Calculate the number of entries to dequeue from rx_q */
- num_rx = min_t(unsigned int, num_fq, MBUF_BURST_SZ);
+ num_rx = min_t(uint32_t, num_fq, MBUF_BURST_SZ);
/* Burst dequeue from rx_q */
num_rx = kni_fifo_get(kni->rx_q, kni->pa, num_rx);
static void
kni_net_rx_lo_fifo(struct kni_dev *kni)
{
- unsigned int ret;
+ uint32_t ret;
uint32_t len;
- unsigned int i, num, num_rq, num_tq, num_aq, num_fq;
+ uint32_t i, num, num_rq, num_tq, num_aq, num_fq;
struct rte_kni_mbuf *kva;
void *data_kva;
struct rte_kni_mbuf *alloc_kva;
num = min(num_rq, num_tq);
num = min(num, num_aq);
num = min(num, num_fq);
- num = min_t(unsigned int, num, MBUF_BURST_SZ);
+ num = min_t(uint32_t, num, MBUF_BURST_SZ);
/* Return if no entry to dequeue from rx_q */
if (num == 0)
static void
kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
{
- unsigned int ret;
+ uint32_t ret;
uint32_t len;
- unsigned int i, num_rq, num_fq, num;
+ uint32_t i, num_rq, num_fq, num;
struct rte_kni_mbuf *kva;
void *data_kva;
struct sk_buff *skb;
/* Calculate the number of entries to dequeue from rx_q */
num = min(num_rq, num_fq);
- num = min_t(unsigned int, num, MBUF_BURST_SZ);
+ num = min_t(uint32_t, num, MBUF_BURST_SZ);
/* Return if no entry to dequeue from rx_q */
if (num == 0)
static int
kni_net_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
- const void *saddr, unsigned int len)
+ const void *saddr, uint32_t len)
{
struct ethhdr *eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
static inline int
kni_vhost_net_tx(struct kni_dev *kni, struct msghdr *m,
- unsigned int offset, unsigned int len)
+ uint32_t offset, uint32_t len)
{
struct rte_kni_mbuf *pkt_kva = NULL;
struct rte_kni_mbuf *pkt_va = NULL;
static inline int
kni_vhost_net_rx(struct kni_dev *kni, struct msghdr *m,
- unsigned int offset, unsigned int len)
+ uint32_t offset, uint32_t len)
{
uint32_t pkt_len;
struct rte_kni_mbuf *kva;
return 0;
}
-static unsigned int
+static uint32_t
kni_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct kni_vhost_queue *q =
container_of(sock->sk, struct kni_vhost_queue, sk);
struct kni_dev *kni;
- unsigned int mask = 0;
+ uint32_t mask = 0;
if (unlikely(q == NULL || q->kni == NULL))
return POLLERR;
kni_chk_vhost_rx(struct kni_dev *kni)
{
struct kni_vhost_queue *q = kni->vhost_queue;
- unsigned int nb_in, nb_mbuf, nb_skb;
- const unsigned int BURST_MASK = RX_BURST_SZ - 1;
- unsigned int nb_burst, nb_backlog, i;
+ uint32_t nb_in, nb_mbuf, nb_skb;
+ const uint32_t BURST_MASK = RX_BURST_SZ - 1;
+ uint32_t nb_burst, nb_backlog, i;
struct sk_buff *skb[RX_BURST_SZ];
struct rte_kni_mbuf *va[RX_BURST_SZ];
nb_mbuf = kni_fifo_count(kni->rx_q);
nb_in = min(nb_mbuf, nb_skb);
- nb_in = min_t(unsigned int, nb_in, RX_BURST_SZ);
+ nb_in = min_t(uint32_t, nb_in, RX_BURST_SZ);
nb_burst = (nb_in & ~BURST_MASK);
nb_backlog = (nb_in & BURST_MASK);
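Since RX_BURST_SZ is a power of two, the two masks above split nb_in into whole RX_BURST_SZ-sized bursts plus a sub-burst remainder. A hedged standalone illustration of the same split (BURST_SZ stands in for RX_BURST_SZ; the value 32 and the count 75 are example values only):

#include <stdint.h>
#include <stdio.h>

#define BURST_SZ 32     /* assumed power of two, as the masking requires */

int main(void)
{
        const uint32_t burst_mask = BURST_SZ - 1;
        uint32_t nb_in = 75;    /* example number of packets ready */

        uint32_t nb_burst = nb_in & ~burst_mask;        /* 64: handled as full bursts */
        uint32_t nb_backlog = nb_in & burst_mask;       /* 11: leftover below one burst */

        printf("burst=%u backlog=%u\n", nb_burst, nb_backlog);
        return 0;
}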
/* dummy tap like ioctl */
static int
-kni_sock_ioctl(struct socket *sock, unsigned int cmd,
- unsigned long arg)
+kni_sock_ioctl(struct socket *sock, uint32_t cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
struct ifreq __user *ifr = argp;
- unsigned int __user *up = argp;
+ uint32_t __user *up = argp;
struct kni_vhost_queue *q =
container_of(sock->sk, struct kni_vhost_queue, sk);
struct kni_dev *kni;
- unsigned int u;
+ uint32_t u;
int __user *sp = argp;
int s;
int ret;
}
static int
-kni_sock_compat_ioctl(struct socket *sock, unsigned int cmd,
+kni_sock_compat_ioctl(struct socket *sock, uint32_t cmd,
unsigned long arg)
{
/* 32-bit app on 64-bit OS to be supported later */