#include <linux/skbuff.h>
#include <linux/kthread.h>
#include <linux/delay.h>
+#include <linux/rtnetlink.h>
#include <rte_kni_common.h>
#include <kni_fifo.h>
/*
 * Map an mbuf's data area to a kernel virtual address in IOVA mode:
 * translate the buffer IOVA to a physical address through the user
 * task's mapping (kni->usr_tsk), convert that PA to a kernel VA with
 * phys_to_virt(), then add the mbuf's data offset.
 * (This hunk renames the mbuf field buf_physaddr -> buf_iova.)
 */
static inline void *
iova2data_kva(struct kni_dev *kni, struct rte_kni_mbuf *m)
{
- return phys_to_virt(iova_to_phys(kni->usr_tsk, m->buf_physaddr) +
+ return phys_to_virt(iova_to_phys(kni->usr_tsk, m->buf_iova) +
		m->data_off);
}
#endif
va = (void *)((unsigned long)pa +
(unsigned long)m->buf_addr -
- (unsigned long)m->buf_physaddr);
+ (unsigned long)m->buf_iova);
return va;
}
/*
 * Return the kernel virtual address of an mbuf's data area.
 * phys_to_virt() is only valid for physical addresses, so this path
 * assumes buf_iova holds a PA (physical-address mode) -- the IOVA-mode
 * equivalent is iova2data_kva() above.
 * (This hunk renames the mbuf field buf_physaddr -> buf_iova.)
 */
static void *
kva2data_kva(struct rte_kni_mbuf *m)
{
- return phys_to_virt(m->buf_physaddr + m->data_off);
+ return phys_to_virt(m->buf_iova + m->data_off);
}
static inline void *
* It can be called to process the request.
*/
/*
 * Hand a request to userspace over the sync FIFO and, unless the
 * request is marked async, wait (up to 3*HZ) for the response and copy
 * it back into *req.
 *
 * This hunk reworks the function to take the net_device (deriving the
 * kni_dev via netdev_priv) and to run under RTNL: for synchronous
 * requests the RTNL mutex is dropped while waiting -- holding a device
 * reference so the device cannot disappear -- to avoid deadlocking
 * against userspace paths that themselves take RTNL.
 */
static int
-kni_net_process_request(struct kni_dev *kni, struct rte_kni_request *req)
+kni_net_process_request(struct net_device *dev, struct rte_kni_request *req)
{
+ struct kni_dev *kni = netdev_priv(dev);
 int ret = -1;
 void *resp_va;
 uint32_t num;
 int ret_val;
- if (!kni || !req) {
- pr_err("No kni instance or request\n");
- return -EINVAL;
+ ASSERT_RTNL();
+
+ /* If we are going to wait for the response while the RTNL
+  * mutex is held, drop the mutex and take a reference so the
+  * device is kept alive across the unlocked window.
+  */
+ if (req->async == 0) {
+ dev_hold(dev);
+ rtnl_unlock();
 }
 mutex_lock(&kni->sync_lock);
 goto fail;
 }
+ /* No result available since request is handled
+  * asynchronously. set response to success.
+  */
+ if (req->async != 0) {
+ req->result = 0;
+ goto async;
+ }
+
 ret_val = wait_event_interruptible_timeout(kni->wq,
 kni_fifo_count(kni->resp_q), 3 * HZ);
 if (signal_pending(current) || ret_val <= 0) {
 }
 memcpy(req, kni->sync_kva, sizeof(struct rte_kni_request));
+async:
 ret = 0;
fail:
 mutex_unlock(&kni->sync_lock);
+ /* NOTE(review): the memcpy above overwrote *req -- including
+  * req->async -- with the response from userspace.  If that copy
+  * can carry a different async value, this test may disagree
+  * with the one taken on entry, unbalancing the
+  * rtnl_unlock()/rtnl_lock() and dev_hold()/dev_put() pairs.
+  * Consider latching req->async in a local on entry -- TODO
+  * confirm against the full (elided) function body.
+  */
+ if (req->async == 0) {
+ rtnl_lock();
+ dev_put(dev);
+ }
 return ret;
}
{
int ret;
struct rte_kni_request req;
- struct kni_dev *kni = netdev_priv(dev);
netif_start_queue(dev);
if (kni_dflt_carrier == 1)
/* Setting if_up to non-zero means up */
req.if_up = 1;
- ret = kni_net_process_request(kni, &req);
+ ret = kni_net_process_request(dev, &req);
return (ret == 0) ? req.result : ret;
}
{
int ret;
struct rte_kni_request req;
- struct kni_dev *kni = netdev_priv(dev);
netif_stop_queue(dev); /* can't transmit any more */
netif_carrier_off(dev);
/* Setting if_up to 0 means down */
req.if_up = 0;
- ret = kni_net_process_request(kni, &req);
+
+ /* request async because of the deadlock problem */
+ req.async = 1;
+
+ ret = kni_net_process_request(dev, &req);
return (ret == 0) ? req.result : ret;
}
break;
prev_kva = kva;
- kva = pa2kva(kva->next);
+ kva = get_kva(kni, kva->next);
/* Convert physical address to virtual address */
prev_kva->next = pa2va(prev_kva->next, kva);
}
break;
prev_kva = kva;
- kva = pa2kva(kva->next);
+ kva = get_kva(kni, kva->next);
data_kva = kva2data_kva(kva);
/* Convert physical address to virtual address */
prev_kva->next = pa2va(prev_kva->next, kva);
kni->va[i] = pa2va(kni->pa[i], kva);
while (kva->next) {
- next_kva = pa2kva(kva->next);
+ next_kva = get_kva(kni, kva->next);
/* Convert physical address to virtual address */
kva->next = pa2va(kva->next, next_kva);
kva = next_kva;
/*
* Deal with a transmit timeout.
*/
+#ifdef HAVE_TX_TIMEOUT_TXQUEUE
+static void
+kni_net_tx_timeout(struct net_device *dev, unsigned int txqueue)
+#else
static void
kni_net_tx_timeout(struct net_device *dev)
+#endif
{
pr_debug("Transmit timeout at %ld, latency %ld\n", jiffies,
jiffies - dev_trans_start(dev));
{
int ret;
struct rte_kni_request req;
- struct kni_dev *kni = netdev_priv(dev);
pr_debug("kni_net_change_mtu new mtu %d to be set\n", new_mtu);
memset(&req, 0, sizeof(req));
req.req_id = RTE_KNI_REQ_CHANGE_MTU;
req.new_mtu = new_mtu;
- ret = kni_net_process_request(kni, &req);
+ ret = kni_net_process_request(dev, &req);
if (ret == 0 && req.result == 0)
dev->mtu = new_mtu;
kni_net_change_rx_flags(struct net_device *netdev, int flags)
{
struct rte_kni_request req;
- struct kni_dev *kni = netdev_priv(netdev);
memset(&req, 0, sizeof(req));
req.promiscusity = 0;
}
- kni_net_process_request(kni, &req);
+ kni_net_process_request(netdev, &req);
}
/*
{
int ret;
struct rte_kni_request req;
- struct kni_dev *kni;
struct sockaddr *addr = p;
memset(&req, 0, sizeof(req));
memcpy(req.mac_addr, addr->sa_data, netdev->addr_len);
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- kni = netdev_priv(netdev);
- ret = kni_net_process_request(kni, &req);
+ ret = kni_net_process_request(netdev, &req);
return (ret == 0 ? req.result : ret);
}