ASSERT_RTNL();
+ /* If we need to wait and the RTNL mutex is held,
+ * drop the mutex and hold a reference to keep the device alive.
+ */
+ if (req->async == 0) {
+ dev_hold(dev);
+ rtnl_unlock();
+ }
+
mutex_lock(&kni->sync_lock);
/* Construct data */
goto fail;
}
+ /* No result is available since the request is handled
+ * asynchronously. Set the response to success.
+ */
+ if (req->async != 0) {
+ req->result = 0;
+ goto async;
+ }
+
ret_val = wait_event_interruptible_timeout(kni->wq,
kni_fifo_count(kni->resp_q), 3 * HZ);
if (signal_pending(current) || ret_val <= 0) {
}
memcpy(req, kni->sync_kva, sizeof(struct rte_kni_request));
+async:
ret = 0;
fail:
mutex_unlock(&kni->sync_lock);
+ if (req->async == 0) {
+ rtnl_lock();
+ dev_put(dev);
+ }
return ret;
}
/* Setting if_up to 0 means down */
req.if_up = 0;
+
+ /* Request asynchronous handling to avoid deadlocking while RTNL is held */
+ req.async = 1;
+
ret = kni_net_process_request(dev, &req);
return (ret == 0) ? req.result : ret;
break;
prev_kva = kva;
- kva = pa2kva(kva->next);
+ kva = get_kva(kni, kva->next);
/* Convert physical address to virtual address */
prev_kva->next = pa2va(prev_kva->next, kva);
}
break;
prev_kva = kva;
- kva = pa2kva(kva->next);
+ kva = get_kva(kni, kva->next);
data_kva = kva2data_kva(kva);
/* Convert physical address to virtual address */
prev_kva->next = pa2va(prev_kva->next, kva);
kni->va[i] = pa2va(kni->pa[i], kva);
while (kva->next) {
- next_kva = pa2kva(kva->next);
+ next_kva = get_kva(kni, kva->next);
/* Convert physical address to virtual address */
kva->next = pa2va(kva->next, next_kva);
kva = next_kva;