/*
 * NOTE(review): This span is not valid C source — it is a mangled unified-diff
 * fragment (note the surviving '+' / '-' hunk markers) that appears to come
 * from DPDK's lib/librte_kni/rte_kni.c.  The hunk headers and context lines
 * were stripped, fusing pieces of several distinct functions (rte_kni_init,
 * rte_kni_alloc, rte_kni_release, rte_kni_get) into one apparent body.
 * Nothing here can be compiled or safely restyled; comments below are for
 * orientation only and every original token is left byte-identical.
 * TODO: recover the original patch (or the post-patch rte_kni.c) and replace
 * this fragment with real source before any further editing.
 */
int
rte_kni_init(unsigned int max_kni_ifaces __rte_unused)
{
/* NOTE(review): '+' hunk — adds an early failure when the EAL IOVA mode is
 * not physical-address.  Presumably the KNI kernel module needs PA-based
 * IOVAs (inferred from the log text only — confirm against DPDK docs). */
+ if (rte_eal_iova_mode() != RTE_IOVA_PA) {
+ RTE_LOG(ERR, KNI, "KNI requires IOVA as PA\n");
+ return -1;
+ }
+
/* Check FD and open */
if (kni_fd < 0) {
kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
/* NOTE(review): 'return NULL' inside an int-returning function — this line
 * almost certainly belongs to a different function (likely rte_kni_alloc)
 * whose surrounding context was lost when the diff was garbled. */
return NULL;
}
/* NOTE(review): recurring '-'/'+' pair — the patch replaces direct
 * rte_rwlock_*(RTE_EAL_TAILQ_RWLOCK) locking with the rte_mcfg_tailq_*()
 * wrapper API (part of DPDK's "hide shared memory config" rework). */
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_lock();
/* NOTE(review): 'conf->name' — no 'conf' parameter is visible above; this
 * fragment is from rte_kni_alloc's duplicate-name check. */
kni = __rte_kni_get(conf->name);
if (kni != NULL) {
kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
TAILQ_INSERT_TAIL(kni_list, te, next);
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
/* Allocate mbufs and then put them into alloc_q */
kni_allocate_mbufs(kni);
/* NOTE(review): error-cleanup labels from the alloc path; the gotos that
 * target them are not visible in this fragment. */
kni_fail:
rte_free(te);
unlock:
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
return NULL;
}
/* NOTE(review): from here the fragment switches to what looks like
 * rte_kni_release — find the tailq entry for 'kni' and unlink it under the
 * write lock. */
kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
- rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_lock();
TAILQ_FOREACH(te, kni_list, next) {
if (te->data == kni)
TAILQ_REMOVE(kni_list, te, next);
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
/* mbufs in all fifo should be released, except request/response */
return 0;
/* NOTE(review): second 'unlock:' label — duplicate labels cannot coexist in
 * one C function, confirming two separate functions were fused here. */
unlock:
- rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_write_unlock();
return -1;
}
/* NOTE(review): orphaned body of rte_kni_get — validate the name, look up
 * the handle under the read lock, and return it (NULL semantics implied by
 * the guard, but the enclosing signature is not visible). */
if (name == NULL || name[0] == '\0')
return NULL;
- rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_read_lock();
kni = __rte_kni_get(name);
- rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+ rte_mcfg_tailq_read_unlock();
return kni;
}