#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
+#include <linux/version.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
int
rte_kni_init(unsigned int max_kni_ifaces __rte_unused)
{
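+ /* IOVA=VA needs kernel support (4.10 or newer) to translate userspace
+ * virtual addresses in the KNI kernel module; on older kernels only
+ * physical addressing (IOVA=PA) is usable. */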
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
if (rte_eal_iova_mode() != RTE_IOVA_PA) {
RTE_LOG(ERR, KNI, "KNI requires IOVA as PA\n");
return -1;
}
+#endif
/* Check FD and open */
if (kni_fd < 0) {
char mz_name[RTE_MEMZONE_NAMESIZE];
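+ /* These FIFOs are shared with the kernel module through their starting
+ * address, so each memzone must be IOVA-contiguous. */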
snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_TX_Q_MZ_NAME_FMT, kni->name);
- kni->m_tx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ kni->m_tx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG);
KNI_MEM_CHECK(kni->m_tx_q == NULL, tx_q_fail);
snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RX_Q_MZ_NAME_FMT, kni->name);
- kni->m_rx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ kni->m_rx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG);
KNI_MEM_CHECK(kni->m_rx_q == NULL, rx_q_fail);
snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_ALLOC_Q_MZ_NAME_FMT, kni->name);
- kni->m_alloc_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ kni->m_alloc_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG);
KNI_MEM_CHECK(kni->m_alloc_q == NULL, alloc_q_fail);
snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_FREE_Q_MZ_NAME_FMT, kni->name);
- kni->m_free_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ kni->m_free_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG);
KNI_MEM_CHECK(kni->m_free_q == NULL, free_q_fail);
snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_REQ_Q_MZ_NAME_FMT, kni->name);
- kni->m_req_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ kni->m_req_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG);
KNI_MEM_CHECK(kni->m_req_q == NULL, req_q_fail);
snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RESP_Q_MZ_NAME_FMT, kni->name);
- kni->m_resp_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ kni->m_resp_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG);
KNI_MEM_CHECK(kni->m_resp_q == NULL, resp_q_fail);
snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_SYNC_ADDR_MZ_NAME_FMT, kni->name);
- kni->m_sync_addr = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
+ kni->m_sync_addr = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
+ RTE_MEMZONE_IOVA_CONTIG);
KNI_MEM_CHECK(kni->m_sync_addr == NULL, sync_addr_fail);
return 0;
dev_info.group_id = conf->group_id;
dev_info.mbuf_size = conf->mbuf_size;
dev_info.mtu = conf->mtu;
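+ /* Pass the supported MTU range down so the kernel interface can reject
+ * MTU changes outside [min_mtu, max_mtu]. */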
+ dev_info.min_mtu = conf->min_mtu;
+ dev_info.max_mtu = conf->max_mtu;
memcpy(dev_info.mac_addr, conf->mac_addr, RTE_ETHER_ADDR_LEN);
kni->group_id = conf->group_id;
kni->mbuf_size = conf->mbuf_size;
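+ /* Tell the kernel module whether the addresses it receives are virtual
+ * (IOVA=VA) or physical (IOVA=PA). */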
+ dev_info.iova_mode = (rte_eal_iova_mode() == RTE_IOVA_VA) ? 1 : 0;
+
ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
if (ret < 0)
goto ioctl_fail;
(unsigned long)m->buf_iova));
}
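+/*
+ * Convert the head of an mbuf chain to its physical address and rewrite
+ * every next pointer in the chain to a physical address as well, so the
+ * kernel side can walk multi-segment packets.
+ */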
+static void *
+va2pa_all(struct rte_mbuf *mbuf)
+{
+ void *phy_mbuf = va2pa(mbuf);
+ struct rte_mbuf *next = mbuf->next;
+ while (next) {
+ mbuf->next = va2pa(next);
+ mbuf = next;
+ next = mbuf->next;
+ }
+ return phy_mbuf;
+}
+
static void
obj_free(struct rte_mempool *mp __rte_unused, void *opaque, void *obj,
unsigned obj_idx __rte_unused)
static int
kni_config_promiscusity(uint16_t port_id, uint8_t to_on)
{
+ int ret;
+
if (!rte_eth_dev_is_valid_port(port_id)) {
RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id);
return -EINVAL;
}

RTE_LOG(INFO, KNI, "Configure promiscuous mode of %d to %d\n",
port_id, to_on);
if (to_on)
- rte_eth_promiscuous_enable(port_id);
+ ret = rte_eth_promiscuous_enable(port_id);
else
- rte_eth_promiscuous_disable(port_id);
+ ret = rte_eth_promiscuous_disable(port_id);
+
+ if (ret != 0)
+ RTE_LOG(ERR, KNI,
+ "Failed to %s promiscuous mode for port %u: %s\n",
+ to_on ? "enable" : "disable", port_id,
+ rte_strerror(-ret));
+
+ return ret;
+}
+
+/* default callback for request of configuring allmulticast mode */
+static int
+kni_config_allmulticast(uint16_t port_id, uint8_t to_on)
+{
+ int ret;
+
+ if (!rte_eth_dev_is_valid_port(port_id)) {
+ RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id);
+ return -EINVAL;
+ }
+
+ RTE_LOG(INFO, KNI, "Configure allmulticast mode of %d to %d\n",
+ port_id, to_on);
+
+ if (to_on)
+ ret = rte_eth_allmulticast_enable(port_id);
+ else
+ ret = rte_eth_allmulticast_disable(port_id);
+ if (ret != 0)
+ RTE_LOG(ERR, KNI,
+ "Failed to %s allmulticast mode for port %u: %s\n",
+ to_on ? "enable" : "disable", port_id,
+ rte_strerror(-ret));
+
+ return ret;
+}
req->result = kni_config_promiscusity(
kni->ops.port_id, req->promiscusity);
break;
+ case RTE_KNI_REQ_CHANGE_ALLMULTI: /* Change ALLMULTICAST MODE */
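+ /* Prefer an application-registered callback; fall back to the default
+ * handler when only a port id is known. */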
+ if (kni->ops.config_allmulticast)
+ req->result = kni->ops.config_allmulticast(
+ kni->ops.port_id, req->allmulti);
+ else if (kni->ops.port_id != UINT16_MAX)
+ req->result = kni_config_allmulticast(
+ kni->ops.port_id, req->allmulti);
+ break;
default:
RTE_LOG(ERR, KNI, "Unknown request id %u\n", req->req_id);
req->result = -EINVAL;
unsigned
rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num)
{
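+ /* Cap the burst at the free space in rx_q up front: va2pa_all() rewrites
+ * next pointers to physical addresses, so an mbuf converted but not
+ * enqueued could no longer be freed safely by the caller. */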
+ num = RTE_MIN(kni_fifo_free_count(kni->rx_q), num);
void *phy_mbufs[num];
unsigned int ret;
unsigned int i;
for (i = 0; i < num; i++)
- phy_mbufs[i] = va2pa(mbufs[i]);
+ phy_mbufs[i] = va2pa_all(mbufs[i]);
ret = kni_fifo_put(kni->rx_q, phy_mbufs, num);
if (ops->change_mtu == NULL
&& ops->config_network_if == NULL
&& ops->config_mac_address == NULL
- && ops->config_promiscusity == NULL)
+ && ops->config_promiscusity == NULL
+ && ops->config_allmulticast == NULL)
return KNI_REQ_NO_REGISTER;
return KNI_REQ_REGISTERED;