/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#ifndef RTE_EXEC_ENV_LINUX
#error "KNI is not supported"
#endif

#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_kni.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_rwlock.h>
#include <rte_eal_memconfig.h>
#include <rte_kni_common.h>
#include "rte_kni_fifo.h"
#define MAX_MBUF_BURST_NUM	32

/* Maximum number of ring entries */
#define KNI_FIFO_COUNT_MAX	1024
#define KNI_FIFO_SIZE		(KNI_FIFO_COUNT_MAX * sizeof(void *) + \
					sizeof(struct rte_kni_fifo))

#define KNI_REQUEST_MBUF_NUM_MAX	32

#define KNI_MEM_CHECK(cond, fail) do { if (cond) goto fail; } while (0)
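/*
 * Note: KNI_MEM_CHECK() backs the goto-style unwinding used in
 * kni_reserve_mz() below; each failed reservation jumps to a label that
 * releases everything reserved before it, in reverse order.
 */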
#define KNI_MZ_NAME_FMT			"kni_info_%s"
#define KNI_TX_Q_MZ_NAME_FMT		"kni_tx_%s"
#define KNI_RX_Q_MZ_NAME_FMT		"kni_rx_%s"
#define KNI_ALLOC_Q_MZ_NAME_FMT		"kni_alloc_%s"
#define KNI_FREE_Q_MZ_NAME_FMT		"kni_free_%s"
#define KNI_REQ_Q_MZ_NAME_FMT		"kni_req_%s"
#define KNI_RESP_Q_MZ_NAME_FMT		"kni_resp_%s"
#define KNI_SYNC_ADDR_MZ_NAME_FMT	"kni_sync_%s"
TAILQ_HEAD(rte_kni_list, rte_tailq_entry);
static struct rte_tailq_elem rte_kni_tailq = {
	.name = "RTE_KNI",
};
EAL_REGISTER_TAILQ(rte_kni_tailq)
/**
 * KNI context
 */
struct rte_kni {
	char name[RTE_KNI_NAMESIZE];        /**< KNI interface name */
	uint16_t group_id;                  /**< Group ID of KNI devices */
	uint32_t slot_id;                   /**< KNI pool slot ID */
	struct rte_mempool *pktmbuf_pool;   /**< pkt mbuf mempool */
	unsigned int mbuf_size;             /**< mbuf size */

	const struct rte_memzone *m_tx_q;   /**< TX queue memzone */
	const struct rte_memzone *m_rx_q;   /**< RX queue memzone */
	const struct rte_memzone *m_alloc_q;/**< Alloc queue memzone */
	const struct rte_memzone *m_free_q; /**< Free queue memzone */

	struct rte_kni_fifo *tx_q;          /**< TX queue */
	struct rte_kni_fifo *rx_q;          /**< RX queue */
	struct rte_kni_fifo *alloc_q;       /**< Allocated mbufs queue */
	struct rte_kni_fifo *free_q;        /**< To be freed mbufs queue */

	const struct rte_memzone *m_req_q;  /**< Request queue memzone */
	const struct rte_memzone *m_resp_q; /**< Response queue memzone */
	const struct rte_memzone *m_sync_addr;/**< Sync addr memzone */

	/* For request & response */
	struct rte_kni_fifo *req_q;         /**< Request queue */
	struct rte_kni_fifo *resp_q;        /**< Response queue */
	void *sync_addr;                    /**< Req/Resp Mem address */

	struct rte_kni_ops ops;             /**< operations for request */
};

enum kni_ops_status {
	KNI_REQ_NO_REGISTER = 0,
	KNI_REQ_REGISTERED,
};
static void kni_free_mbufs(struct rte_kni *kni);
static void kni_allocate_mbufs(struct rte_kni *kni);

static volatile int kni_fd = -1;
/* Shall be called before any allocation happens */
int
rte_kni_init(unsigned int max_kni_ifaces __rte_unused)
{
	if (rte_eal_iova_mode() != RTE_IOVA_PA) {
		RTE_LOG(ERR, KNI, "KNI requires IOVA as PA\n");
		return -1;
	}

	/* Check FD and open */
	if (kni_fd < 0) {
		kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
		if (kni_fd < 0) {
			RTE_LOG(ERR, KNI,
				"Can not open /dev/%s\n", KNI_DEVICE);
			return -1;
		}
	}

	return 0;
}
static struct rte_kni *
__rte_kni_get(const char *name)
{
	struct rte_kni *kni = NULL;
	struct rte_tailq_entry *te;
	struct rte_kni_list *kni_list;

	kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);

	TAILQ_FOREACH(te, kni_list, next) {
		kni = te->data;
		if (strncmp(name, kni->name, RTE_KNI_NAMESIZE) == 0)
			break;
	}

	if (te == NULL)
		kni = NULL;

	return kni;
}
static int
kni_reserve_mz(struct rte_kni *kni)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_TX_Q_MZ_NAME_FMT, kni->name);
	kni->m_tx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MEM_CHECK(kni->m_tx_q == NULL, tx_q_fail);

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RX_Q_MZ_NAME_FMT, kni->name);
	kni->m_rx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MEM_CHECK(kni->m_rx_q == NULL, rx_q_fail);

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_ALLOC_Q_MZ_NAME_FMT, kni->name);
	kni->m_alloc_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MEM_CHECK(kni->m_alloc_q == NULL, alloc_q_fail);

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_FREE_Q_MZ_NAME_FMT, kni->name);
	kni->m_free_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MEM_CHECK(kni->m_free_q == NULL, free_q_fail);

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_REQ_Q_MZ_NAME_FMT, kni->name);
	kni->m_req_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MEM_CHECK(kni->m_req_q == NULL, req_q_fail);

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RESP_Q_MZ_NAME_FMT, kni->name);
	kni->m_resp_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MEM_CHECK(kni->m_resp_q == NULL, resp_q_fail);

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_SYNC_ADDR_MZ_NAME_FMT, kni->name);
	kni->m_sync_addr = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MEM_CHECK(kni->m_sync_addr == NULL, sync_addr_fail);
	return 0;

sync_addr_fail:
	rte_memzone_free(kni->m_resp_q);
resp_q_fail:
	rte_memzone_free(kni->m_req_q);
req_q_fail:
	rte_memzone_free(kni->m_free_q);
free_q_fail:
	rte_memzone_free(kni->m_alloc_q);
alloc_q_fail:
	rte_memzone_free(kni->m_rx_q);
rx_q_fail:
	rte_memzone_free(kni->m_tx_q);
tx_q_fail:
	return -1;
}
static void
kni_release_mz(struct rte_kni *kni)
{
	rte_memzone_free(kni->m_tx_q);
	rte_memzone_free(kni->m_rx_q);
	rte_memzone_free(kni->m_alloc_q);
	rte_memzone_free(kni->m_free_q);
	rte_memzone_free(kni->m_req_q);
	rte_memzone_free(kni->m_resp_q);
	rte_memzone_free(kni->m_sync_addr);
}
struct rte_kni *
rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
	      const struct rte_kni_conf *conf,
	      struct rte_kni_ops *ops)
{
	int ret;
	struct rte_kni_device_info dev_info;
	struct rte_kni *kni;
	struct rte_tailq_entry *te;
	struct rte_kni_list *kni_list;

	if (!pktmbuf_pool || !conf || !conf->name[0])
		return NULL;

	/* Check if KNI subsystem has been initialized */
	if (kni_fd < 0) {
		RTE_LOG(ERR, KNI, "KNI subsystem has not been initialized. Invoke rte_kni_init() first\n");
		return NULL;
	}

	rte_mcfg_tailq_write_lock();
	kni = __rte_kni_get(conf->name);
	if (kni != NULL) {
		RTE_LOG(ERR, KNI, "KNI already exists\n");
		goto unlock;
	}

	te = rte_zmalloc("KNI_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, KNI, "Failed to allocate tailq entry\n");
		goto unlock;
	}

	kni = rte_zmalloc("KNI", sizeof(struct rte_kni), RTE_CACHE_LINE_SIZE);
	if (kni == NULL) {
		RTE_LOG(ERR, KNI, "KNI memory allocation failed\n");
		goto kni_fail;
	}

	strlcpy(kni->name, conf->name, RTE_KNI_NAMESIZE);

	if (ops)
		memcpy(&kni->ops, ops, sizeof(struct rte_kni_ops));
	else
		kni->ops.port_id = UINT16_MAX;
	memset(&dev_info, 0, sizeof(dev_info));
	dev_info.core_id = conf->core_id;
	dev_info.force_bind = conf->force_bind;
	dev_info.group_id = conf->group_id;
	dev_info.mbuf_size = conf->mbuf_size;
	dev_info.mtu = conf->mtu;

	memcpy(dev_info.mac_addr, conf->mac_addr, RTE_ETHER_ADDR_LEN);

	strlcpy(dev_info.name, conf->name, RTE_KNI_NAMESIZE);
	ret = kni_reserve_mz(kni);
	if (ret < 0)
		goto mz_fail;
	/* TX RING */
	kni->tx_q = kni->m_tx_q->addr;
	kni_fifo_init(kni->tx_q, KNI_FIFO_COUNT_MAX);
	dev_info.tx_phys = kni->m_tx_q->phys_addr;

	/* RX RING */
	kni->rx_q = kni->m_rx_q->addr;
	kni_fifo_init(kni->rx_q, KNI_FIFO_COUNT_MAX);
	dev_info.rx_phys = kni->m_rx_q->phys_addr;

	/* ALLOC RING */
	kni->alloc_q = kni->m_alloc_q->addr;
	kni_fifo_init(kni->alloc_q, KNI_FIFO_COUNT_MAX);
	dev_info.alloc_phys = kni->m_alloc_q->phys_addr;

	/* FREE RING */
	kni->free_q = kni->m_free_q->addr;
	kni_fifo_init(kni->free_q, KNI_FIFO_COUNT_MAX);
	dev_info.free_phys = kni->m_free_q->phys_addr;

	/* Request RING */
	kni->req_q = kni->m_req_q->addr;
	kni_fifo_init(kni->req_q, KNI_FIFO_COUNT_MAX);
	dev_info.req_phys = kni->m_req_q->phys_addr;

	/* Response RING */
	kni->resp_q = kni->m_resp_q->addr;
	kni_fifo_init(kni->resp_q, KNI_FIFO_COUNT_MAX);
	dev_info.resp_phys = kni->m_resp_q->phys_addr;

	/* Req/Resp sync mem area */
	kni->sync_addr = kni->m_sync_addr->addr;
	dev_info.sync_va = kni->m_sync_addr->addr;
	dev_info.sync_phys = kni->m_sync_addr->phys_addr;
	kni->pktmbuf_pool = pktmbuf_pool;
	kni->group_id = conf->group_id;
	kni->mbuf_size = conf->mbuf_size;
	ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
	if (ret < 0)
		goto ioctl_fail;

	te->data = (void *)kni;
	kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
	TAILQ_INSERT_TAIL(kni_list, te, next);

	rte_mcfg_tailq_write_unlock();

	/* Allocate mbufs and then put them into alloc_q */
	kni_allocate_mbufs(kni);

	return kni;

ioctl_fail:
	kni_release_mz(kni);
mz_fail:
	rte_free(kni);
kni_fail:
	rte_free(te);
unlock:
	rte_mcfg_tailq_write_unlock();

	return NULL;
}
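/*
 * Minimal usage sketch for rte_kni_alloc() (illustrative only; the
 * interface name and mbuf size below are assumptions, not requirements):
 *
 *	struct rte_kni_conf conf;
 *	memset(&conf, 0, sizeof(conf));
 *	strlcpy(conf.name, "vEth0", RTE_KNI_NAMESIZE);
 *	conf.group_id = 0;
 *	conf.mbuf_size = 2048;
 *	kni = rte_kni_alloc(mbuf_pool, &conf, NULL);	// NULL ops: no request callbacks
 */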
static void
kni_free_fifo(struct rte_kni_fifo *fifo)
{
	int ret;
	struct rte_mbuf *pkt;

	do {
		ret = kni_fifo_get(fifo, (void **)&pkt, 1);
		if (ret)
			rte_pktmbuf_free(pkt);
	} while (ret);
}
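/*
 * Translate an mbuf virtual address to its physical (IOVA) address.
 * The mbuf header and its data buffer live in the same mempool object,
 * so the VA-to-PA offset of the data buffer (buf_addr - buf_iova) also
 * applies to the mbuf header itself.
 */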
static void *
va2pa(struct rte_mbuf *m)
{
	return (void *)((unsigned long)m -
			((unsigned long)m->buf_addr -
			 (unsigned long)m->buf_iova));
}
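/*
 * Convert a whole mbuf chain to physical addresses: the kernel side
 * follows the next pointers, so each one must be rewritten to the
 * physical address of the next segment before the chain is handed over.
 */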
static void *
va2pa_all(struct rte_mbuf *mbuf)
{
	void *phy_mbuf = va2pa(mbuf);
	struct rte_mbuf *next = mbuf->next;

	while (next) {
		mbuf->next = va2pa(next);
		mbuf = next;
		next = mbuf->next;
	}
	return phy_mbuf;
}
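/*
 * The alloc_q fifo stores physical addresses, so its mbufs cannot be
 * freed directly. obj_free() is run over every object in the mempool via
 * rte_mempool_obj_iter() and frees the one mbuf whose physical address
 * matches the opaque argument.
 */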
static void
obj_free(struct rte_mempool *mp __rte_unused, void *opaque, void *obj,
		unsigned obj_idx __rte_unused)
{
	struct rte_mbuf *m = obj;
	void *mbuf_phys = opaque;

	if (va2pa(m) == mbuf_phys)
		rte_pktmbuf_free(m);
}
static void
kni_free_fifo_phy(struct rte_mempool *mp, struct rte_kni_fifo *fifo)
{
	void *mbuf_phys;
	int ret;

	do {
		ret = kni_fifo_get(fifo, &mbuf_phys, 1);
		if (ret)
			rte_mempool_obj_iter(mp, obj_free, mbuf_phys);
	} while (ret);
}
int
rte_kni_release(struct rte_kni *kni)
{
	struct rte_tailq_entry *te;
	struct rte_kni_list *kni_list;
	struct rte_kni_device_info dev_info;
	unsigned int retry = 5;

	if (kni == NULL)
		return -1;

	kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);

	rte_mcfg_tailq_write_lock();

	TAILQ_FOREACH(te, kni_list, next) {
		if (te->data == kni)
			break;
	}

	if (te == NULL)
		goto unlock;
	strlcpy(dev_info.name, kni->name, sizeof(dev_info.name));
	if (ioctl(kni_fd, RTE_KNI_IOCTL_RELEASE, &dev_info) < 0) {
		RTE_LOG(ERR, KNI, "Fail to release kni device\n");
		goto unlock;
	}

	TAILQ_REMOVE(kni_list, te, next);

	rte_mcfg_tailq_write_unlock();
	/* mbufs in all fifos should be released, except request/response */

	/* wait until all rx_q packets are processed by the kernel */
	while (kni_fifo_count(kni->rx_q) && retry--)
		usleep(1000);

	if (kni_fifo_count(kni->rx_q))
		RTE_LOG(ERR, KNI, "Fail to free all Rx-q items\n");
	kni_free_fifo_phy(kni->pktmbuf_pool, kni->alloc_q);
	kni_free_fifo(kni->tx_q);
	kni_free_fifo(kni->free_q);

	kni_release_mz(kni);

	rte_free(kni);

	rte_free(te);

	return 0;

unlock:
	rte_mcfg_tailq_write_unlock();

	return -1;
}
/* default callback for request of configuring device mac address */
static int
kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[])
{
	int ret = 0;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id);
		return -EINVAL;
	}

	RTE_LOG(INFO, KNI, "Configure mac address of %d\n", port_id);

	ret = rte_eth_dev_default_mac_addr_set(port_id,
					(struct rte_ether_addr *)mac_addr);
	if (ret < 0)
		RTE_LOG(ERR, KNI, "Failed to config mac_addr for port %d\n",
			port_id);

	return ret;
}
/* default callback for request of configuring promiscuous mode */
static int
kni_config_promiscusity(uint16_t port_id, uint8_t to_on)
{
	int ret;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id);
		return -EINVAL;
	}

	RTE_LOG(INFO, KNI, "Configure promiscuous mode of %d to %d\n",
		port_id, to_on);

	if (to_on)
		ret = rte_eth_promiscuous_enable(port_id);
	else
		ret = rte_eth_promiscuous_disable(port_id);

	if (ret != 0)
		RTE_LOG(ERR, KNI,
			"Failed to %s promiscuous mode for port %u: %s\n",
			to_on ? "enable" : "disable", port_id,
			rte_strerror(-ret));

	return ret;
}
/* default callback for request of configuring allmulticast mode */
static int
kni_config_allmulticast(uint16_t port_id, uint8_t to_on)
{
	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id);
		return -EINVAL;
	}

	RTE_LOG(INFO, KNI, "Configure allmulticast mode of %d to %d\n",
		port_id, to_on);

	if (to_on)
		rte_eth_allmulticast_enable(port_id);
	else
		rte_eth_allmulticast_disable(port_id);

	return 0;
}
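/*
 * Request/response protocol: the kernel module writes one request at a
 * time into the shared sync_addr buffer and posts that same pointer on
 * req_q; the result is written back in place and the pointer is returned
 * through resp_q. Applications typically poll this from a control path,
 * e.g. (sketch):
 *
 *	while (!force_quit)
 *		rte_kni_handle_request(kni);
 */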
int
rte_kni_handle_request(struct rte_kni *kni)
{
	unsigned int ret;
	struct rte_kni_request *req = NULL;

	if (kni == NULL)
		return -1;

	/* Get request mbuf */
	ret = kni_fifo_get(kni->req_q, (void **)&req, 1);
	if (ret != 1)
		return 0; /* It is OK if we cannot get the request mbuf */

	if (req != kni->sync_addr) {
		RTE_LOG(ERR, KNI, "Wrong req pointer %p\n", req);
		return -1;
	}
	/* Analyze the request and call the relevant actions for it */
	switch (req->req_id) {
	case RTE_KNI_REQ_CHANGE_MTU: /* Change MTU */
		if (kni->ops.change_mtu)
			req->result = kni->ops.change_mtu(kni->ops.port_id,
							req->new_mtu);
		break;
	case RTE_KNI_REQ_CFG_NETWORK_IF: /* Set network interface up/down */
		if (kni->ops.config_network_if)
			req->result = kni->ops.config_network_if(kni->ops.port_id,
								 req->if_up);
		break;
	case RTE_KNI_REQ_CHANGE_MAC_ADDR: /* Change MAC Address */
		if (kni->ops.config_mac_address)
			req->result = kni->ops.config_mac_address(
					kni->ops.port_id, req->mac_addr);
		else if (kni->ops.port_id != UINT16_MAX)
			req->result = kni_config_mac_address(
					kni->ops.port_id, req->mac_addr);
		break;
	case RTE_KNI_REQ_CHANGE_PROMISC: /* Change PROMISCUOUS MODE */
		if (kni->ops.config_promiscusity)
			req->result = kni->ops.config_promiscusity(
					kni->ops.port_id, req->promiscusity);
		else if (kni->ops.port_id != UINT16_MAX)
			req->result = kni_config_promiscusity(
					kni->ops.port_id, req->promiscusity);
		break;
	case RTE_KNI_REQ_CHANGE_ALLMULTI: /* Change ALLMULTICAST MODE */
		if (kni->ops.config_allmulticast)
			req->result = kni->ops.config_allmulticast(
					kni->ops.port_id, req->allmulti);
		else if (kni->ops.port_id != UINT16_MAX)
			req->result = kni_config_allmulticast(
					kni->ops.port_id, req->allmulti);
		break;
	default:
		RTE_LOG(ERR, KNI, "Unknown request id %u\n", req->req_id);
		req->result = -EINVAL;
		break;
	}
	/* Construct response mbuf and put it back to resp_q */
	ret = kni_fifo_put(kni->resp_q, (void **)&req, 1);
	if (ret != 1) {
		RTE_LOG(ERR, KNI, "Fail to put the mbuf back to resp_q\n");
		return -1; /* It is an error if we cannot put the mbuf back */
	}

	return 0;
}
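/*
 * Note on naming: the fifo names follow the kernel side's point of view.
 * rte_kni_tx_burst() therefore enqueues onto rx_q (packets the kernel
 * will receive) and rte_kni_rx_burst() dequeues from tx_q (packets the
 * kernel has transmitted).
 */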
unsigned
rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num)
{
	num = RTE_MIN(kni_fifo_free_count(kni->rx_q), num);
	void *phy_mbufs[num];
	unsigned int ret;
	unsigned int i;

	for (i = 0; i < num; i++)
		phy_mbufs[i] = va2pa_all(mbufs[i]);

	ret = kni_fifo_put(kni->rx_q, phy_mbufs, num);

	/* Get mbufs from free_q and then free them */
	kni_free_mbufs(kni);

	return ret;
}
unsigned
rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num)
{
	unsigned int ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num);

	/* If buffers removed, allocate mbufs and then put them into alloc_q */
	if (ret)
		kni_allocate_mbufs(kni);

	return ret;
}
static void
kni_free_mbufs(struct rte_kni *kni)
{
	int i, ret;
	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];

	ret = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
	if (likely(ret > 0)) {
		for (i = 0; i < ret; i++)
			rte_pktmbuf_free(pkts[i]);
	}
}
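/*
 * The build-time checks below pin the shared mbuf layout: the kernel
 * module reads these fields through its own struct rte_kni_mbuf, so the
 * field offsets in the two definitions must match exactly.
 */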
static void
kni_allocate_mbufs(struct rte_kni *kni)
{
	int i, ret;
	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
	void *phys[MAX_MBUF_BURST_NUM];
	int allocq_free;
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
			 offsetof(struct rte_kni_mbuf, pool));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_addr) !=
			 offsetof(struct rte_kni_mbuf, buf_addr));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, next) !=
			 offsetof(struct rte_kni_mbuf, next));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
			 offsetof(struct rte_kni_mbuf, data_off));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			 offsetof(struct rte_kni_mbuf, data_len));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_kni_mbuf, pkt_len));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			 offsetof(struct rte_kni_mbuf, ol_flags));
	/* Check if pktmbuf pool has been configured */
	if (kni->pktmbuf_pool == NULL) {
		RTE_LOG(ERR, KNI, "No valid mempool for allocating mbufs\n");
		return;
	}
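	/*
	 * alloc_q free-slot computation: (read - write - 1) masked with a
	 * power-of-two size yields the free count of the ring; masking with
	 * (MAX_MBUF_BURST_NUM - 1) instead bounds a single refill to the
	 * burst size.
	 */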
	allocq_free = (kni->alloc_q->read - kni->alloc_q->write - 1)
			& (MAX_MBUF_BURST_NUM - 1);
	for (i = 0; i < allocq_free; i++) {
		pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
		if (unlikely(pkts[i] == NULL)) {
			/* Out of memory */
			RTE_LOG(ERR, KNI, "Out of memory\n");
			break;
		}
		phys[i] = va2pa(pkts[i]);
	}
	/* No pkt mbuf allocated */
	if (i <= 0)
		return;

	ret = kni_fifo_put(kni->alloc_q, phys, i);

	/* Check if any mbufs were not put into alloc_q, and then free them */
	if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
		int j;

		for (j = ret; j < i; j++)
			rte_pktmbuf_free(pkts[j]);
	}
}
struct rte_kni *
rte_kni_get(const char *name)
{
	struct rte_kni *kni;

	if (name == NULL || name[0] == '\0')
		return NULL;

	rte_mcfg_tailq_read_lock();

	kni = __rte_kni_get(name);

	rte_mcfg_tailq_read_unlock();

	return kni;
}
const char *
rte_kni_get_name(const struct rte_kni *kni)
{
	return kni->name;
}
static enum kni_ops_status
kni_check_request_register(struct rte_kni_ops *ops)
{
	/* check if KNI request ops has been registered */
	if (ops == NULL)
		return KNI_REQ_NO_REGISTER;

	if (ops->change_mtu == NULL
	    && ops->config_network_if == NULL
	    && ops->config_mac_address == NULL
	    && ops->config_promiscusity == NULL
	    && ops->config_allmulticast == NULL)
		return KNI_REQ_NO_REGISTER;

	return KNI_REQ_REGISTERED;
}
int
rte_kni_register_handlers(struct rte_kni *kni, struct rte_kni_ops *ops)
{
	enum kni_ops_status req_status;

	if (ops == NULL) {
		RTE_LOG(ERR, KNI, "Invalid KNI request operation.\n");
		return -1;
	}

	if (kni == NULL) {
		RTE_LOG(ERR, KNI, "Invalid kni info.\n");
		return -1;
	}

	req_status = kni_check_request_register(&kni->ops);
	if (req_status == KNI_REQ_REGISTERED) {
		RTE_LOG(ERR, KNI, "The KNI request operation has already been registered.\n");
		return -1;
	}

	memcpy(&kni->ops, ops, sizeof(struct rte_kni_ops));
	return 0;
}
int
rte_kni_unregister_handlers(struct rte_kni *kni)
{
	if (kni == NULL) {
		RTE_LOG(ERR, KNI, "Invalid kni info.\n");
		return -1;
	}

	memset(&kni->ops, 0, sizeof(struct rte_kni_ops));

	return 0;
}
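/*
 * Link state is updated through the interface's sysfs carrier attribute:
 * reading it yields '0' or '1', and writing '0'/'1' forces the carrier
 * down or up. The previous state is returned so callers can detect
 * changes.
 */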
int
rte_kni_update_link(struct rte_kni *kni, unsigned int linkup)
{
	char path[64];
	char old_carrier[2];
	const char *new_carrier;
	int old_linkup;
	int fd, ret;

	if (kni == NULL)
		return -1;

	snprintf(path, sizeof(path), "/sys/devices/virtual/net/%s/carrier",
		kni->name);

	fd = open(path, O_RDWR);
	if (fd == -1) {
		RTE_LOG(ERR, KNI, "Failed to open file: %s.\n", path);
		return -1;
	}

	ret = read(fd, old_carrier, 2);
	if (ret < 1) {
		close(fd);
		return -1;
	}
	old_linkup = (old_carrier[0] == '1');

	new_carrier = linkup ? "1" : "0";
	ret = write(fd, new_carrier, 1);
	if (ret < 1) {
		RTE_LOG(ERR, KNI, "Failed to write file: %s.\n", path);
		close(fd);
		return -1;
	}

	close(fd);
	return old_linkup;
}