/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef RTE_EXEC_ENV_LINUXAPP
#error "KNI is not supported"
#endif

#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_kni.h>
#include <rte_memzone.h>
#include <exec-env/rte_kni_common.h>
#include "rte_kni_fifo.h"

#define MAX_MBUF_BURST_NUM            32

/* Maximum number of ring entries */
#define KNI_FIFO_COUNT_MAX     1024
#define KNI_FIFO_SIZE          (KNI_FIFO_COUNT_MAX * sizeof(void *) + \
					sizeof(struct rte_kni_fifo))

#define KNI_REQUEST_MBUF_NUM_MAX      32

#define KNI_MEM_CHECK(cond) do { if (cond) goto kni_fail; } while (0)

/**
 * KNI context
 */
struct rte_kni {
	char name[RTE_KNI_NAMESIZE];        /**< KNI interface name */
	uint16_t group_id;                  /**< Group ID of KNI devices */
	uint32_t slot_id;                   /**< KNI pool slot ID */
	struct rte_mempool *pktmbuf_pool;   /**< pkt mbuf mempool */
	unsigned mbuf_size;                 /**< mbuf size */

	struct rte_kni_fifo *tx_q;          /**< TX queue */
	struct rte_kni_fifo *rx_q;          /**< RX queue */
	struct rte_kni_fifo *alloc_q;       /**< Allocated mbufs queue */
	struct rte_kni_fifo *free_q;        /**< To be freed mbufs queue */

	/* For request & response */
	struct rte_kni_fifo *req_q;         /**< Request queue */
	struct rte_kni_fifo *resp_q;        /**< Response queue */
	void *sync_addr;                    /**< Req/Resp Mem address */

	struct rte_kni_ops ops;             /**< operations for request */
	uint8_t in_use : 1;                 /**< kni in use */
};

enum kni_ops_status {
	KNI_REQ_NO_REGISTER = 0,
	KNI_REQ_REGISTERED,
};

/**
 * KNI memzone pool slot
 */
struct rte_kni_memzone_slot {
	uint32_t id;
	uint8_t in_use : 1;                    /**< slot in use */

	/* Memzones */
	const struct rte_memzone *m_ctx;       /**< KNI ctx */
	const struct rte_memzone *m_tx_q;      /**< TX queue */
	const struct rte_memzone *m_rx_q;      /**< RX queue */
	const struct rte_memzone *m_alloc_q;   /**< Allocated mbufs queue */
	const struct rte_memzone *m_free_q;    /**< To be freed mbufs queue */
	const struct rte_memzone *m_req_q;     /**< Request queue */
	const struct rte_memzone *m_resp_q;    /**< Response queue */
	const struct rte_memzone *m_sync_addr; /**< Req/Resp sync mem area */

	/* Free linked list */
	struct rte_kni_memzone_slot *next;     /**< Next slot in the free list */
};

/**
 * KNI memzone pool
 */
struct rte_kni_memzone_pool {
	uint8_t initialized : 1;            /**< Global KNI pool init flag */

	uint32_t max_ifaces;                /**< Max. num of KNI ifaces */
	struct rte_kni_memzone_slot *slots; /**< Pool slots */
	rte_spinlock_t mutex;               /**< alloc/release mutex */

	/* Free memzone slots linked-list */
	struct rte_kni_memzone_slot *free;      /**< First empty slot */
	struct rte_kni_memzone_slot *free_tail; /**< Last empty slot */
};

static void kni_free_mbufs(struct rte_kni *kni);
static void kni_allocate_mbufs(struct rte_kni *kni);

static volatile int kni_fd = -1;
static struct rte_kni_memzone_pool kni_memzone_pool = {
	.initialized = 0,
};

/* Reserve a memzone, or return the existing one reserved under this name */
static const struct rte_memzone *
kni_memzone_reserve(const char *name, size_t len, int socket_id,
						unsigned flags)
{
	const struct rte_memzone *mz = rte_memzone_lookup(name);

	if (mz == NULL)
		mz = rte_memzone_reserve(name, len, socket_id, flags);

	return mz;
}

/* Pool mgmt: take the first slot off the free list, or NULL if it is empty */
static struct rte_kni_memzone_slot *
kni_memzone_pool_alloc(void)
{
	struct rte_kni_memzone_slot *slot;

	rte_spinlock_lock(&kni_memzone_pool.mutex);

	if (!kni_memzone_pool.free) {
		rte_spinlock_unlock(&kni_memzone_pool.mutex);
		return NULL;
	}

	slot = kni_memzone_pool.free;
	kni_memzone_pool.free = slot->next;
	slot->in_use = 1;

	/* The last slot was just taken; the free list is now empty */
	if (!kni_memzone_pool.free)
		kni_memzone_pool.free_tail = NULL;

	rte_spinlock_unlock(&kni_memzone_pool.mutex);

	return slot;
}

/* Pool mgmt: append the slot to the tail of the free list */
static void
kni_memzone_pool_release(struct rte_kni_memzone_slot *slot)
{
	rte_spinlock_lock(&kni_memzone_pool.mutex);

	if (kni_memzone_pool.free)
		kni_memzone_pool.free_tail->next = slot;
	else
		kni_memzone_pool.free = slot;

	kni_memzone_pool.free_tail = slot;
	slot->next = NULL;
	slot->in_use = 0;

	rte_spinlock_unlock(&kni_memzone_pool.mutex);
}

/* Shall be called before any allocation happens */
void
rte_kni_init(unsigned int max_kni_ifaces)
{
	uint32_t i;
	struct rte_kni_memzone_slot *it;
	const struct rte_memzone *mz;
#define OBJNAMSIZ 32
	char obj_name[OBJNAMSIZ];
	char mz_name[RTE_MEMZONE_NAMESIZE];

	/* Immediately return if KNI is already initialized */
	if (kni_memzone_pool.initialized) {
		RTE_LOG(WARNING, KNI, "Double call to rte_kni_init()\n");
		return;
	}

	if (max_kni_ifaces == 0) {
		RTE_LOG(ERR, KNI, "Invalid number of max_kni_ifaces %d\n",
							max_kni_ifaces);
		RTE_LOG(ERR, KNI, "Unable to initialize KNI\n");
		return;
	}

	/* Check FD and open */
	if (kni_fd < 0) {
		kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
		if (kni_fd < 0) {
			RTE_LOG(ERR, KNI,
				"Can not open /dev/%s\n", KNI_DEVICE);
			return;
		}
	}

	/* Allocate slot objects */
	kni_memzone_pool.slots = (struct rte_kni_memzone_slot *)
					rte_malloc(NULL,
					sizeof(struct rte_kni_memzone_slot) *
					max_kni_ifaces,
					0);
	KNI_MEM_CHECK(kni_memzone_pool.slots == NULL);

	/* Initialize general pool variables */
	kni_memzone_pool.initialized = 1;
	kni_memzone_pool.max_ifaces = max_kni_ifaces;
	kni_memzone_pool.free = &kni_memzone_pool.slots[0];
	rte_spinlock_init(&kni_memzone_pool.mutex);

	/* Pre-allocate all memzones of all the slots; panic on error */
	for (i = 0; i < max_kni_ifaces; i++) {

		/* Recover current slot */
		it = &kni_memzone_pool.slots[i];
		it->id = i;

		/* Allocate KNI context */
		snprintf(mz_name, RTE_MEMZONE_NAMESIZE, "KNI_INFO_%d", i);
		mz = kni_memzone_reserve(mz_name, sizeof(struct rte_kni),
					SOCKET_ID_ANY, 0);
		KNI_MEM_CHECK(mz == NULL);
		it->m_ctx = mz;

		/* TX RING */
		snprintf(obj_name, OBJNAMSIZ, "kni_tx_%d", i);
		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
							SOCKET_ID_ANY, 0);
		KNI_MEM_CHECK(mz == NULL);
		it->m_tx_q = mz;

		/* RX RING */
		snprintf(obj_name, OBJNAMSIZ, "kni_rx_%d", i);
		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
							SOCKET_ID_ANY, 0);
		KNI_MEM_CHECK(mz == NULL);
		it->m_rx_q = mz;

		/* ALLOC RING */
		snprintf(obj_name, OBJNAMSIZ, "kni_alloc_%d", i);
		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
							SOCKET_ID_ANY, 0);
		KNI_MEM_CHECK(mz == NULL);
		it->m_alloc_q = mz;

		/* FREE RING */
		snprintf(obj_name, OBJNAMSIZ, "kni_free_%d", i);
		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
							SOCKET_ID_ANY, 0);
		KNI_MEM_CHECK(mz == NULL);
		it->m_free_q = mz;

		/* Request RING */
		snprintf(obj_name, OBJNAMSIZ, "kni_req_%d", i);
		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
							SOCKET_ID_ANY, 0);
		KNI_MEM_CHECK(mz == NULL);
		it->m_req_q = mz;

		/* Response RING */
		snprintf(obj_name, OBJNAMSIZ, "kni_resp_%d", i);
		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
							SOCKET_ID_ANY, 0);
		KNI_MEM_CHECK(mz == NULL);
		it->m_resp_q = mz;

		/* Req/Resp sync mem area */
		snprintf(obj_name, OBJNAMSIZ, "kni_sync_%d", i);
		mz = kni_memzone_reserve(obj_name, KNI_FIFO_SIZE,
							SOCKET_ID_ANY, 0);
		KNI_MEM_CHECK(mz == NULL);
		it->m_sync_addr = mz;

		/* Link the slot into the free list */
		if ((i+1) == max_kni_ifaces) {
			it->next = NULL;
			kni_memzone_pool.free_tail = it;
		} else
			it->next = &kni_memzone_pool.slots[i+1];
	}

	return;

kni_fail:
	RTE_LOG(ERR, KNI, "Unable to allocate memory for max_kni_ifaces:%d. "
		"Increase the amount of hugepages memory\n", max_kni_ifaces);
}

struct rte_kni *
rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
	      const struct rte_kni_conf *conf,
	      struct rte_kni_ops *ops)
{
	int ret;
	struct rte_kni_device_info dev_info;
	struct rte_kni *ctx;
	char intf_name[RTE_KNI_NAMESIZE];
	const struct rte_memzone *mz;
	struct rte_kni_memzone_slot *slot = NULL;

	if (!pktmbuf_pool || !conf || !conf->name[0])
		return NULL;

	/* Check if KNI subsystem has been initialized */
	if (kni_memzone_pool.initialized != 1) {
		RTE_LOG(ERR, KNI, "KNI subsystem has not been initialized. Invoke rte_kni_init() first\n");
		return NULL;
	}

	/* Get an available slot from the pool */
	slot = kni_memzone_pool_alloc();
	if (!slot) {
		RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces(current %d) or release unused ones.\n",
			kni_memzone_pool.max_ifaces);
		return NULL;
	}

	/* Recover ctx */
	ctx = slot->m_ctx->addr;
	snprintf(intf_name, RTE_KNI_NAMESIZE, "%s", conf->name);

	if (ctx->in_use) {
		RTE_LOG(ERR, KNI, "KNI %s is in use\n", ctx->name);
		return NULL;
	}
	memset(ctx, 0, sizeof(struct rte_kni));
	if (ops)
		memcpy(&ctx->ops, ops, sizeof(struct rte_kni_ops));

	memset(&dev_info, 0, sizeof(dev_info));
	dev_info.bus = conf->addr.bus;
	dev_info.devid = conf->addr.devid;
	dev_info.function = conf->addr.function;
	dev_info.vendor_id = conf->id.vendor_id;
	dev_info.device_id = conf->id.device_id;
	dev_info.core_id = conf->core_id;
	dev_info.force_bind = conf->force_bind;
	dev_info.group_id = conf->group_id;
	dev_info.mbuf_size = conf->mbuf_size;

	snprintf(ctx->name, RTE_KNI_NAMESIZE, "%s", intf_name);
	snprintf(dev_info.name, RTE_KNI_NAMESIZE, "%s", intf_name);

	RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
		dev_info.bus, dev_info.devid, dev_info.function,
			dev_info.vendor_id, dev_info.device_id);

	/* TX RING */
	mz = slot->m_tx_q;
	ctx->tx_q = mz->addr;
	kni_fifo_init(ctx->tx_q, KNI_FIFO_COUNT_MAX);
	dev_info.tx_phys = mz->phys_addr;

	/* RX RING */
	mz = slot->m_rx_q;
	ctx->rx_q = mz->addr;
	kni_fifo_init(ctx->rx_q, KNI_FIFO_COUNT_MAX);
	dev_info.rx_phys = mz->phys_addr;

	/* ALLOC RING */
	mz = slot->m_alloc_q;
	ctx->alloc_q = mz->addr;
	kni_fifo_init(ctx->alloc_q, KNI_FIFO_COUNT_MAX);
	dev_info.alloc_phys = mz->phys_addr;

	/* FREE RING */
	mz = slot->m_free_q;
	ctx->free_q = mz->addr;
	kni_fifo_init(ctx->free_q, KNI_FIFO_COUNT_MAX);
	dev_info.free_phys = mz->phys_addr;

	/* Request RING */
	mz = slot->m_req_q;
	ctx->req_q = mz->addr;
	kni_fifo_init(ctx->req_q, KNI_FIFO_COUNT_MAX);
	dev_info.req_phys = mz->phys_addr;

	/* Response RING */
	mz = slot->m_resp_q;
	ctx->resp_q = mz->addr;
	kni_fifo_init(ctx->resp_q, KNI_FIFO_COUNT_MAX);
	dev_info.resp_phys = mz->phys_addr;

	/* Req/Resp sync mem area */
	mz = slot->m_sync_addr;
	ctx->sync_addr = mz->addr;
	dev_info.sync_va = mz->addr;
	dev_info.sync_phys = mz->phys_addr;

	ctx->pktmbuf_pool = pktmbuf_pool;
	ctx->group_id = conf->group_id;
	ctx->slot_id = slot->id;
	ctx->mbuf_size = conf->mbuf_size;

	ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
	KNI_MEM_CHECK(ret < 0);

	ctx->in_use = 1;

	/* Allocate mbufs and then put them into alloc_q */
	kni_allocate_mbufs(ctx);

	return ctx;

kni_fail:
	if (slot)
		kni_memzone_pool_release(&kni_memzone_pool.slots[slot->id]);

	return NULL;
}

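/*
 * Illustrative sketch, not part of the library: creating and destroying a
 * single interface. Only fields documented in struct rte_kni_conf are set;
 * the interface name pattern and mbuf size are hypothetical choices.
 */
#if 0
static int
example_iface_lifecycle(struct rte_mempool *pktmbuf_pool)
{
	struct rte_kni_conf conf;
	struct rte_kni *kni;

	memset(&conf, 0, sizeof(conf));
	snprintf(conf.name, RTE_KNI_NAMESIZE, "vEth%u", 0u);
	conf.group_id = 0;
	conf.mbuf_size = 2048;

	/* NULL ops: request handlers can be registered later */
	kni = rte_kni_alloc(pktmbuf_pool, &conf, NULL);
	if (kni == NULL)
		return -1;

	/* ... datapath runs here ... */

	return rte_kni_release(kni);
}
#endif
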
/* Drain a fifo of mbuf virtual addresses, freeing each mbuf */
static void
kni_free_fifo(struct rte_kni_fifo *fifo)
{
	int ret;
	struct rte_mbuf *pkt;

	do {
		ret = kni_fifo_get(fifo, (void **)&pkt, 1);
		if (ret)
			rte_pktmbuf_free(pkt);
	} while (ret);
}

/* Translate an mbuf header pointer to its physical (IO) address */
static void *
va2pa(struct rte_mbuf *m)
{
	return (void *)((unsigned long)m -
			((unsigned long)m->buf_addr -
			 (unsigned long)m->buf_iova));
}

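/*
 * Worked example with assumed values: if an mbuf header sits at VA
 * 0x7f0000001000 and its data buffer at VA 0x7f0000001080 maps to IOVA
 * 0x2000001080, then buf_addr - buf_iova == 0x7d0000000000, and va2pa()
 * yields 0x7f0000001000 - 0x7d0000000000 == 0x2000001000: the IOVA of
 * the mbuf header itself, which is what the kernel side dereferences.
 */
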
/* Mempool iterator callback: free the mbuf whose PA matches the opaque key */
static void
obj_free(struct rte_mempool *mp __rte_unused, void *opaque, void *obj,
		unsigned obj_idx __rte_unused)
{
	struct rte_mbuf *m = obj;
	void *mbuf_phys = opaque;

	if (va2pa(m) == mbuf_phys)
		rte_pktmbuf_free(m);
}

/* Drain a fifo of physical addresses; each mbuf is found by scanning mp */
static void
kni_free_fifo_phy(struct rte_mempool *mp, struct rte_kni_fifo *fifo)
{
	void *mbuf_phys;
	int ret;

	do {
		ret = kni_fifo_get(fifo, &mbuf_phys, 1);
		if (ret)
			rte_mempool_obj_iter(mp, obj_free, mbuf_phys);
	} while (ret);
}

int
rte_kni_release(struct rte_kni *kni)
{
	struct rte_kni_device_info dev_info;
	uint32_t slot_id;
	uint32_t retry = 5;

	if (!kni || !kni->in_use)
		return -1;

	snprintf(dev_info.name, sizeof(dev_info.name), "%s", kni->name);
	if (ioctl(kni_fd, RTE_KNI_IOCTL_RELEASE, &dev_info) < 0) {
		RTE_LOG(ERR, KNI, "Fail to release kni device\n");
		return -1;
	}

	/* mbufs in all fifo should be released, except request/response */

	/* wait until all rxq packets processed by kernel */
	while (kni_fifo_count(kni->rx_q) && retry--)
		usleep(1000);

	if (kni_fifo_count(kni->rx_q))
		RTE_LOG(ERR, KNI, "Fail to free all Rx-q items\n");

	kni_free_fifo_phy(kni->pktmbuf_pool, kni->alloc_q);
	kni_free_fifo(kni->tx_q);
	kni_free_fifo(kni->free_q);

	slot_id = kni->slot_id;

	/* Memset the KNI struct */
	memset(kni, 0, sizeof(struct rte_kni));

	/* Release memzone; valid slot IDs are 0..max_ifaces-1 */
	if (slot_id >= kni_memzone_pool.max_ifaces) {
		RTE_LOG(ERR, KNI, "KNI pool: corrupted slot ID: %d, max: %d\n",
			slot_id, kni_memzone_pool.max_ifaces);
		return -1;
	}
	kni_memzone_pool_release(&kni_memzone_pool.slots[slot_id]);

	return 0;
}

int
rte_kni_handle_request(struct rte_kni *kni)
{
	unsigned ret;
	struct rte_kni_request *req;

	if (kni == NULL)
		return -1;

	/* Get request mbuf */
	ret = kni_fifo_get(kni->req_q, (void **)&req, 1);
	if (ret != 1)
		return 0; /* It is OK if there is no request to handle */

	if (req != kni->sync_addr) {
		RTE_LOG(ERR, KNI, "Wrong req pointer %p\n", req);
		return -1;
	}

	/* Analyze the request and call the relevant actions for it */
	switch (req->req_id) {
	case RTE_KNI_REQ_CHANGE_MTU: /* Change MTU */
		if (kni->ops.change_mtu)
			req->result = kni->ops.change_mtu(kni->ops.port_id,
							req->new_mtu);
		break;
	case RTE_KNI_REQ_CFG_NETWORK_IF: /* Set network interface up/down */
		if (kni->ops.config_network_if)
			req->result = kni->ops.config_network_if(
					kni->ops.port_id, req->if_up);
		break;
	default:
		RTE_LOG(ERR, KNI, "Unknown request id %u\n", req->req_id);
		req->result = -EINVAL;
		break;
	}

	/* Construct response mbuf and put it back to resp_q */
	ret = kni_fifo_put(kni->resp_q, (void **)&req, 1);
	if (ret != 1) {
		RTE_LOG(ERR, KNI, "Fail to put the mbuf back to resp_q\n");
		return -1; /* Failing to enqueue the response is an error */
	}

	return 0;
}

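/*
 * Illustrative sketch, not part of the library: requests originate from
 * kernel-side events (e.g. "ifconfig vEth0 up" or an MTU change), so an
 * application thread has to poll for them. A slow control thread such as
 * the hypothetical one below is the usual place; the polling interval is
 * an arbitrary choice.
 */
#if 0
static void *
example_control_thread(void *arg)
{
	struct rte_kni *kni = arg;

	for (;;) {
		/* Runs pending change_mtu/config_network_if callbacks */
		rte_kni_handle_request(kni);
		usleep(500 * 1000);
	}
	return NULL;
}
#endif
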
unsigned
rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
{
	void *phy_mbufs[num];
	unsigned ret;
	unsigned i;

	/* The kernel side works on physical addresses */
	for (i = 0; i < num; i++)
		phy_mbufs[i] = va2pa(mbufs[i]);

	ret = kni_fifo_put(kni->rx_q, phy_mbufs, num);

	/* Get mbufs from free_q and then free them */
	kni_free_mbufs(kni);

	return ret;
}

unsigned
rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
{
	unsigned ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num);

	/* If buffers removed, allocate mbufs and then put them into alloc_q */
	if (ret)
		kni_allocate_mbufs(kni);

	return ret;
}

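/*
 * Illustrative sketch, not part of the library: the two bursts wired into
 * a forwarding loop. Note the fifo names are from the kernel netdev's
 * point of view: rte_kni_rx_burst() drains the kni->tx_q the kernel
 * transmitted into, and rte_kni_tx_burst() fills kni->rx_q for the kernel
 * to receive. port_id and the single queue index are hypothetical.
 */
#if 0
static void
example_forward_loop(struct rte_kni *kni, uint16_t port_id)
{
	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
	unsigned nb_rx, nb_tx;

	for (;;) {
		/* NIC -> kernel netdev */
		nb_rx = rte_eth_rx_burst(port_id, 0, pkts, MAX_MBUF_BURST_NUM);
		nb_tx = rte_kni_tx_burst(kni, pkts, nb_rx);
		while (nb_tx < nb_rx)
			rte_pktmbuf_free(pkts[nb_tx++]);

		/* kernel netdev -> NIC */
		nb_rx = rte_kni_rx_burst(kni, pkts, MAX_MBUF_BURST_NUM);
		nb_tx = rte_eth_tx_burst(port_id, 0, pkts, (uint16_t)nb_rx);
		while (nb_tx < nb_rx)
			rte_pktmbuf_free(pkts[nb_tx++]);
	}
}
#endif
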
static void
kni_free_mbufs(struct rte_kni *kni)
{
	int i, ret;
	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];

	ret = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
	if (likely(ret > 0)) {
		for (i = 0; i < ret; i++)
			rte_pktmbuf_free(pkts[i]);
	}
}

static void
kni_allocate_mbufs(struct rte_kni *kni)
{
	int i, ret;
	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
	void *phys[MAX_MBUF_BURST_NUM];
	int allocq_free;

	/* The kernel module relies on these field offsets matching */
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
			 offsetof(struct rte_kni_mbuf, pool));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_addr) !=
			 offsetof(struct rte_kni_mbuf, buf_addr));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, next) !=
			 offsetof(struct rte_kni_mbuf, next));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
			 offsetof(struct rte_kni_mbuf, data_off));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			 offsetof(struct rte_kni_mbuf, data_len));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_kni_mbuf, pkt_len));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			 offsetof(struct rte_kni_mbuf, ol_flags));

	/* Check if pktmbuf pool has been configured */
	if (kni->pktmbuf_pool == NULL) {
		RTE_LOG(ERR, KNI, "No valid mempool for allocating mbufs\n");
		return;
	}

	allocq_free = (kni->alloc_q->read - kni->alloc_q->write - 1)
			& (MAX_MBUF_BURST_NUM - 1);
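	/*
	 * Note on the computation above: read and write are unsigned ring
	 * indices, so the subtraction wraps safely, and the mask folds the
	 * result into [0, MAX_MBUF_BURST_NUM - 1], keeping each refill
	 * within one burst. E.g. read == 3, write == 5 gives
	 * (3 - 5 - 1) & 31 == 29 slots to fill on this call.
	 */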
	for (i = 0; i < allocq_free; i++) {
		pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
		if (unlikely(pkts[i] == NULL)) {
			/* Out of memory */
			RTE_LOG(ERR, KNI, "Out of memory\n");
			break;
		}
		phys[i] = va2pa(pkts[i]);
	}

	/* No pkt mbuf allocated */
	if (i <= 0)
		return;

	ret = kni_fifo_put(kni->alloc_q, phys, i);

	/* Check if any mbufs not put into alloc_q, and then free them */
	if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
		int j;

		for (j = ret; j < i; j++)
			rte_pktmbuf_free(pkts[j]);
	}
}

struct rte_kni *
rte_kni_get(const char *name)
{
	uint32_t i;
	struct rte_kni_memzone_slot *it;
	struct rte_kni *kni;

	/* Note: could be improved perf-wise if necessary */
	for (i = 0; i < kni_memzone_pool.max_ifaces; i++) {
		it = &kni_memzone_pool.slots[i];
		if (it->in_use == 0)
			continue;
		kni = it->m_ctx->addr;
		if (strncmp(kni->name, name, RTE_KNI_NAMESIZE) == 0)
			return kni;
	}

	return NULL;
}

const char *
rte_kni_get_name(const struct rte_kni *kni)
{
	return kni->name;
}

static enum kni_ops_status
kni_check_request_register(struct rte_kni_ops *ops)
{
	/* check if KNI request ops has been registered */
	if (NULL == ops)
		return KNI_REQ_NO_REGISTER;

	if ((NULL == ops->change_mtu) && (NULL == ops->config_network_if))
		return KNI_REQ_NO_REGISTER;

	return KNI_REQ_REGISTERED;
}

int
rte_kni_register_handlers(struct rte_kni *kni, struct rte_kni_ops *ops)
{
	enum kni_ops_status req_status;

	if (NULL == ops) {
		RTE_LOG(ERR, KNI, "Invalid KNI request operation.\n");
		return -1;
	}

	if (NULL == kni) {
		RTE_LOG(ERR, KNI, "Invalid kni info.\n");
		return -1;
	}

	req_status = kni_check_request_register(&kni->ops);
	if (KNI_REQ_REGISTERED == req_status) {
		RTE_LOG(ERR, KNI, "The KNI request operation has already been registered.\n");
		return -1;
	}

	memcpy(&kni->ops, ops, sizeof(struct rte_kni_ops));
	return 0;
}

int
rte_kni_unregister_handlers(struct rte_kni *kni)
{
	if (NULL == kni) {
		RTE_LOG(ERR, KNI, "Invalid kni info.\n");
		return -1;
	}

	kni->ops.change_mtu = NULL;
	kni->ops.config_network_if = NULL;

	return 0;
}
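
/*
 * Illustrative sketch, not part of the library: installing request
 * handlers after creation. example_change_mtu is hypothetical, and the
 * uint16_t port_id type is assumed from this era's rte_kni.h; the
 * port_id field in struct rte_kni_ops selects which port the kernel's
 * requests act on.
 */
#if 0
static int
example_change_mtu(uint16_t port_id, unsigned int new_mtu)
{
	RTE_LOG(INFO, KNI, "Port %u: MTU -> %u\n", port_id, new_mtu);
	return 0;
}

static void
example_install_handlers(struct rte_kni *kni, uint16_t port_id)
{
	struct rte_kni_ops ops = {
		.port_id = port_id,
		.change_mtu = example_change_mtu,
		.config_network_if = NULL,
	};

	rte_kni_register_handlers(kni, &ops);
}
#endif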