/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef RTE_EXEC_ENV_LINUX
#error "KNI is not supported"
#endif

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/version.h>

#include <rte_string_fns.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_kni.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal_memconfig.h>
#include <rte_kni_common.h>
#include "rte_kni_fifo.h"
#define MAX_MBUF_BURST_NUM		32

/* Maximum number of ring entries */
#define KNI_FIFO_COUNT_MAX		1024
#define KNI_FIFO_SIZE			(KNI_FIFO_COUNT_MAX * sizeof(void *) + \
					sizeof(struct rte_kni_fifo))

#define KNI_REQUEST_MBUF_NUM_MAX	32

#define KNI_MEM_CHECK(cond, fail)	do { if (cond) goto fail; } while (0)

#define KNI_MZ_NAME_FMT			"kni_info_%s"
#define KNI_TX_Q_MZ_NAME_FMT		"kni_tx_%s"
#define KNI_RX_Q_MZ_NAME_FMT		"kni_rx_%s"
#define KNI_ALLOC_Q_MZ_NAME_FMT		"kni_alloc_%s"
#define KNI_FREE_Q_MZ_NAME_FMT		"kni_free_%s"
#define KNI_REQ_Q_MZ_NAME_FMT		"kni_req_%s"
#define KNI_RESP_Q_MZ_NAME_FMT		"kni_resp_%s"
#define KNI_SYNC_ADDR_MZ_NAME_FMT	"kni_sync_%s"
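
/*
 * Note: each interface reserves its own set of memzones, named with the
 * formats above. For a hypothetical interface called "vEth0", the library
 * would reserve "kni_tx_vEth0", "kni_rx_vEth0", "kni_alloc_vEth0",
 * "kni_free_vEth0", "kni_req_vEth0", "kni_resp_vEth0" and "kni_sync_vEth0".
 */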
TAILQ_HEAD(rte_kni_list, rte_tailq_entry);

static struct rte_tailq_elem rte_kni_tailq = {
	.name = "RTE_KNI",
};
EAL_REGISTER_TAILQ(rte_kni_tailq)
/**
 * KNI context
 */
struct rte_kni {
	char name[RTE_KNI_NAMESIZE];        /**< KNI interface name */
	uint16_t group_id;                  /**< Group ID of KNI devices */
	uint32_t slot_id;                   /**< KNI pool slot ID */
	struct rte_mempool *pktmbuf_pool;   /**< pkt mbuf mempool */
	unsigned int mbuf_size;             /**< mbuf size */

	const struct rte_memzone *m_tx_q;   /**< TX queue memzone */
	const struct rte_memzone *m_rx_q;   /**< RX queue memzone */
	const struct rte_memzone *m_alloc_q;/**< Alloc queue memzone */
	const struct rte_memzone *m_free_q; /**< Free queue memzone */

	struct rte_kni_fifo *tx_q;          /**< TX queue */
	struct rte_kni_fifo *rx_q;          /**< RX queue */
	struct rte_kni_fifo *alloc_q;       /**< Allocated mbufs queue */
	struct rte_kni_fifo *free_q;        /**< To be freed mbufs queue */

	const struct rte_memzone *m_req_q;  /**< Request queue memzone */
	const struct rte_memzone *m_resp_q; /**< Response queue memzone */
	const struct rte_memzone *m_sync_addr;/**< Sync addr memzone */

	/* For request & response */
	struct rte_kni_fifo *req_q;         /**< Request queue */
	struct rte_kni_fifo *resp_q;        /**< Response queue */
	void *sync_addr;                    /**< Req/Resp Mem address */

	struct rte_kni_ops ops;             /**< operations for request */
};

enum kni_ops_status {
	KNI_REQ_NO_REGISTER = 0,
	KNI_REQ_REGISTERED,
};
static void kni_free_mbufs(struct rte_kni *kni);
static void kni_allocate_mbufs(struct rte_kni *kni);

static volatile int kni_fd = -1;
/* Shall be called before any allocation happens */
int
rte_kni_init(unsigned int max_kni_ifaces __rte_unused)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
	if (rte_eal_iova_mode() != RTE_IOVA_PA) {
		RTE_LOG(ERR, KNI, "KNI requires IOVA as PA\n");
		return -1;
	}
#endif

	/* Check FD and open */
	if (kni_fd < 0) {
		kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
		if (kni_fd < 0) {
			RTE_LOG(ERR, KNI,
				"Cannot open /dev/%s\n", KNI_DEVICE);
			return -1;
		}
	}

	return 0;
}
static struct rte_kni *
__rte_kni_get(const char *name)
{
	struct rte_kni *kni;
	struct rte_tailq_entry *te;
	struct rte_kni_list *kni_list;

	kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);

	TAILQ_FOREACH(te, kni_list, next) {
		kni = te->data;
		if (strncmp(name, kni->name, RTE_KNI_NAMESIZE) == 0)
			break;
	}

	if (te == NULL)
		kni = NULL;

	return kni;
}
static int
kni_reserve_mz(struct rte_kni *kni)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_TX_Q_MZ_NAME_FMT, kni->name);
	kni->m_tx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
			RTE_MEMZONE_IOVA_CONTIG);
	KNI_MEM_CHECK(kni->m_tx_q == NULL, tx_q_fail);

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RX_Q_MZ_NAME_FMT, kni->name);
	kni->m_rx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
			RTE_MEMZONE_IOVA_CONTIG);
	KNI_MEM_CHECK(kni->m_rx_q == NULL, rx_q_fail);

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_ALLOC_Q_MZ_NAME_FMT, kni->name);
	kni->m_alloc_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
			RTE_MEMZONE_IOVA_CONTIG);
	KNI_MEM_CHECK(kni->m_alloc_q == NULL, alloc_q_fail);

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_FREE_Q_MZ_NAME_FMT, kni->name);
	kni->m_free_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
			RTE_MEMZONE_IOVA_CONTIG);
	KNI_MEM_CHECK(kni->m_free_q == NULL, free_q_fail);

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_REQ_Q_MZ_NAME_FMT, kni->name);
	kni->m_req_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
			RTE_MEMZONE_IOVA_CONTIG);
	KNI_MEM_CHECK(kni->m_req_q == NULL, req_q_fail);

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RESP_Q_MZ_NAME_FMT, kni->name);
	kni->m_resp_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
			RTE_MEMZONE_IOVA_CONTIG);
	KNI_MEM_CHECK(kni->m_resp_q == NULL, resp_q_fail);

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_SYNC_ADDR_MZ_NAME_FMT, kni->name);
	kni->m_sync_addr = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY,
			RTE_MEMZONE_IOVA_CONTIG);
	KNI_MEM_CHECK(kni->m_sync_addr == NULL, sync_addr_fail);

	return 0;

sync_addr_fail:
	rte_memzone_free(kni->m_resp_q);
resp_q_fail:
	rte_memzone_free(kni->m_req_q);
req_q_fail:
	rte_memzone_free(kni->m_free_q);
free_q_fail:
	rte_memzone_free(kni->m_alloc_q);
alloc_q_fail:
	rte_memzone_free(kni->m_rx_q);
rx_q_fail:
	rte_memzone_free(kni->m_tx_q);
tx_q_fail:
	return -1;
}
static void
kni_release_mz(struct rte_kni *kni)
{
	rte_memzone_free(kni->m_tx_q);
	rte_memzone_free(kni->m_rx_q);
	rte_memzone_free(kni->m_alloc_q);
	rte_memzone_free(kni->m_free_q);
	rte_memzone_free(kni->m_req_q);
	rte_memzone_free(kni->m_resp_q);
	rte_memzone_free(kni->m_sync_addr);
}
struct rte_kni *
rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
	      const struct rte_kni_conf *conf,
	      struct rte_kni_ops *ops)
{
	int ret;
	struct rte_kni_device_info dev_info;
	struct rte_kni *kni;
	struct rte_tailq_entry *te;
	struct rte_kni_list *kni_list;

	if (!pktmbuf_pool || !conf || !conf->name[0])
		return NULL;

	/* Check if KNI subsystem has been initialized */
	if (kni_fd < 0) {
		RTE_LOG(ERR, KNI, "KNI subsystem has not been initialized. Invoke rte_kni_init() first\n");
		return NULL;
	}

	rte_mcfg_tailq_write_lock();

	kni = __rte_kni_get(conf->name);
	if (kni != NULL) {
		RTE_LOG(ERR, KNI, "KNI already exists\n");
		goto unlock;
	}

	te = rte_zmalloc("KNI_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, KNI, "Failed to allocate tailq entry\n");
		goto unlock;
	}

	kni = rte_zmalloc("KNI", sizeof(struct rte_kni), RTE_CACHE_LINE_SIZE);
	if (kni == NULL) {
		RTE_LOG(ERR, KNI, "KNI memory allocation failed\n");
		goto kni_fail;
	}

	strlcpy(kni->name, conf->name, RTE_KNI_NAMESIZE);

	if (ops)
		memcpy(&kni->ops, ops, sizeof(struct rte_kni_ops));
	else
		kni->ops.port_id = UINT16_MAX;

	memset(&dev_info, 0, sizeof(dev_info));
	dev_info.core_id = conf->core_id;
	dev_info.force_bind = conf->force_bind;
	dev_info.group_id = conf->group_id;
	dev_info.mbuf_size = conf->mbuf_size;
	dev_info.mtu = conf->mtu;
	dev_info.min_mtu = conf->min_mtu;
	dev_info.max_mtu = conf->max_mtu;

	memcpy(dev_info.mac_addr, conf->mac_addr, RTE_ETHER_ADDR_LEN);

	strlcpy(dev_info.name, conf->name, RTE_KNI_NAMESIZE);

	ret = kni_reserve_mz(kni);
	if (ret < 0)
		goto mz_fail;

	/* TX RING */
	kni->tx_q = kni->m_tx_q->addr;
	kni_fifo_init(kni->tx_q, KNI_FIFO_COUNT_MAX);
	dev_info.tx_phys = kni->m_tx_q->iova;

	/* RX RING */
	kni->rx_q = kni->m_rx_q->addr;
	kni_fifo_init(kni->rx_q, KNI_FIFO_COUNT_MAX);
	dev_info.rx_phys = kni->m_rx_q->iova;

	/* ALLOC RING */
	kni->alloc_q = kni->m_alloc_q->addr;
	kni_fifo_init(kni->alloc_q, KNI_FIFO_COUNT_MAX);
	dev_info.alloc_phys = kni->m_alloc_q->iova;

	/* FREE RING */
	kni->free_q = kni->m_free_q->addr;
	kni_fifo_init(kni->free_q, KNI_FIFO_COUNT_MAX);
	dev_info.free_phys = kni->m_free_q->iova;

	/* Request RING */
	kni->req_q = kni->m_req_q->addr;
	kni_fifo_init(kni->req_q, KNI_FIFO_COUNT_MAX);
	dev_info.req_phys = kni->m_req_q->iova;

	/* Response RING */
	kni->resp_q = kni->m_resp_q->addr;
	kni_fifo_init(kni->resp_q, KNI_FIFO_COUNT_MAX);
	dev_info.resp_phys = kni->m_resp_q->iova;

	/* Req/Resp sync mem area */
	kni->sync_addr = kni->m_sync_addr->addr;
	dev_info.sync_va = kni->m_sync_addr->addr;
	dev_info.sync_phys = kni->m_sync_addr->iova;

	kni->pktmbuf_pool = pktmbuf_pool;
	kni->group_id = conf->group_id;
	kni->mbuf_size = conf->mbuf_size;

	dev_info.iova_mode = (rte_eal_iova_mode() == RTE_IOVA_VA) ? 1 : 0;

	ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
	if (ret < 0)
		goto ioctl_fail;

	te->data = kni;

	kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
	TAILQ_INSERT_TAIL(kni_list, te, next);

	rte_mcfg_tailq_write_unlock();

	/* Allocate mbufs and then put them into alloc_q */
	kni_allocate_mbufs(kni);

	return kni;

ioctl_fail:
	kni_release_mz(kni);
mz_fail:
	rte_free(kni);
kni_fail:
	rte_free(te);
unlock:
	rte_mcfg_tailq_write_unlock();

	return NULL;
}
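
/*
 * Usage sketch (illustrative, not part of this file): creating an interface
 * backed by an existing mbuf pool. "mbuf_pool", the interface name and the
 * mbuf size below are assumptions chosen for the example.
 *
 *	struct rte_kni_conf conf;
 *	struct rte_kni *kni;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	strlcpy(conf.name, "vEth0", RTE_KNI_NAMESIZE);
 *	conf.group_id = 0;
 *	conf.mbuf_size = 2048;
 *
 *	kni = rte_kni_alloc(mbuf_pool, &conf, NULL);
 *	if (kni == NULL)
 *		rte_exit(EXIT_FAILURE, "Failed to create KNI interface\n");
 */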
static void
kni_free_fifo(struct rte_kni_fifo *fifo)
{
	int ret;
	struct rte_mbuf *pkt;

	do {
		ret = kni_fifo_get(fifo, (void **)&pkt, 1);
		if (ret)
			rte_pktmbuf_free(pkt);
	} while (ret);
}
/* translate va to pa */
static void *
va2pa(struct rte_mbuf *m)
{
	return (void *)((unsigned long)m -
			((unsigned long)m->buf_addr -
			 (unsigned long)m->buf_iova));
}
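
/*
 * Worked example (hypothetical addresses): the mbuf header sits at a fixed
 * offset before its data buffer, so its physical address can be recovered
 * from the buffer's VA/IOVA pair. If m = 0x7f0000001000,
 * m->buf_addr = 0x7f0000001080 and m->buf_iova = 0x2000001080, the VA-to-PA
 * delta is 0x7f0000001080 - 0x2000001080 = 0x7d0000000000, and
 * va2pa(m) = 0x7f0000001000 - 0x7d0000000000 = 0x2000001000.
 */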
/* translate va to pa for all segments of a chained mbuf */
static void *
va2pa_all(struct rte_mbuf *mbuf)
{
	void *phy_mbuf = va2pa(mbuf);
	struct rte_mbuf *next = mbuf->next;
	while (next) {
		mbuf->next = va2pa(next);
		mbuf = next;
		next = mbuf->next;
	}
	return phy_mbuf;
}
static void
obj_free(struct rte_mempool *mp __rte_unused, void *opaque, void *obj,
		unsigned obj_idx __rte_unused)
{
	struct rte_mbuf *m = obj;
	void *mbuf_phys = opaque;

	if (va2pa(m) == mbuf_phys)
		rte_pktmbuf_free(m);
}
static void
kni_free_fifo_phy(struct rte_mempool *mp, struct rte_kni_fifo *fifo)
{
	void *mbuf_phys;
	int ret;

	do {
		ret = kni_fifo_get(fifo, &mbuf_phys, 1);
		if (ret)
			rte_mempool_obj_iter(mp, obj_free, mbuf_phys);
	} while (ret);
}
int
rte_kni_release(struct rte_kni *kni)
{
	struct rte_tailq_entry *te;
	struct rte_kni_list *kni_list;
	struct rte_kni_device_info dev_info;
	uint32_t retry = 5;

	if (kni == NULL)
		return -1;

	kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);

	rte_mcfg_tailq_write_lock();

	TAILQ_FOREACH(te, kni_list, next) {
		if (te->data == kni)
			break;
	}

	if (te == NULL)
		goto unlock;

	strlcpy(dev_info.name, kni->name, sizeof(dev_info.name));
	if (ioctl(kni_fd, RTE_KNI_IOCTL_RELEASE, &dev_info) < 0) {
		RTE_LOG(ERR, KNI, "Failed to release KNI device\n");
		goto unlock;
	}

	TAILQ_REMOVE(kni_list, te, next);

	rte_mcfg_tailq_write_unlock();

	/* mbufs in all fifos should be released, except request/response */

	/* wait until all rxq packets are processed by the kernel */
	while (kni_fifo_count(kni->rx_q) && retry--)
		usleep(1000);

	if (kni_fifo_count(kni->rx_q))
		RTE_LOG(ERR, KNI, "Failed to free all Rx-q items\n");

	kni_free_fifo_phy(kni->pktmbuf_pool, kni->alloc_q);
	kni_free_fifo(kni->tx_q);
	kni_free_fifo(kni->free_q);

	kni_release_mz(kni);

	rte_free(kni);

	rte_free(te);

	return 0;

unlock:
	rte_mcfg_tailq_write_unlock();

	return -1;
}
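
/*
 * Usage sketch (illustrative, not part of this file): tear-down mirrors
 * creation; "kni" is assumed to come from a successful rte_kni_alloc().
 *
 *	if (rte_kni_release(kni) < 0)
 *		printf("Failed to release KNI interface\n");
 */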
/* default callback for request of configuring device mac address */
static int
kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[])
{
	int ret = 0;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id);
		return -EINVAL;
	}

	RTE_LOG(INFO, KNI, "Configure mac address of %d\n", port_id);

	ret = rte_eth_dev_default_mac_addr_set(port_id,
					(struct rte_ether_addr *)mac_addr);
	if (ret < 0)
		RTE_LOG(ERR, KNI, "Failed to config mac_addr for port %d\n",
			port_id);

	return ret;
}
/* default callback for request of configuring promiscuous mode */
static int
kni_config_promiscusity(uint16_t port_id, uint8_t to_on)
{
	int ret;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id);
		return -EINVAL;
	}

	RTE_LOG(INFO, KNI, "Configure promiscuous mode of %d to %d\n",
		port_id, to_on);

	if (to_on)
		ret = rte_eth_promiscuous_enable(port_id);
	else
		ret = rte_eth_promiscuous_disable(port_id);

	if (ret != 0)
		RTE_LOG(ERR, KNI,
			"Failed to %s promiscuous mode for port %u: %s\n",
			to_on ? "enable" : "disable", port_id,
			rte_strerror(-ret));

	return ret;
}
/* default callback for request of configuring allmulticast mode */
static int
kni_config_allmulticast(uint16_t port_id, uint8_t to_on)
{
	int ret;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id);
		return -EINVAL;
	}

	RTE_LOG(INFO, KNI, "Configure allmulticast mode of %d to %d\n",
		port_id, to_on);

	if (to_on)
		ret = rte_eth_allmulticast_enable(port_id);
	else
		ret = rte_eth_allmulticast_disable(port_id);
	if (ret != 0)
		RTE_LOG(ERR, KNI,
			"Failed to %s allmulticast mode for port %u: %s\n",
			to_on ? "enable" : "disable", port_id,
			rte_strerror(-ret));

	return ret;
}
int
rte_kni_handle_request(struct rte_kni *kni)
{
	unsigned int ret;
	struct rte_kni_request *req = NULL;

	if (kni == NULL)
		return -1;

	/* Get request mbuf */
	ret = kni_fifo_get(kni->req_q, (void **)&req, 1);
	if (ret != 1)
		return 0; /* It is OK if no request mbuf is pending */

	if (req != kni->sync_addr) {
		RTE_LOG(ERR, KNI, "Wrong req pointer %p\n", req);
		return -1;
	}

	/* Analyze the request and call the relevant actions for it */
	switch (req->req_id) {
	case RTE_KNI_REQ_CHANGE_MTU: /* Change MTU */
		if (kni->ops.change_mtu)
			req->result = kni->ops.change_mtu(kni->ops.port_id,
							req->new_mtu);
		break;
	case RTE_KNI_REQ_CFG_NETWORK_IF: /* Set network interface up/down */
		if (kni->ops.config_network_if)
			req->result = kni->ops.config_network_if(kni->ops.port_id,
								 req->if_up);
		break;
	case RTE_KNI_REQ_CHANGE_MAC_ADDR: /* Change MAC Address */
		if (kni->ops.config_mac_address)
			req->result = kni->ops.config_mac_address(
					kni->ops.port_id, req->mac_addr);
		else if (kni->ops.port_id != UINT16_MAX)
			req->result = kni_config_mac_address(
					kni->ops.port_id, req->mac_addr);
		break;
	case RTE_KNI_REQ_CHANGE_PROMISC: /* Change PROMISCUOUS MODE */
		if (kni->ops.config_promiscusity)
			req->result = kni->ops.config_promiscusity(
					kni->ops.port_id, req->promiscusity);
		else if (kni->ops.port_id != UINT16_MAX)
			req->result = kni_config_promiscusity(
					kni->ops.port_id, req->promiscusity);
		break;
	case RTE_KNI_REQ_CHANGE_ALLMULTI: /* Change ALLMULTICAST MODE */
		if (kni->ops.config_allmulticast)
			req->result = kni->ops.config_allmulticast(
					kni->ops.port_id, req->allmulti);
		else if (kni->ops.port_id != UINT16_MAX)
			req->result = kni_config_allmulticast(
					kni->ops.port_id, req->allmulti);
		break;
	default:
		RTE_LOG(ERR, KNI, "Unknown request id %u\n", req->req_id);
		req->result = -EINVAL;
		break;
	}

	/* if needed, construct response buffer and put it back to resp_q */
	if (!req->async)
		ret = kni_fifo_put(kni->resp_q, (void **)&req, 1);

	if (ret != 1) {
		RTE_LOG(ERR, KNI, "Failed to put the mbuf back to resp_q\n");
		return -1; /* It is an error if the mbuf cannot be put back */
	}

	return 0;
}
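
/*
 * Usage sketch (illustrative, not part of this file): requests originate in
 * the kernel (e.g. "ip link set vEth0 mtu 1500"), so an application thread
 * must poll each interface periodically or the kernel side blocks until it
 * times out. A minimal control loop, assuming an application-defined "kni"
 * handle and "force_quit" flag:
 *
 *	while (!force_quit) {
 *		rte_kni_handle_request(kni);
 *		usleep(500);
 *	}
 */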
unsigned
rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num)
{
	num = RTE_MIN(kni_fifo_free_count(kni->rx_q), num);
	void *phy_mbufs[num];
	unsigned int ret;
	unsigned int i;

	for (i = 0; i < num; i++)
		phy_mbufs[i] = va2pa_all(mbufs[i]);

	ret = kni_fifo_put(kni->rx_q, phy_mbufs, num);

	/* Get mbufs from free_q and then free them */
	kni_free_mbufs(kni);

	return ret;
}
unsigned
rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num)
{
	unsigned int ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num);

	/* If buffers removed, allocate mbufs and then put them into alloc_q */
	if (ret)
		kni_allocate_mbufs(kni);

	return ret;
}
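
/*
 * Usage sketch (illustrative, not part of this file): a minimal forwarding
 * loop body between an ethdev port and a KNI interface. "port_id" and "kni"
 * are assumptions supplied by the application. Mbufs accepted by
 * rte_kni_tx_burst() are freed later via free_q, so only the rejected tail
 * is freed here.
 *
 *	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
 *	unsigned int nb, sent;
 *
 *	// ethdev -> kernel
 *	nb = rte_eth_rx_burst(port_id, 0, pkts, MAX_MBUF_BURST_NUM);
 *	sent = rte_kni_tx_burst(kni, pkts, nb);
 *	while (sent < nb)
 *		rte_pktmbuf_free(pkts[sent++]);
 *
 *	// kernel -> ethdev
 *	nb = rte_kni_rx_burst(kni, pkts, MAX_MBUF_BURST_NUM);
 *	sent = rte_eth_tx_burst(port_id, 0, pkts, nb);
 *	while (sent < nb)
 *		rte_pktmbuf_free(pkts[sent++]);
 */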
static void
kni_free_mbufs(struct rte_kni *kni)
{
	int i, ret;
	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];

	ret = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
	if (likely(ret > 0)) {
		for (i = 0; i < ret; i++)
			rte_pktmbuf_free(pkts[i]);
	}
}
static void
kni_allocate_mbufs(struct rte_kni *kni)
{
	int i, ret;
	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
	void *phys[MAX_MBUF_BURST_NUM];
	int allocq_free;

	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
			 offsetof(struct rte_kni_mbuf, pool));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_addr) !=
			 offsetof(struct rte_kni_mbuf, buf_addr));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, next) !=
			 offsetof(struct rte_kni_mbuf, next));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
			 offsetof(struct rte_kni_mbuf, data_off));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			 offsetof(struct rte_kni_mbuf, data_len));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_kni_mbuf, pkt_len));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			 offsetof(struct rte_kni_mbuf, ol_flags));

	/* Check if pktmbuf pool has been configured */
	if (kni->pktmbuf_pool == NULL) {
		RTE_LOG(ERR, KNI, "No valid mempool for allocating mbufs\n");
		return;
	}

	allocq_free = kni_fifo_free_count(kni->alloc_q);
	allocq_free = (allocq_free > MAX_MBUF_BURST_NUM) ?
		MAX_MBUF_BURST_NUM : allocq_free;
	for (i = 0; i < allocq_free; i++) {
		pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
		if (unlikely(pkts[i] == NULL)) {
			/* Out of memory */
			RTE_LOG(ERR, KNI, "Out of memory\n");
			break;
		}
		phys[i] = va2pa(pkts[i]);
	}

	/* No pkt mbuf allocated */
	if (i <= 0)
		return;

	ret = kni_fifo_put(kni->alloc_q, phys, i);

	/* Check if any mbufs were not put into alloc_q, and then free them */
	if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
		int j;

		for (j = ret; j < i; j++)
			rte_pktmbuf_free(pkts[j]);
	}
}
struct rte_kni *
rte_kni_get(const char *name)
{
	struct rte_kni *kni;

	if (name == NULL || name[0] == '\0')
		return NULL;

	rte_mcfg_tailq_read_lock();

	kni = __rte_kni_get(name);

	rte_mcfg_tailq_read_unlock();

	return kni;
}
const char *
rte_kni_get_name(const struct rte_kni *kni)
{
	return kni->name;
}
static enum kni_ops_status
kni_check_request_register(struct rte_kni_ops *ops)
{
	/* check if KNI request ops have been registered */
	if (ops == NULL)
		return KNI_REQ_NO_REGISTER;

	if (ops->change_mtu == NULL
	    && ops->config_network_if == NULL
	    && ops->config_mac_address == NULL
	    && ops->config_promiscusity == NULL
	    && ops->config_allmulticast == NULL)
		return KNI_REQ_NO_REGISTER;

	return KNI_REQ_REGISTERED;
}
int
rte_kni_register_handlers(struct rte_kni *kni, struct rte_kni_ops *ops)
{
	enum kni_ops_status req_status;

	if (ops == NULL) {
		RTE_LOG(ERR, KNI, "Invalid KNI request operation.\n");
		return -1;
	}

	if (kni == NULL) {
		RTE_LOG(ERR, KNI, "Invalid kni info.\n");
		return -1;
	}

	req_status = kni_check_request_register(&kni->ops);
	if (req_status == KNI_REQ_REGISTERED) {
		RTE_LOG(ERR, KNI, "The KNI request operation has already been registered.\n");
		return -1;
	}

	memcpy(&kni->ops, ops, sizeof(struct rte_kni_ops));
	return 0;
}
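
/*
 * Usage sketch (illustrative, not part of this file): registering a custom
 * MTU handler after creation. "kni_change_mtu" and "port_id" are assumptions
 * defined by the application.
 *
 *	struct rte_kni_ops ops;
 *
 *	memset(&ops, 0, sizeof(ops));
 *	ops.port_id = port_id;
 *	ops.change_mtu = kni_change_mtu;	// int (*)(uint16_t, unsigned int)
 *	if (rte_kni_register_handlers(kni, &ops) < 0)
 *		printf("Failed to register KNI handlers\n");
 */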
int
rte_kni_unregister_handlers(struct rte_kni *kni)
{
	if (kni == NULL) {
		RTE_LOG(ERR, KNI, "Invalid kni info.\n");
		return -1;
	}

	memset(&kni->ops, 0, sizeof(struct rte_kni_ops));

	return 0;
}
int
rte_kni_update_link(struct rte_kni *kni, unsigned int linkup)
{
	char path[64];
	char old_carrier[2];
	const char *new_carrier;
	int old_linkup;
	int fd, ret;

	if (kni == NULL)
		return -1;

	snprintf(path, sizeof(path), "/sys/devices/virtual/net/%s/carrier",
		kni->name);

	fd = open(path, O_RDWR);
	if (fd == -1) {
		RTE_LOG(ERR, KNI, "Failed to open file: %s.\n", path);
		return -1;
	}

	ret = read(fd, old_carrier, 2);
	if (ret < 1) {
		close(fd);
		return -1;
	}
	old_linkup = (old_carrier[0] == '1');

	if (old_linkup == (int)linkup)
		goto out;

	new_carrier = linkup ? "1" : "0";
	ret = write(fd, new_carrier, 1);
	if (ret < 1) {
		RTE_LOG(ERR, KNI, "Failed to write file: %s.\n", path);
		close(fd);
		return -1;
	}

out:
	close(fd);
	return old_linkup;
}
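
/*
 * Usage sketch (illustrative, not part of this file): mirroring the ethdev
 * link state into the kernel interface; "port_id" and "kni" are assumptions
 * supplied by the application.
 *
 *	struct rte_eth_link link;
 *
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0)
 *		rte_kni_update_link(kni, link.link_status);
 */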