/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef RTE_EXEC_ENV_LINUX
#error "KNI is not supported"
#endif
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <linux/version.h>

#include <rte_spinlock.h>
#include <rte_string_fns.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_kni.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_rwlock.h>
#include <rte_eal_memconfig.h>
#include <rte_kni_common.h>
#include "rte_kni_fifo.h"
#define MAX_MBUF_BURST_NUM		32

/* Maximum number of ring entries */
#define KNI_FIFO_COUNT_MAX		1024
#define KNI_FIFO_SIZE			(KNI_FIFO_COUNT_MAX * sizeof(void *) + \
					sizeof(struct rte_kni_fifo))

#define KNI_REQUEST_MBUF_NUM_MAX	32

#define KNI_MEM_CHECK(cond, fail) do { if (cond) goto fail; } while (0)

#define KNI_MZ_NAME_FMT			"kni_info_%s"
#define KNI_TX_Q_MZ_NAME_FMT		"kni_tx_%s"
#define KNI_RX_Q_MZ_NAME_FMT		"kni_rx_%s"
#define KNI_ALLOC_Q_MZ_NAME_FMT		"kni_alloc_%s"
#define KNI_FREE_Q_MZ_NAME_FMT		"kni_free_%s"
#define KNI_REQ_Q_MZ_NAME_FMT		"kni_req_%s"
#define KNI_RESP_Q_MZ_NAME_FMT		"kni_resp_%s"
#define KNI_SYNC_ADDR_MZ_NAME_FMT	"kni_sync_%s"
TAILQ_HEAD(rte_kni_list, rte_tailq_entry);

static struct rte_tailq_elem rte_kni_tailq = {
	.name = "RTE_KNI",
};
EAL_REGISTER_TAILQ(rte_kni_tailq)
/**
 * KNI context
 */
struct rte_kni {
	char name[RTE_KNI_NAMESIZE];        /**< KNI interface name */
	uint16_t group_id;                  /**< Group ID of KNI devices */
	uint32_t slot_id;                   /**< KNI pool slot ID */
	struct rte_mempool *pktmbuf_pool;   /**< pkt mbuf mempool */
	unsigned int mbuf_size;             /**< mbuf size */

	const struct rte_memzone *m_tx_q;   /**< TX queue memzone */
	const struct rte_memzone *m_rx_q;   /**< RX queue memzone */
	const struct rte_memzone *m_alloc_q;/**< Alloc queue memzone */
	const struct rte_memzone *m_free_q; /**< Free queue memzone */

	struct rte_kni_fifo *tx_q;          /**< TX queue */
	struct rte_kni_fifo *rx_q;          /**< RX queue */
	struct rte_kni_fifo *alloc_q;       /**< Allocated mbufs queue */
	struct rte_kni_fifo *free_q;        /**< To be freed mbufs queue */

	const struct rte_memzone *m_req_q;  /**< Request queue memzone */
	const struct rte_memzone *m_resp_q; /**< Response queue memzone */
	const struct rte_memzone *m_sync_addr;/**< Sync addr memzone */

	/* For request & response */
	struct rte_kni_fifo *req_q;         /**< Request queue */
	struct rte_kni_fifo *resp_q;        /**< Response queue */
	void *sync_addr;                    /**< Req/Resp Mem address */

	struct rte_kni_ops ops;             /**< operations for request */
};
enum kni_ops_status {
	KNI_REQ_NO_REGISTER = 0,
	KNI_REQ_REGISTERED,
};
static void kni_free_mbufs(struct rte_kni *kni);
static void kni_allocate_mbufs(struct rte_kni *kni);

static volatile int kni_fd = -1;
/* Shall be called before any allocation happens */
int
rte_kni_init(unsigned int max_kni_ifaces __rte_unused)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
	if (rte_eal_iova_mode() != RTE_IOVA_PA) {
		RTE_LOG(ERR, KNI, "KNI requires IOVA as PA\n");
		return -1;
	}
#endif

	/* Check FD and open */
	if (kni_fd < 0) {
		kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
		if (kni_fd < 0) {
			RTE_LOG(ERR, KNI,
				"Can not open /dev/%s\n", KNI_DEVICE);
			return -1;
		}
	}

	return 0;
}
static struct rte_kni *
__rte_kni_get(const char *name)
{
	struct rte_kni *kni;
	struct rte_tailq_entry *te;
	struct rte_kni_list *kni_list;

	kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);

	TAILQ_FOREACH(te, kni_list, next) {
		kni = te->data;
		if (strncmp(name, kni->name, RTE_KNI_NAMESIZE) == 0)
			break;
	}

	if (te == NULL)
		kni = NULL;

	return kni;
}
static int
kni_reserve_mz(struct rte_kni *kni)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_TX_Q_MZ_NAME_FMT, kni->name);
	kni->m_tx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MEM_CHECK(kni->m_tx_q == NULL, tx_q_fail);

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RX_Q_MZ_NAME_FMT, kni->name);
	kni->m_rx_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MEM_CHECK(kni->m_rx_q == NULL, rx_q_fail);

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_ALLOC_Q_MZ_NAME_FMT, kni->name);
	kni->m_alloc_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MEM_CHECK(kni->m_alloc_q == NULL, alloc_q_fail);

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_FREE_Q_MZ_NAME_FMT, kni->name);
	kni->m_free_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MEM_CHECK(kni->m_free_q == NULL, free_q_fail);

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_REQ_Q_MZ_NAME_FMT, kni->name);
	kni->m_req_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MEM_CHECK(kni->m_req_q == NULL, req_q_fail);

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_RESP_Q_MZ_NAME_FMT, kni->name);
	kni->m_resp_q = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MEM_CHECK(kni->m_resp_q == NULL, resp_q_fail);

	snprintf(mz_name, RTE_MEMZONE_NAMESIZE, KNI_SYNC_ADDR_MZ_NAME_FMT, kni->name);
	kni->m_sync_addr = rte_memzone_reserve(mz_name, KNI_FIFO_SIZE, SOCKET_ID_ANY, 0);
	KNI_MEM_CHECK(kni->m_sync_addr == NULL, sync_addr_fail);

	return 0;

sync_addr_fail:
	rte_memzone_free(kni->m_resp_q);
resp_q_fail:
	rte_memzone_free(kni->m_req_q);
req_q_fail:
	rte_memzone_free(kni->m_free_q);
free_q_fail:
	rte_memzone_free(kni->m_alloc_q);
alloc_q_fail:
	rte_memzone_free(kni->m_rx_q);
rx_q_fail:
	rte_memzone_free(kni->m_tx_q);
tx_q_fail:
	return -1;
}
static void
kni_release_mz(struct rte_kni *kni)
{
	rte_memzone_free(kni->m_tx_q);
	rte_memzone_free(kni->m_rx_q);
	rte_memzone_free(kni->m_alloc_q);
	rte_memzone_free(kni->m_free_q);
	rte_memzone_free(kni->m_req_q);
	rte_memzone_free(kni->m_resp_q);
	rte_memzone_free(kni->m_sync_addr);
}
struct rte_kni *
rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
	      const struct rte_kni_conf *conf,
	      struct rte_kni_ops *ops)
{
	int ret;
	struct rte_kni_device_info dev_info;
	struct rte_kni *kni;
	struct rte_tailq_entry *te;
	struct rte_kni_list *kni_list;

	if (!pktmbuf_pool || !conf || !conf->name[0])
		return NULL;

	/* Check if KNI subsystem has been initialized */
	if (kni_fd < 0) {
		RTE_LOG(ERR, KNI, "KNI subsystem has not been initialized. Invoke rte_kni_init() first\n");
		return NULL;
	}

	rte_mcfg_tailq_write_lock();

	kni = __rte_kni_get(conf->name);
	if (kni != NULL) {
		RTE_LOG(ERR, KNI, "KNI already exists\n");
		goto unlock;
	}

	te = rte_zmalloc("KNI_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, KNI, "Failed to allocate tailq entry\n");
		goto unlock;
	}

	kni = rte_zmalloc("KNI", sizeof(struct rte_kni), RTE_CACHE_LINE_SIZE);
	if (kni == NULL) {
		RTE_LOG(ERR, KNI, "KNI memory allocation failed\n");
		goto kni_fail;
	}

	strlcpy(kni->name, conf->name, RTE_KNI_NAMESIZE);

	if (ops)
		memcpy(&kni->ops, ops, sizeof(struct rte_kni_ops));
	else
		kni->ops.port_id = UINT16_MAX;

	memset(&dev_info, 0, sizeof(dev_info));
	dev_info.core_id = conf->core_id;
	dev_info.force_bind = conf->force_bind;
	dev_info.group_id = conf->group_id;
	dev_info.mbuf_size = conf->mbuf_size;
	dev_info.mtu = conf->mtu;
	dev_info.min_mtu = conf->min_mtu;
	dev_info.max_mtu = conf->max_mtu;

	memcpy(dev_info.mac_addr, conf->mac_addr, RTE_ETHER_ADDR_LEN);

	strlcpy(dev_info.name, conf->name, RTE_KNI_NAMESIZE);

	ret = kni_reserve_mz(kni);
	if (ret < 0)
		goto mz_fail;

	/* TX RING */
	kni->tx_q = kni->m_tx_q->addr;
	kni_fifo_init(kni->tx_q, KNI_FIFO_COUNT_MAX);
	dev_info.tx_phys = kni->m_tx_q->phys_addr;

	/* RX RING */
	kni->rx_q = kni->m_rx_q->addr;
	kni_fifo_init(kni->rx_q, KNI_FIFO_COUNT_MAX);
	dev_info.rx_phys = kni->m_rx_q->phys_addr;

	/* ALLOC RING */
	kni->alloc_q = kni->m_alloc_q->addr;
	kni_fifo_init(kni->alloc_q, KNI_FIFO_COUNT_MAX);
	dev_info.alloc_phys = kni->m_alloc_q->phys_addr;

	/* FREE RING */
	kni->free_q = kni->m_free_q->addr;
	kni_fifo_init(kni->free_q, KNI_FIFO_COUNT_MAX);
	dev_info.free_phys = kni->m_free_q->phys_addr;

	/* Request RING */
	kni->req_q = kni->m_req_q->addr;
	kni_fifo_init(kni->req_q, KNI_FIFO_COUNT_MAX);
	dev_info.req_phys = kni->m_req_q->phys_addr;

	/* Response RING */
	kni->resp_q = kni->m_resp_q->addr;
	kni_fifo_init(kni->resp_q, KNI_FIFO_COUNT_MAX);
	dev_info.resp_phys = kni->m_resp_q->phys_addr;

	/* Req/Resp sync mem area */
	kni->sync_addr = kni->m_sync_addr->addr;
	dev_info.sync_va = kni->m_sync_addr->addr;
	dev_info.sync_phys = kni->m_sync_addr->phys_addr;

	kni->pktmbuf_pool = pktmbuf_pool;
	kni->group_id = conf->group_id;
	kni->mbuf_size = conf->mbuf_size;

	dev_info.iova_mode = (rte_eal_iova_mode() == RTE_IOVA_VA) ? 1 : 0;

	ret = ioctl(kni_fd, RTE_KNI_IOCTL_CREATE, &dev_info);
	if (ret < 0)
		goto ioctl_fail;

	te->data = kni;

	kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
	TAILQ_INSERT_TAIL(kni_list, te, next);

	rte_mcfg_tailq_write_unlock();

	/* Allocate mbufs and then put them into alloc_q */
	kni_allocate_mbufs(kni);

	return kni;

ioctl_fail:
	kni_release_mz(kni);
mz_fail:
	rte_free(kni);
kni_fail:
	rte_free(te);
unlock:
	rte_mcfg_tailq_write_unlock();

	return NULL;
}
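#ifdef KNI_USAGE_EXAMPLES
/*
 * Editor's sketch, not part of the original source: a minimal caller of
 * rte_kni_alloc(). The example_ name and the "vEth%u" naming scheme are
 * hypothetical. Passing NULL ops sets kni->ops.port_id to UINT16_MAX,
 * so rte_kni_handle_request() skips even the default callbacks until
 * rte_kni_register_handlers() is called.
 */
static struct rte_kni *
example_kni_create(struct rte_mempool *mb_pool, uint16_t port_id)
{
	struct rte_kni_conf conf;

	memset(&conf, 0, sizeof(conf));
	snprintf(conf.name, RTE_KNI_NAMESIZE, "vEth%u", port_id);
	conf.group_id = port_id;
	conf.mbuf_size = RTE_MBUF_DEFAULT_BUF_SIZE;

	return rte_kni_alloc(mb_pool, &conf, NULL);
}
#endif /* KNI_USAGE_EXAMPLES */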
static void
kni_free_fifo(struct rte_kni_fifo *fifo)
{
	int ret;
	struct rte_mbuf *pkt;

	do {
		ret = kni_fifo_get(fifo, (void **)&pkt, 1);
		if (ret)
			rte_pktmbuf_free(pkt);
	} while (ret);
}
static void *
va2pa(struct rte_mbuf *m)
{
	return (void *)((unsigned long)m -
			((unsigned long)m->buf_addr -
			 (unsigned long)m->buf_iova));
}

static void *
va2pa_all(struct rte_mbuf *mbuf)
{
	void *phy_mbuf = va2pa(mbuf);
	struct rte_mbuf *next = mbuf->next;

	while (next) {
		mbuf->next = va2pa(next);
		mbuf = next;
		next = mbuf->next;
	}

	return phy_mbuf;
}
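/*
 * Editor's note, not part of the original source: va2pa() assumes the mbuf
 * header and its data buffer live in one IOVA-contiguous region, so the
 * buffer's VA-to-IOVA offset also applies to the header. For example, with
 * m = 0x7f0000001000, m->buf_addr = 0x7f0000001080 and m->buf_iova =
 * 0x31080, the offset is 0x7efffffd0000 and the header IOVA is 0x31000.
 * va2pa_all() applies the same conversion to every segment's next pointer,
 * so afterwards the chain is only traversable by the kernel side.
 */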
static void
obj_free(struct rte_mempool *mp __rte_unused, void *opaque, void *obj,
		unsigned obj_idx __rte_unused)
{
	struct rte_mbuf *m = obj;
	void *mbuf_phys = opaque;

	if (va2pa(m) == mbuf_phys)
		rte_pktmbuf_free(m);
}

static void
kni_free_fifo_phy(struct rte_mempool *mp, struct rte_kni_fifo *fifo)
{
	void *mbuf_phys;
	int ret;

	do {
		ret = kni_fifo_get(fifo, &mbuf_phys, 1);
		if (ret)
			rte_mempool_obj_iter(mp, obj_free, mbuf_phys);
	} while (ret);
}
int
rte_kni_release(struct rte_kni *kni)
{
	struct rte_tailq_entry *te;
	struct rte_kni_list *kni_list;
	struct rte_kni_device_info dev_info;
	uint32_t retry = 5;

	if (kni == NULL)
		return -1;

	kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);

	rte_mcfg_tailq_write_lock();

	TAILQ_FOREACH(te, kni_list, next) {
		if (te->data == kni)
			break;
	}

	if (te == NULL)
		goto unlock;

	strlcpy(dev_info.name, kni->name, sizeof(dev_info.name));
	if (ioctl(kni_fd, RTE_KNI_IOCTL_RELEASE, &dev_info) < 0) {
		RTE_LOG(ERR, KNI, "Fail to release kni device\n");
		goto unlock;
	}

	TAILQ_REMOVE(kni_list, te, next);

	rte_mcfg_tailq_write_unlock();

	/* mbufs in all fifos should be released, except request/response */

	/* wait until all rx_q packets are processed by the kernel */
	while (kni_fifo_count(kni->rx_q) && retry--)
		usleep(1000);

	if (kni_fifo_count(kni->rx_q))
		RTE_LOG(ERR, KNI, "Fail to free all Rx-q items\n");

	kni_free_fifo_phy(kni->pktmbuf_pool, kni->alloc_q);
	kni_free_fifo(kni->tx_q);
	kni_free_fifo(kni->free_q);

	kni_release_mz(kni);

	rte_free(kni);

	rte_free(te);

	return 0;

unlock:
	rte_mcfg_tailq_write_unlock();

	return -1;
}
/* default callback for request of configuring device mac address */
static int
kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[])
{
	int ret = 0;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id);
		return -EINVAL;
	}

	RTE_LOG(INFO, KNI, "Configure mac address of %d\n", port_id);

	ret = rte_eth_dev_default_mac_addr_set(port_id,
					(struct rte_ether_addr *)mac_addr);
	if (ret < 0)
		RTE_LOG(ERR, KNI, "Failed to config mac_addr for port %d\n",
			port_id);

	return ret;
}
/* default callback for request of configuring promiscuous mode */
static int
kni_config_promiscusity(uint16_t port_id, uint8_t to_on)
{
	int ret;

	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id);
		return -EINVAL;
	}

	RTE_LOG(INFO, KNI, "Configure promiscuous mode of %d to %d\n",
		port_id, to_on);

	if (to_on)
		ret = rte_eth_promiscuous_enable(port_id);
	else
		ret = rte_eth_promiscuous_disable(port_id);

	if (ret != 0)
		RTE_LOG(ERR, KNI,
			"Failed to %s promiscuous mode for port %u: %s\n",
			to_on ? "enable" : "disable", port_id,
			rte_strerror(-ret));

	return ret;
}
/* default callback for request of configuring allmulticast mode */
static int
kni_config_allmulticast(uint16_t port_id, uint8_t to_on)
{
	if (!rte_eth_dev_is_valid_port(port_id)) {
		RTE_LOG(ERR, KNI, "Invalid port id %d\n", port_id);
		return -EINVAL;
	}

	RTE_LOG(INFO, KNI, "Configure allmulticast mode of %d to %d\n",
		port_id, to_on);

	if (to_on)
		rte_eth_allmulticast_enable(port_id);
	else
		rte_eth_allmulticast_disable(port_id);

	return 0;
}
int
rte_kni_handle_request(struct rte_kni *kni)
{
	unsigned int ret;
	struct rte_kni_request *req = NULL;

	if (kni == NULL)
		return -1;

	/* Get request mbuf */
	ret = kni_fifo_get(kni->req_q, (void **)&req, 1);
	if (ret != 1)
		return 0; /* It is OK if there is no request to handle */

	if (req != kni->sync_addr) {
		RTE_LOG(ERR, KNI, "Wrong req pointer %p\n", req);
		return -1;
	}

	/* Analyze the request and call the relevant actions for it */
	switch (req->req_id) {
	case RTE_KNI_REQ_CHANGE_MTU: /* Change MTU */
		if (kni->ops.change_mtu)
			req->result = kni->ops.change_mtu(kni->ops.port_id,
							req->new_mtu);
		break;
	case RTE_KNI_REQ_CFG_NETWORK_IF: /* Set network interface up/down */
		if (kni->ops.config_network_if)
			req->result = kni->ops.config_network_if(kni->ops.port_id,
								 req->if_up);
		break;
	case RTE_KNI_REQ_CHANGE_MAC_ADDR: /* Change MAC Address */
		if (kni->ops.config_mac_address)
			req->result = kni->ops.config_mac_address(
					kni->ops.port_id, req->mac_addr);
		else if (kni->ops.port_id != UINT16_MAX)
			req->result = kni_config_mac_address(
					kni->ops.port_id, req->mac_addr);
		break;
	case RTE_KNI_REQ_CHANGE_PROMISC: /* Change PROMISCUOUS MODE */
		if (kni->ops.config_promiscusity)
			req->result = kni->ops.config_promiscusity(
					kni->ops.port_id, req->promiscusity);
		else if (kni->ops.port_id != UINT16_MAX)
			req->result = kni_config_promiscusity(
					kni->ops.port_id, req->promiscusity);
		break;
	case RTE_KNI_REQ_CHANGE_ALLMULTI: /* Change ALLMULTICAST MODE */
		if (kni->ops.config_allmulticast)
			req->result = kni->ops.config_allmulticast(
					kni->ops.port_id, req->allmulti);
		else if (kni->ops.port_id != UINT16_MAX)
			req->result = kni_config_allmulticast(
					kni->ops.port_id, req->allmulti);
		break;
	default:
		RTE_LOG(ERR, KNI, "Unknown request id %u\n", req->req_id);
		req->result = -EINVAL;
		break;
	}

	/* Construct response mbuf and put it back to resp_q */
	ret = kni_fifo_put(kni->resp_q, (void **)&req, 1);
	if (ret != 1) {
		RTE_LOG(ERR, KNI, "Fail to put the mbuf back to resp_q\n");
		return -1; /* It is an error if the mbuf cannot be put back */
	}

	return 0;
}
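/*
 * Editor's sketch, not part of the original source: rte_kni_handle_request()
 * only polls, so applications service kernel requests from a control loop,
 * e.g.
 *
 *	while (running) {
 *		rte_kni_handle_request(kni);
 *		usleep(1000);
 *	}
 */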
unsigned
rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num)
{
	num = RTE_MIN(kni_fifo_free_count(kni->rx_q), num);
	void *phy_mbufs[num];
	unsigned int ret;
	unsigned int i;

	for (i = 0; i < num; i++)
		phy_mbufs[i] = va2pa_all(mbufs[i]);

	ret = kni_fifo_put(kni->rx_q, phy_mbufs, num);

	/* Get mbufs from free_q and then free them */
	kni_free_mbufs(kni);

	return ret;
}
unsigned
rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num)
{
	unsigned int ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num);

	/* If buffers removed, allocate mbufs and then put them into alloc_q */
	if (ret)
		kni_allocate_mbufs(kni);

	return ret;
}
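#ifdef KNI_USAGE_EXAMPLES
/*
 * Editor's sketch, not part of the original source: one iteration of a
 * typical NIC<->kernel forwarding loop built on the two burst calls above.
 * The example_ name is hypothetical and queue 0 is assumed.
 */
static void
example_kni_forward(struct rte_kni *kni, uint16_t port_id)
{
	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
	unsigned int nb, sent;

	/* NIC -> kernel: rte_kni_tx_burst() enqueues to the KNI rx_q */
	nb = rte_eth_rx_burst(port_id, 0, pkts, MAX_MBUF_BURST_NUM);
	sent = rte_kni_tx_burst(kni, pkts, nb);
	while (sent < nb)
		rte_pktmbuf_free(pkts[sent++]);

	/* kernel -> NIC: rte_kni_rx_burst() drains the KNI tx_q */
	nb = rte_kni_rx_burst(kni, pkts, MAX_MBUF_BURST_NUM);
	sent = rte_eth_tx_burst(port_id, 0, pkts, nb);
	while (sent < nb)
		rte_pktmbuf_free(pkts[sent++]);
}
#endif /* KNI_USAGE_EXAMPLES */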
static void
kni_free_mbufs(struct rte_kni *kni)
{
	int i, ret;
	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];

	ret = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
	if (likely(ret > 0)) {
		for (i = 0; i < ret; i++)
			rte_pktmbuf_free(pkts[i]);
	}
}
static void
kni_allocate_mbufs(struct rte_kni *kni)
{
	int i, ret;
	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
	void *phys[MAX_MBUF_BURST_NUM];
	int allocq_free;

	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
			 offsetof(struct rte_kni_mbuf, pool));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_addr) !=
			 offsetof(struct rte_kni_mbuf, buf_addr));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, next) !=
			 offsetof(struct rte_kni_mbuf, next));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
			 offsetof(struct rte_kni_mbuf, data_off));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			 offsetof(struct rte_kni_mbuf, data_len));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_kni_mbuf, pkt_len));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			 offsetof(struct rte_kni_mbuf, ol_flags));

	/* Check if pktmbuf pool has been configured */
	if (kni->pktmbuf_pool == NULL) {
		RTE_LOG(ERR, KNI, "No valid mempool for allocating mbufs\n");
		return;
	}
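	/*
	 * Editor's note, not part of the original source: the mask below
	 * computes the free-slot count modulo MAX_MBUF_BURST_NUM, which
	 * caps one refill at MAX_MBUF_BURST_NUM - 1 mbufs and, because
	 * KNI_FIFO_COUNT_MAX is a multiple of MAX_MBUF_BURST_NUM, never
	 * exceeds the ring's actual free space.
	 */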
	allocq_free = (kni->alloc_q->read - kni->alloc_q->write - 1)
			& (MAX_MBUF_BURST_NUM - 1);
	for (i = 0; i < allocq_free; i++) {
		pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
		if (unlikely(pkts[i] == NULL)) {
			/* Out of memory */
			RTE_LOG(ERR, KNI, "Out of memory\n");
			break;
		}
		phys[i] = va2pa(pkts[i]);
	}

	/* No pkt mbuf allocated */
	if (i <= 0)
		return;

	ret = kni_fifo_put(kni->alloc_q, phys, i);

	/* Check if any mbufs were not put into alloc_q, and then free them */
	if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
		int j;

		for (j = ret; j < i; j++)
			rte_pktmbuf_free(pkts[j]);
	}
}
struct rte_kni *
rte_kni_get(const char *name)
{
	struct rte_kni *kni;

	if (name == NULL || name[0] == '\0')
		return NULL;

	rte_mcfg_tailq_read_lock();

	kni = __rte_kni_get(name);

	rte_mcfg_tailq_read_unlock();

	return kni;
}
const char *
rte_kni_get_name(const struct rte_kni *kni)
{
	return kni->name;
}
static enum kni_ops_status
kni_check_request_register(struct rte_kni_ops *ops)
{
	/* check if KNI request ops has been registered */
	if (ops == NULL)
		return KNI_REQ_NO_REGISTER;

	if (ops->change_mtu == NULL
	    && ops->config_network_if == NULL
	    && ops->config_mac_address == NULL
	    && ops->config_promiscusity == NULL
	    && ops->config_allmulticast == NULL)
		return KNI_REQ_NO_REGISTER;

	return KNI_REQ_REGISTERED;
}
int
rte_kni_register_handlers(struct rte_kni *kni, struct rte_kni_ops *ops)
{
	enum kni_ops_status req_status;

	if (ops == NULL) {
		RTE_LOG(ERR, KNI, "Invalid KNI request operation.\n");
		return -1;
	}

	if (kni == NULL) {
		RTE_LOG(ERR, KNI, "Invalid kni info.\n");
		return -1;
	}

	req_status = kni_check_request_register(&kni->ops);
	if (req_status == KNI_REQ_REGISTERED) {
		RTE_LOG(ERR, KNI, "The KNI request operation has already been registered.\n");
		return -1;
	}

	memcpy(&kni->ops, ops, sizeof(struct rte_kni_ops));

	return 0;
}
int
rte_kni_unregister_handlers(struct rte_kni *kni)
{
	if (kni == NULL) {
		RTE_LOG(ERR, KNI, "Invalid kni info.\n");
		return -1;
	}

	memset(&kni->ops, 0, sizeof(struct rte_kni_ops));

	return 0;
}
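/*
 * Editor's sketch, not part of the original source: registering handlers
 * after creation. example_change_mtu is hypothetical and must match the
 * rte_kni_ops signature.
 *
 *	struct rte_kni_ops ops = {
 *		.port_id = port_id,
 *		.change_mtu = example_change_mtu,
 *	};
 *	if (rte_kni_register_handlers(kni, &ops) < 0)
 *		RTE_LOG(ERR, KNI, "Failed to register KNI handlers\n");
 */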
int
rte_kni_update_link(struct rte_kni *kni, unsigned int linkup)
{
	char path[64];
	char old_carrier[2];
	const char *new_carrier;
	int old_linkup;
	int fd, ret;

	if (kni == NULL)
		return -1;

	snprintf(path, sizeof(path), "/sys/devices/virtual/net/%s/carrier",
		kni->name);

	fd = open(path, O_RDWR);
	if (fd == -1) {
		RTE_LOG(ERR, KNI, "Failed to open file: %s.\n", path);
		return -1;
	}

	ret = read(fd, old_carrier, 2);
	if (ret < 1) {
		close(fd);
		return -1;
	}
	old_linkup = (old_carrier[0] == '1');

	new_carrier = linkup ? "1" : "0";
	ret = write(fd, new_carrier, 1);
	if (ret < 1) {
		RTE_LOG(ERR, KNI, "Failed to write file: %s.\n", path);
		close(fd);
		return -1;
	}

	close(fd);
	return old_linkup;
}
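/*
 * Editor's sketch, not part of the original source: mirroring the ethdev
 * link state into the kernel interface's carrier flag. The exact
 * rte_eth_link_get_nowait() return convention depends on the DPDK version.
 *
 *	struct rte_eth_link link;
 *	if (rte_eth_link_get_nowait(port_id, &link) == 0)
 *		rte_kni_update_link(kni, link.link_status);
 */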