/*-
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include <rte_log.h>
#include <rte_memory.h>
#include <rte_eal_memconfig.h>

#include "eal_filesystem.h"
#include "eal_vfio.h"
#include "eal_private.h"

/* per-process VFIO config */
static struct vfio_config vfio_cfg;

static int vfio_type1_dma_map(int);
static int vfio_spapr_dma_map(int);
static int vfio_noiommu_dma_map(int);

/* IOMMU types we support */
static const struct vfio_iommu_type iommu_types[] = {
	/* x86 IOMMU, otherwise known as type 1 */
	{ RTE_VFIO_TYPE1, "Type 1", &vfio_type1_dma_map},
	/* ppc64 IOMMU, otherwise known as sPAPR */
	{ RTE_VFIO_SPAPR, "sPAPR", &vfio_spapr_dma_map},
	/* IOMMU-less mode */
	{ RTE_VFIO_NOIOMMU, "No-IOMMU", &vfio_noiommu_dma_map},
};
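
/*
 * Resolve the VFIO group file descriptor for an IOMMU group number. An
 * already-tracked group returns its cached fd; the primary process opens the
 * group node directly, while a secondary process requests the fd from the
 * primary over the mp-sync socket. A return value of 0 means the group node
 * does not exist, i.e. the device is not bound to VFIO.
 */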
int
vfio_get_group_fd(int iommu_group_no)
{
	int i;
	int vfio_group_fd;
	char filename[PATH_MAX];
	struct vfio_group *cur_grp;

	/* check if we already have the group descriptor open */
	for (i = 0; i < VFIO_MAX_GROUPS; i++)
		if (vfio_cfg.vfio_groups[i].group_no == iommu_group_no)
			return vfio_cfg.vfio_groups[i].fd;

	/* let's first see if there is room for a new group */
	if (vfio_cfg.vfio_active_groups == VFIO_MAX_GROUPS) {
		RTE_LOG(ERR, EAL, "Maximum number of VFIO groups reached!\n");
		return -1;
	}

	/* now let's get an index for the new group */
	for (i = 0; i < VFIO_MAX_GROUPS; i++)
		if (vfio_cfg.vfio_groups[i].group_no == -1) {
			cur_grp = &vfio_cfg.vfio_groups[i];
			break;
		}

	/* this should not happen */
	if (i == VFIO_MAX_GROUPS) {
		RTE_LOG(ERR, EAL, "No VFIO group free slot found\n");
		return -1;
	}

	/* if primary, try to open the group */
	if (internal_config.process_type == RTE_PROC_PRIMARY) {
		/* try regular group format */
		snprintf(filename, sizeof(filename),
				VFIO_GROUP_FMT, iommu_group_no);
		vfio_group_fd = open(filename, O_RDWR);
		if (vfio_group_fd < 0) {
			/* if file not found, it's not an error */
			if (errno != ENOENT) {
				RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
						filename, strerror(errno));
				return -1;
			}

			/* special case: try no-IOMMU path as well */
			snprintf(filename, sizeof(filename),
					VFIO_NOIOMMU_GROUP_FMT, iommu_group_no);
			vfio_group_fd = open(filename, O_RDWR);
			if (vfio_group_fd < 0) {
				if (errno != ENOENT) {
					RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
							filename, strerror(errno));
					return -1;
				}
				/* file not found means no VFIO for this group */
				return 0;
			}
			/* noiommu group found */
		}

		cur_grp->group_no = iommu_group_no;
		cur_grp->fd = vfio_group_fd;
		vfio_cfg.vfio_active_groups++;
		return vfio_group_fd;
	}
	/* if we're in a secondary process, request the group fd from the
	 * primary process via our socket
	 */
	else {
		int socket_fd, ret;

		socket_fd = vfio_mp_sync_connect_to_primary();
		if (socket_fd < 0) {
			RTE_LOG(ERR, EAL, " cannot connect to primary process!\n");
			return -1;
		}
		if (vfio_mp_sync_send_request(socket_fd, SOCKET_REQ_GROUP) < 0) {
			RTE_LOG(ERR, EAL, " cannot request group fd!\n");
			close(socket_fd);
			return -1;
		}
		if (vfio_mp_sync_send_request(socket_fd, iommu_group_no) < 0) {
			RTE_LOG(ERR, EAL, " cannot send group number!\n");
			close(socket_fd);
			return -1;
		}
		ret = vfio_mp_sync_receive_request(socket_fd);
		switch (ret) {
		case SOCKET_NO_FD:
			close(socket_fd);
			return 0;
		case SOCKET_OK:
			vfio_group_fd = vfio_mp_sync_receive_fd(socket_fd);
			/* if we got the fd, store it and return it */
			if (vfio_group_fd > 0) {
				cur_grp->group_no = iommu_group_no;
				cur_grp->fd = vfio_group_fd;
				vfio_cfg.vfio_active_groups++;
				close(socket_fd);
				return vfio_group_fd;
			}
			/* fall-through on error */
		default:
			RTE_LOG(ERR, EAL, " cannot get group fd!\n");
			close(socket_fd);
			return -1;
		}
	}
}
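
/*
 * The helpers below map a group fd back to its slot in vfio_cfg and maintain
 * a per-group count of devices currently attached through that group.
 */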
static int
get_vfio_group_idx(int vfio_group_fd)
{
	unsigned int i;

	for (i = 0; i < VFIO_MAX_GROUPS; i++)
		if (vfio_cfg.vfio_groups[i].fd == vfio_group_fd)
			return i;
	return -1;
}

static void
vfio_group_device_get(int vfio_group_fd)
{
	int i;

	i = get_vfio_group_idx(vfio_group_fd);
	if (i < 0 || i > (VFIO_MAX_GROUPS - 1))
		RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
	else
		vfio_cfg.vfio_groups[i].devices++;
}

static void
vfio_group_device_put(int vfio_group_fd)
{
	int i;

	i = get_vfio_group_idx(vfio_group_fd);
	if (i < 0 || i > (VFIO_MAX_GROUPS - 1))
		RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
	else
		vfio_cfg.vfio_groups[i].devices--;
}

static int
vfio_group_device_count(int vfio_group_fd)
{
	int i;

	i = get_vfio_group_idx(vfio_group_fd);
	if (i < 0 || i > (VFIO_MAX_GROUPS - 1)) {
		RTE_LOG(ERR, EAL, " wrong vfio_group index (%d)\n", i);
		return -1;
	}

	return vfio_cfg.vfio_groups[i].devices;
}
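
/*
 * Release the vfio_cfg slot for a group. In the primary process this resets
 * the slot directly; a secondary process asks the primary to do it over the
 * mp-sync socket.
 */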
static int
clear_group(int vfio_group_fd)
{
	int i;
	int socket_fd, ret;

	if (internal_config.process_type == RTE_PROC_PRIMARY) {
		i = get_vfio_group_idx(vfio_group_fd);
		if (i < 0)
			return -1;
		vfio_cfg.vfio_groups[i].group_no = -1;
		vfio_cfg.vfio_groups[i].fd = -1;
		vfio_cfg.vfio_groups[i].devices = 0;
		vfio_cfg.vfio_active_groups--;
		return 0;
	}

	/* this is just for SECONDARY processes */
	socket_fd = vfio_mp_sync_connect_to_primary();
	if (socket_fd < 0) {
		RTE_LOG(ERR, EAL, " cannot connect to primary process!\n");
		return -1;
	}

	if (vfio_mp_sync_send_request(socket_fd, SOCKET_CLR_GROUP) < 0) {
		RTE_LOG(ERR, EAL, " cannot request clearing the group!\n");
		close(socket_fd);
		return -1;
	}

	if (vfio_mp_sync_send_request(socket_fd, vfio_group_fd) < 0) {
		RTE_LOG(ERR, EAL, " cannot send group fd!\n");
		close(socket_fd);
		return -1;
	}

	ret = vfio_mp_sync_receive_request(socket_fd);
	switch (ret) {
	case SOCKET_NO_FD:
		RTE_LOG(ERR, EAL, " BAD VFIO group fd!\n");
		close(socket_fd);
		break;
	case SOCKET_OK:
		close(socket_fd);
		return 0;
	case SOCKET_ERR:
		RTE_LOG(ERR, EAL, " Socket error\n");
		close(socket_fd);
		break;
	default:
		RTE_LOG(ERR, EAL, " UNKNOWN reply, %d\n", ret);
		close(socket_fd);
	}
	return -1;
}
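
/*
 * Set up a device for VFIO use: look up its IOMMU group, open the group,
 * verify that the group is viable, attach it to the container (selecting an
 * IOMMU type and programming DMA mappings when the first group is attached),
 * and finally obtain the device fd and its vfio_device_info.
 */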
int
vfio_setup_device(const char *sysfs_base, const char *dev_addr,
		int *vfio_dev_fd, struct vfio_device_info *device_info)
{
	struct vfio_group_status group_status = {
			.argsz = sizeof(group_status)
	};
	int vfio_group_fd;
	int iommu_group_no;
	int ret;

	/* get group number */
	ret = vfio_get_group_no(sysfs_base, dev_addr, &iommu_group_no);
	if (ret == 0) {
		RTE_LOG(WARNING, EAL,
			" %s not managed by VFIO driver, skipping\n",
			dev_addr);
		return 1;
	}

	/* if negative, something failed */
	if (ret < 0)
		return -1;

	/* get the actual group fd */
	vfio_group_fd = vfio_get_group_fd(iommu_group_no);
	if (vfio_group_fd < 0)
		return -1;

	/* if group_fd == 0, that means the device isn't managed by VFIO */
	if (vfio_group_fd == 0) {
		RTE_LOG(WARNING, EAL,
			" %s not managed by VFIO driver, skipping\n",
			dev_addr);
		return 1;
	}

	/*
	 * check if the group is viable, meaning that all devices in it are
	 * either bound to VFIO or not bound to anything
	 */
	ret = ioctl(vfio_group_fd, VFIO_GROUP_GET_STATUS, &group_status);
	if (ret) {
		RTE_LOG(ERR, EAL, " %s cannot get group status, "
				"error %i (%s)\n", dev_addr, errno, strerror(errno));
		close(vfio_group_fd);
		clear_group(vfio_group_fd);
		return -1;
	} else if (!(group_status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
		RTE_LOG(ERR, EAL, " %s VFIO group is not viable!\n", dev_addr);
		close(vfio_group_fd);
		clear_group(vfio_group_fd);
		return -1;
	}

	/* check if group does not have a container yet */
	if (!(group_status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {

		/* add group to a container */
		ret = ioctl(vfio_group_fd, VFIO_GROUP_SET_CONTAINER,
				&vfio_cfg.vfio_container_fd);
		if (ret) {
			RTE_LOG(ERR, EAL, " %s cannot add VFIO group to container, "
					"error %i (%s)\n", dev_addr, errno, strerror(errno));
			close(vfio_group_fd);
			clear_group(vfio_group_fd);
			return -1;
		}

		/*
		 * pick an IOMMU type and set up DMA mappings for the container
		 *
		 * needs to be done only once, only when the first group is
		 * assigned to a container and only in the primary process.
		 * Note this can happen several times with the hotplug
		 * functionality.
		 */
		if (internal_config.process_type == RTE_PROC_PRIMARY &&
				vfio_cfg.vfio_active_groups == 1) {
			/* select an IOMMU type which we will be using */
			const struct vfio_iommu_type *t =
				vfio_set_iommu_type(vfio_cfg.vfio_container_fd);
			if (!t) {
				RTE_LOG(ERR, EAL,
					" %s failed to select IOMMU type\n",
					dev_addr);
				close(vfio_group_fd);
				clear_group(vfio_group_fd);
				return -1;
			}
			ret = t->dma_map_func(vfio_cfg.vfio_container_fd);
			if (ret) {
				RTE_LOG(ERR, EAL,
					" %s DMA remapping failed, error %i (%s)\n",
					dev_addr, errno, strerror(errno));
				close(vfio_group_fd);
				clear_group(vfio_group_fd);
				return -1;
			}
		}
	}

	/* get a file descriptor for the device */
	*vfio_dev_fd = ioctl(vfio_group_fd, VFIO_GROUP_GET_DEVICE_FD, dev_addr);
	if (*vfio_dev_fd < 0) {
		/* if we cannot get a device fd, this implies a problem with
		 * the VFIO group or the container not having IOMMU configured.
		 */
		RTE_LOG(WARNING, EAL, "Getting a vfio_dev_fd for %s failed\n",
				dev_addr);
		close(vfio_group_fd);
		clear_group(vfio_group_fd);
		return -1;
	}

	/* test and setup the device */
	ret = ioctl(*vfio_dev_fd, VFIO_DEVICE_GET_INFO, device_info);
	if (ret) {
		RTE_LOG(ERR, EAL, " %s cannot get device info, "
				"error %i (%s)\n", dev_addr, errno,
				strerror(errno));
		close(*vfio_dev_fd);
		close(vfio_group_fd);
		clear_group(vfio_group_fd);
		return -1;
	}
	vfio_group_device_get(vfio_group_fd);

	return 0;
}
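
/*
 * Undo vfio_setup_device(): close the device fd, drop the per-group device
 * reference, and close and clear the group once no devices remain attached
 * to it.
 */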
int
vfio_release_device(const char *sysfs_base, const char *dev_addr,
		int vfio_dev_fd)
{
	struct vfio_group_status group_status = {
			.argsz = sizeof(group_status)
	};
	int vfio_group_fd;
	int iommu_group_no;
	int ret;

	/* get group number */
	ret = vfio_get_group_no(sysfs_base, dev_addr, &iommu_group_no);
	if (ret <= 0) {
		RTE_LOG(WARNING, EAL, " %s not managed by VFIO driver\n",
			dev_addr);
		/* this is an error at this point */
		return -1;
	}

	/* get the actual group fd */
	vfio_group_fd = vfio_get_group_fd(iommu_group_no);
	if (vfio_group_fd <= 0) {
		RTE_LOG(INFO, EAL, "vfio_get_group_fd failed for %s\n",
			dev_addr);
		return -1;
	}

	/* At this point we got an active group. Closing it will trigger
	 * container detachment. If this is the last active group, the VFIO
	 * kernel code will unset the container and the IOMMU mappings.
	 */

	/* close the device */
	if (close(vfio_dev_fd) < 0) {
		RTE_LOG(INFO, EAL, "Error when closing vfio_dev_fd for %s\n",
			dev_addr);
		return -1;
	}

	/* A VFIO group can have several devices attached. The group should
	 * be closed only when no devices remain attached to it.
	 */
	vfio_group_device_put(vfio_group_fd);
	if (!vfio_group_device_count(vfio_group_fd)) {

		if (close(vfio_group_fd) < 0) {
			RTE_LOG(INFO, EAL, "Error when closing vfio_group_fd for %s\n",
				dev_addr);
			return -1;
		}

		if (clear_group(vfio_group_fd) < 0) {
			RTE_LOG(INFO, EAL, "Error when clearing group for %s\n",
				dev_addr);
			return -1;
		}
	}

	return 0;
}
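
/*
 * Probe for VFIO support: reset the group table, check whether the kernel
 * module named by 'modname' is loaded, and try to open the VFIO container.
 * Failing to open the container is not fatal; it only leaves VFIO disabled.
 */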
int
vfio_enable(const char *modname)
{
	/* initialize group list */
	int i;
	int vfio_available;

	for (i = 0; i < VFIO_MAX_GROUPS; i++) {
		vfio_cfg.vfio_groups[i].fd = -1;
		vfio_cfg.vfio_groups[i].group_no = -1;
		vfio_cfg.vfio_groups[i].devices = 0;
	}

	/* inform the user that we are probing for VFIO */
	RTE_LOG(INFO, EAL, "Probing VFIO support...\n");

	/* check if vfio module is loaded */
	vfio_available = rte_eal_check_module(modname);

	/* return error directly */
	if (vfio_available == -1) {
		RTE_LOG(INFO, EAL, "Could not get loaded module details!\n");
		return -1;
	}

	/* return 0 if VFIO modules not loaded */
	if (vfio_available == 0) {
		RTE_LOG(DEBUG, EAL, "VFIO modules not loaded, "
			"skipping VFIO support...\n");
		return 0;
	}

	vfio_cfg.vfio_container_fd = vfio_get_container_fd();

	/* check if we have VFIO driver enabled */
	if (vfio_cfg.vfio_container_fd != -1) {
		RTE_LOG(NOTICE, EAL, "VFIO support initialized\n");
		vfio_cfg.vfio_enabled = 1;
	} else {
		RTE_LOG(NOTICE, EAL, "VFIO support could not be initialized\n");
	}

	return 0;
}

int
vfio_is_enabled(const char *modname)
{
	const int mod_available = rte_eal_check_module(modname) > 0;
	return vfio_cfg.vfio_enabled && mod_available;
}
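
/*
 * Walk the iommu_types table above and pick the first IOMMU type that the
 * kernel accepts for this container via VFIO_SET_IOMMU.
 */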
const struct vfio_iommu_type *
vfio_set_iommu_type(int vfio_container_fd)
{
	unsigned int idx;

	for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
		const struct vfio_iommu_type *t = &iommu_types[idx];

		int ret = ioctl(vfio_container_fd, VFIO_SET_IOMMU,
				t->type_id);
		if (!ret) {
			RTE_LOG(NOTICE, EAL, " using IOMMU type %d (%s)\n",
					t->type_id, t->name);
			return t;
		}
		/* not an error, there may be more supported IOMMU types */
		RTE_LOG(DEBUG, EAL, " set IOMMU type %d (%s) failed, "
				"error %i (%s)\n", t->type_id, t->name, errno,
				strerror(errno));
	}
	/* if we didn't find a suitable IOMMU type, fail */
	return NULL;
}
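
/*
 * Check that the container advertises at least one of the IOMMU types from
 * the table above through VFIO_CHECK_EXTENSION.
 */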
int
vfio_has_supported_extensions(int vfio_container_fd)
{
	int ret;
	unsigned int idx, n_extensions = 0;

	for (idx = 0; idx < RTE_DIM(iommu_types); idx++) {
		const struct vfio_iommu_type *t = &iommu_types[idx];

		ret = ioctl(vfio_container_fd, VFIO_CHECK_EXTENSION,
				t->type_id);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, " could not get IOMMU type, "
				"error %i (%s)\n", errno,
				strerror(errno));
			close(vfio_container_fd);
			return -1;
		} else if (ret == 1) {
			/* we found a supported extension */
			n_extensions++;
		}
		RTE_LOG(DEBUG, EAL, " IOMMU type %d (%s) is %s\n",
				t->type_id, t->name,
				ret ? "supported" : "not supported");
	}

	/* if we didn't find any supported IOMMU types, fail */
	if (!n_extensions) {
		close(vfio_container_fd);
		return -1;
	}

	return 0;
}

int
vfio_get_container_fd(void)
{
	int ret, vfio_container_fd;

	/* if we're in a primary process, try to open the container */
	if (internal_config.process_type == RTE_PROC_PRIMARY) {
		vfio_container_fd = open(VFIO_CONTAINER_PATH, O_RDWR);
		if (vfio_container_fd < 0) {
			RTE_LOG(ERR, EAL, " cannot open VFIO container, "
					"error %i (%s)\n", errno, strerror(errno));
			return -1;
		}

		/* check VFIO API version */
		ret = ioctl(vfio_container_fd, VFIO_GET_API_VERSION);
		if (ret != VFIO_API_VERSION) {
			if (ret < 0)
				RTE_LOG(ERR, EAL, " could not get VFIO API version, "
						"error %i (%s)\n", errno, strerror(errno));
			else
				RTE_LOG(ERR, EAL, " unsupported VFIO API version!\n");
			close(vfio_container_fd);
			return -1;
		}

		ret = vfio_has_supported_extensions(vfio_container_fd);
		if (ret) {
			RTE_LOG(ERR, EAL, " no supported IOMMU "
					"extensions found!\n");
			return -1;
		}

		return vfio_container_fd;
	} else {
		/*
		 * if we're in a secondary process, request container fd from
		 * the primary process via our socket
		 */
		int socket_fd;

		socket_fd = vfio_mp_sync_connect_to_primary();
		if (socket_fd < 0) {
			RTE_LOG(ERR, EAL, " cannot connect to primary process!\n");
			return -1;
		}
		if (vfio_mp_sync_send_request(socket_fd, SOCKET_REQ_CONTAINER) < 0) {
			RTE_LOG(ERR, EAL, " cannot request container fd!\n");
			close(socket_fd);
			return -1;
		}
		vfio_container_fd = vfio_mp_sync_receive_fd(socket_fd);
		if (vfio_container_fd < 0) {
			RTE_LOG(ERR, EAL, " cannot get container fd!\n");
			close(socket_fd);
			return -1;
		}
		close(socket_fd);
		return vfio_container_fd;
	}
}
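
/*
 * Derive the IOMMU group number from the device's sysfs entry. The
 * "iommu_group" symlink typically points at a path whose last component is
 * the group number (for example "../../../kernel/iommu_groups/<N>", shown
 * here only as an illustration), so the number is parsed from the last path
 * component of the link target.
 */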
int
vfio_get_group_no(const char *sysfs_base,
		const char *dev_addr, int *iommu_group_no)
{
	char linkname[PATH_MAX];
	char filename[PATH_MAX];
	char *tok[16], *group_tok, *end;
	int ret;

	memset(linkname, 0, sizeof(linkname));
	memset(filename, 0, sizeof(filename));

	/* try to find out IOMMU group for this device */
	snprintf(linkname, sizeof(linkname),
			"%s/%s/iommu_group", sysfs_base, dev_addr);

	ret = readlink(linkname, filename, sizeof(filename));

	/* if the link doesn't exist, no VFIO for us */
	if (ret < 0)
		return 0;

	ret = rte_strsplit(filename, sizeof(filename),
			tok, RTE_DIM(tok), '/');

	if (ret <= 0) {
		RTE_LOG(ERR, EAL, " %s cannot get IOMMU group\n", dev_addr);
		return -1;
	}

	/* IOMMU group is always the last token */
	errno = 0;
	group_tok = tok[ret - 1];
	end = group_tok;
	*iommu_group_no = strtol(group_tok, &end, 10);
	if ((end != group_tok && *end != '\0') || errno != 0) {
		RTE_LOG(ERR, EAL, " %s error parsing IOMMU number!\n", dev_addr);
		return -1;
	}

	return 1;
}
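
/*
 * DMA mapping callback for the Type 1 (x86) IOMMU: map every DPDK memory
 * segment into the container, using the segment's virtual address as the
 * IOVA in RTE_IOVA_VA mode and its physical address otherwise.
 */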
static int
vfio_type1_dma_map(int vfio_container_fd)
{
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
	int i, ret;

	/* map all DPDK segments for DMA */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		struct vfio_iommu_type1_dma_map dma_map;

		if (ms[i].addr == NULL)
			break;

		memset(&dma_map, 0, sizeof(dma_map));
		dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
		dma_map.vaddr = ms[i].addr_64;
		dma_map.size = ms[i].len;
		if (rte_eal_iova_mode() == RTE_IOVA_VA)
			dma_map.iova = dma_map.vaddr;
		else
			dma_map.iova = ms[i].phys_addr;
		dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE;

		ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
		if (ret) {
			RTE_LOG(ERR, EAL, " cannot set up DMA remapping, "
					"error %i (%s)\n", errno,
					strerror(errno));
			return -1;
		}
	}

	return 0;
}
static int
vfio_spapr_dma_map(int vfio_container_fd)
{
	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
	int i, ret;

	struct vfio_iommu_spapr_register_memory reg = {
		.argsz = sizeof(reg),
		.flags = 0
	};
	struct vfio_iommu_spapr_tce_info info = {
		.argsz = sizeof(info),
	};
	struct vfio_iommu_spapr_tce_create create = {
		.argsz = sizeof(create),
	};
	struct vfio_iommu_spapr_tce_remove remove = {
		.argsz = sizeof(remove),
	};

	/* query sPAPR IOMMU info */
	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
	if (ret) {
		RTE_LOG(ERR, EAL, " cannot get IOMMU info, "
				"error %i (%s)\n", errno, strerror(errno));
		return -1;
	}

	/* remove the default 32-bit DMA window */
	remove.start_addr = info.dma32_window_start;
	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
	if (ret) {
		RTE_LOG(ERR, EAL, " cannot remove default DMA window, "
				"error %i (%s)\n", errno, strerror(errno));
		return -1;
	}

	/* create DMA window from 0 to max(phys_addr + len) */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		if (ms[i].addr == NULL)
			break;

		create.window_size = RTE_MAX(create.window_size,
				ms[i].phys_addr + ms[i].len);
	}

	/* sPAPR requires window size to be a power of 2 */
	create.window_size = rte_align64pow2(create.window_size);
	create.page_shift = __builtin_ctzll(ms->hugepage_sz);
	create.levels = 1;

	ret = ioctl(vfio_container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create);
	if (ret) {
		RTE_LOG(ERR, EAL, " cannot create new DMA window, "
				"error %i (%s)\n", errno, strerror(errno));
		return -1;
	}

	if (create.start_addr != 0) {
		RTE_LOG(ERR, EAL, " DMA window start address != 0\n");
		return -1;
	}

	/* map all DPDK segments for DMA */
	for (i = 0; i < RTE_MAX_MEMSEG; i++) {
		struct vfio_iommu_type1_dma_map dma_map;

		if (ms[i].addr == NULL)
			break;

		reg.vaddr = (uintptr_t) ms[i].addr;
		reg.size = ms[i].len;
		ret = ioctl(vfio_container_fd,
			VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
		if (ret) {
			RTE_LOG(ERR, EAL, " cannot register vaddr for IOMMU, "
				"error %i (%s)\n", errno, strerror(errno));
			return -1;
		}

		memset(&dma_map, 0, sizeof(dma_map));
		dma_map.argsz = sizeof(struct vfio_iommu_type1_dma_map);
		dma_map.vaddr = ms[i].addr_64;
		dma_map.size = ms[i].len;
		if (rte_eal_iova_mode() == RTE_IOVA_VA)
			dma_map.iova = dma_map.vaddr;
		else
			dma_map.iova = ms[i].phys_addr;
		dma_map.flags = VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE;

		ret = ioctl(vfio_container_fd, VFIO_IOMMU_MAP_DMA, &dma_map);
		if (ret) {
			RTE_LOG(ERR, EAL, " cannot set up DMA remapping, "
				"error %i (%s)\n", errno, strerror(errno));
			return -1;
		}
	}

	return 0;
}

static int
vfio_noiommu_dma_map(int __rte_unused vfio_container_fd)
{
	/* No-IOMMU mode does not need DMA mapping */
	return 0;
}

int
vfio_noiommu_is_enabled(void)
{
	int fd, ret, cnt __rte_unused;
	char c;

	ret = -1;
	fd = open(VFIO_NOIOMMU_MODE, O_RDONLY);
	if (fd < 0)
		return -1;

	cnt = read(fd, &c, 1);