dpdk.git: drivers/crypto/ccp/ccp_dev.c (commit 724b5bd)
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 */

#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/file.h>
#include <unistd.h>

#include <rte_hexdump.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "ccp_dev.h"
#include "ccp_pci.h"
#include "ccp_pmd_private.h"

struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
static int ccp_dev_id;

static const struct rte_memzone *
ccp_queue_dma_zone_reserve(const char *queue_name,
                           uint32_t queue_size,
                           int socket_id)
{
        const struct rte_memzone *mz;

        mz = rte_memzone_lookup(queue_name);
        if (mz != NULL) {
                if (((size_t)queue_size <= mz->len) &&
                    ((socket_id == SOCKET_ID_ANY) ||
                     (socket_id == mz->socket_id))) {
                        CCP_LOG_INFO("re-use memzone already "
                                     "allocated for %s", queue_name);
                        return mz;
                }
                CCP_LOG_ERR("Incompatible memzone already "
                            "allocated %s, size %u, socket %d. "
                            "Requested size %u, socket %d",
                            queue_name, (uint32_t)mz->len,
                            mz->socket_id, queue_size, socket_id);
                return NULL;
        }

        CCP_LOG_INFO("Allocate memzone for %s, size %u on socket %d",
                     queue_name, queue_size, socket_id);

        return rte_memzone_reserve_aligned(queue_name, queue_size,
                        socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
}

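/* bitmap support apis
 *
 * Note: WORD_OFFSET()/BIT_OFFSET() and BITS_PER_WORD are assumed to come
 * from ccp_dev.h, mapping a bit index n to its containing unsigned long
 * word and the bit position within it. The set/clear helpers use the
 * legacy GCC __sync builtins so concurrent updates to a shared map stay
 * atomic.
 */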
static inline void
ccp_set_bit(unsigned long *bitmap, int n)
{
        __sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)));
}

static inline void
ccp_clear_bit(unsigned long *bitmap, int n)
{
        __sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)));
}

static inline uint32_t
ccp_get_bit(unsigned long *bitmap, int n)
{
        /* shift at word width with 1UL, matching the helpers above */
        return ((bitmap[WORD_OFFSET(n)] & (1UL << BIT_OFFSET(n))) != 0);
}

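/*
 * Find-first-zero: returns the index of the first clear bit in @word, or
 * BITS_PER_WORD when every bit is set (e.g. ccp_ffz(0x3UL) == 2).
 */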
static inline uint32_t
ccp_ffz(unsigned long word)
{
        unsigned long first_zero;

        first_zero = __builtin_ffsl(~word);
        return first_zero ? (first_zero - 1) :
                BITS_PER_WORD;
}

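/*
 * Scan a multi-word bitmap for the first zero bit. Returns @limit when the
 * words covering the map hold no zero bit; callers range-check the result,
 * since a zero bit found in the last word may lie beyond @limit.
 */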
static inline uint32_t
ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit)
{
        uint32_t i;
        uint32_t nwords = 0;

        nwords = (limit - 1) / BITS_PER_WORD + 1;
        for (i = 0; i < nwords; i++) {
                if (addr[i] == 0UL)
                        return i * BITS_PER_WORD;
                if (addr[i] < ~(0UL))
                        break;
        }
        return (i == nwords) ? limit : i * BITS_PER_WORD + ccp_ffz(addr[i]);
}

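/*
 * Range set/clear walk the map word by word. CCP_BITMAP_FIRST_WORD_MASK()
 * and CCP_BITMAP_LAST_WORD_MASK() (assumed from ccp_dev.h) trim the
 * partial words at either end of the [start, start + len) range.
 */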
static void
ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
{
        unsigned long *p = map + WORD_OFFSET(start);
        const unsigned int size = start + len;
        int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD);
        unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start);

        while (len - bits_to_set >= 0) {
                *p |= mask_to_set;
                len -= bits_to_set;
                bits_to_set = BITS_PER_WORD;
                mask_to_set = ~0UL;
                p++;
        }
        if (len) {
                mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size);
                *p |= mask_to_set;
        }
}

static void
ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
{
        unsigned long *p = map + WORD_OFFSET(start);
        const unsigned int size = start + len;
        int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
        unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);

        while (len - bits_to_clear >= 0) {
                *p &= ~mask_to_clear;
                len -= bits_to_clear;
                bits_to_clear = BITS_PER_WORD;
                mask_to_clear = ~0UL;
                p++;
        }
        if (len) {
                mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
                *p &= ~mask_to_clear;
        }
}

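/*
 * Core of the next-bit search: with @invert == 0 this finds the next set
 * bit, with @invert == ~0UL the XOR flips every word so the same scan
 * finds the next zero bit. Returns @nbits when no bit is found.
 */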
static unsigned long
_ccp_find_next_bit(const unsigned long *addr,
                   unsigned long nbits,
                   unsigned long start,
                   unsigned long invert)
{
        unsigned long tmp;

        if (!nbits || start >= nbits)
                return nbits;

        tmp = addr[start / BITS_PER_WORD] ^ invert;

        /* Handle 1st word. */
        tmp &= CCP_BITMAP_FIRST_WORD_MASK(start);
        start = ccp_round_down(start, BITS_PER_WORD);

        while (!tmp) {
                start += BITS_PER_WORD;
                if (start >= nbits)
                        return nbits;

                tmp = addr[start / BITS_PER_WORD] ^ invert;
        }

        /* use the long variant of ffs(): tmp is an unsigned long and the
         * plain int version would miss bits above 31
         */
        return RTE_MIN(start + (__builtin_ffsl(tmp) - 1), nbits);
}

static unsigned long
ccp_find_next_bit(const unsigned long *addr,
                  unsigned long size,
                  unsigned long offset)
{
        return _ccp_find_next_bit(addr, size, offset, 0UL);
}

static unsigned long
ccp_find_next_zero_bit(const unsigned long *addr,
                       unsigned long size,
                       unsigned long offset)
{
        return _ccp_find_next_bit(addr, size, offset, ~0UL);
}

/**
 * ccp_bitmap_find_next_zero_area - find a contiguous zero area
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 *
 * Returns the index of the area on success, or a value greater than @size
 * when no large enough zero area could be found.
 */
static unsigned long
ccp_bitmap_find_next_zero_area(unsigned long *map,
                               unsigned long size,
                               unsigned long start,
                               unsigned int nr)
{
        unsigned long index, end, i;

again:
        index = ccp_find_next_zero_bit(map, size, start);

        end = index + nr;
        if (end > size)
                return end;
        i = ccp_find_next_bit(map, end, index);
        if (i < end) {
                start = i + 1;
                goto again;
        }
        return index;
}

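/*
 * Allocate @count contiguous LSB slots for a queue: first from the queue's
 * private LSB region, then under the lsb_lock from the regions shared by
 * all queues. Returns 0 when no slots are available; note that 0 is also a
 * legitimate allocation at slot 0 of private LSB 0.
 */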
static uint32_t
ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
{
        struct ccp_device *ccp;
        int start;

        /* First look at the map for the queue */
        if (cmd_q->lsb >= 0) {
                start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
                                                                 LSB_SIZE, 0,
                                                                 count);
                if (start < LSB_SIZE) {
                        ccp_bitmap_set(cmd_q->lsbmap, start, count);
                        return start + cmd_q->lsb * LSB_SIZE;
                }
        }

        /* try to get an entry from the shared blocks */
        ccp = cmd_q->dev;

        rte_spinlock_lock(&ccp->lsb_lock);

        start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
                                                    MAX_LSB_CNT * LSB_SIZE,
                                                    0, count);
        if (start <= MAX_LSB_CNT * LSB_SIZE) {
                ccp_bitmap_set(ccp->lsbmap, start, count);
                rte_spinlock_unlock(&ccp->lsb_lock);
                return start * LSB_ITEM_SIZE;
        }
        CCP_LOG_ERR("NO LSBs available");

        rte_spinlock_unlock(&ccp->lsb_lock);

        return 0;
}

static void __rte_unused
ccp_lsb_free(struct ccp_queue *cmd_q,
             unsigned int start,
             unsigned int count)
{
        int lsbno = start / LSB_SIZE;

        if (!start)
                return;

        if (cmd_q->lsb == lsbno) {
                /* An entry from the private LSB */
                ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
        } else {
                /* From the shared LSBs */
                struct ccp_device *ccp = cmd_q->dev;

                rte_spinlock_lock(&ccp->lsb_lock);
                ccp_bitmap_clear(ccp->lsbmap, start, count);
                rte_spinlock_unlock(&ccp->lsb_lock);
        }
}

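/*
 * @status is the queue-access view read from the LSB public mask
 * registers: MAX_LSB_CNT fields of LSB_REGION_WIDTH bits, where field j
 * holds the mask of queues allowed to touch LSB region j.
 */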
static int
ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
{
        int q_mask = 1 << cmd_q->id;
        int weight = 0;
        int j;

        /* Build a bit mask to know which LSBs this queue has access to.
         * Don't bother with segment 0 as it has special privileges.
         */
        cmd_q->lsbmask = 0;
        status >>= LSB_REGION_WIDTH;
        for (j = 1; j < MAX_LSB_CNT; j++) {
                if (status & q_mask)
                        ccp_set_bit(&cmd_q->lsbmask, j);

                status >>= LSB_REGION_WIDTH;
        }

        for (j = 0; j < MAX_LSB_CNT; j++)
                if (ccp_get_bit(&cmd_q->lsbmask, j))
                        weight++;

        printf("Queue %d can access %d LSB regions of mask 0x%lx\n",
               (int)cmd_q->id, weight, cmd_q->lsbmask);

        return weight ? 0 : -EINVAL;
}

static int
ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
                             int lsb_cnt, int n_lsbs,
                             unsigned long *lsb_pub)
{
        unsigned long qlsb = 0;
        int bitno = 0;
        int qlsb_wgt = 0;
        int i, j;

        /* For each queue:
         * If the count of potential LSBs available to a queue matches the
         * ordinal given to us in lsb_cnt:
         * Copy the mask of possible LSBs for this queue into "qlsb";
         * For each bit in qlsb, see if the corresponding bit in the
         * aggregation mask is set; if so, we have a match.
         *     If we have a match, clear the bit in the aggregation to
         *     mark it as no longer available.
         *     If there is no match, clear the bit in qlsb and keep looking.
         */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct ccp_queue *cmd_q = &ccp->cmd_q[i];

                qlsb_wgt = 0;
                for (j = 0; j < MAX_LSB_CNT; j++)
                        if (ccp_get_bit(&cmd_q->lsbmask, j))
                                qlsb_wgt++;

                if (qlsb_wgt == lsb_cnt) {
                        qlsb = cmd_q->lsbmask;

                        /* long variant of ffs(): qlsb is an unsigned long.
                         * bitno drops to -1 when qlsb runs out of bits, so
                         * guard the loop against a negative index.
                         */
                        bitno = __builtin_ffsl(qlsb) - 1;
                        while (bitno >= 0 && bitno < MAX_LSB_CNT) {
                                if (ccp_get_bit(lsb_pub, bitno)) {
                                        /* We found an available LSB
                                         * that this queue can access
                                         */
                                        cmd_q->lsb = bitno;
                                        ccp_clear_bit(lsb_pub, bitno);
                                        break;
                                }
                                ccp_clear_bit(&qlsb, bitno);
                                bitno = __builtin_ffsl(qlsb) - 1;
                        }
                        if (bitno < 0 || bitno >= MAX_LSB_CNT)
                                return -EINVAL;
                        n_lsbs--;
                }
        }
        return n_lsbs;
}

/* For each queue, from the most- to least-constrained:
 * find an LSB that can be assigned to the queue. If there are N queues that
 * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
 * dedicated LSB. Remaining LSB regions become a shared resource.
 * If we have fewer LSBs than queues, all LSB regions become shared
 * resources.
 */
static int
ccp_assign_lsbs(struct ccp_device *ccp)
{
        unsigned long lsb_pub = 0, qlsb = 0;
        int n_lsbs = 0;
        int bitno;
        int i, lsb_cnt;
        int rc = 0;

        rte_spinlock_init(&ccp->lsb_lock);

        /* Create an aggregate bitmap to get a total count of available LSBs */
        for (i = 0; i < ccp->cmd_q_count; i++)
                lsb_pub |= ccp->cmd_q[i].lsbmask;

        for (i = 0; i < MAX_LSB_CNT; i++)
                if (ccp_get_bit(&lsb_pub, i))
                        n_lsbs++;

        if (n_lsbs >= ccp->cmd_q_count) {
                /* We have enough LSBs to give every queue a private LSB.
                 * Brute force search to start with the queues that are more
                 * constrained in LSB choice. When an LSB is privately
                 * assigned, it is removed from the public mask.
                 * This is an ugly N squared algorithm with some optimization.
                 */
                for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
                     lsb_cnt++) {
                        rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
                                                          &lsb_pub);
                        if (rc < 0)
                                return -EINVAL;
                        n_lsbs = rc;
                }
        }

        rc = 0;
        /* What's left of the LSBs, according to the public mask, now become
         * shared. Any zero bits in the lsb_pub mask represent an LSB region
         * that can't be used as a shared resource, so mark the LSB slots for
         * them as "in use".
         */
        qlsb = lsb_pub;
        bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
        while (bitno < MAX_LSB_CNT) {
                ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
                ccp_set_bit(&qlsb, bitno);
                bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
        }

        return rc;
}

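/*
 * Probe-time initialization: program the global engine registers, discover
 * command queues from Q_MASK_REG, reserve IOVA-contiguous ring memory per
 * queue, build the LSB access masks and pre-allocate the slots used for
 * keys, IVs, SHA state and HMAC.
 */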
static int
ccp_add_device(struct ccp_device *dev, int type)
{
        int i;
        uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
        uint64_t status;
        struct ccp_queue *cmd_q;
        const struct rte_memzone *q_mz;
        void *vaddr;

        if (dev == NULL)
                return -1;

        dev->id = ccp_dev_id++;
        dev->qidx = 0;
        vaddr = (void *)(dev->pci.mem_resource[2].addr);

        if (type == CCP_VERSION_5B) {
                /* One-time engine setup for version 5B parts: TRNG control,
                 * queue mask/priority, command timeout, private LSB masks
                 * and clock gating.
                 */
                CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
                CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
                for (i = 0; i < 12; i++) {
                        CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
                                      CCP_READ_REG(vaddr, TRNG_OUT_REG));
                }
                CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
                CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
                CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);

                CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
                CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);

                CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
        }
        CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);

        /* Copy the private LSB mask to the public registers */
        status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
        status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
        CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
        CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
        /* the low mask register carries 30 bits, so the high half is
         * shifted by 30, not 32
         */
        status = ((uint64_t)status_hi << 30) | ((uint64_t)status_lo);

        dev->cmd_q_count = 0;
        /* Find available queues */
        qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
        for (i = 0; i < MAX_HW_QUEUES; i++) {
                if (!(qmr & (1 << i)))
                        continue;
                cmd_q = &dev->cmd_q[dev->cmd_q_count++];
                cmd_q->dev = dev;
                cmd_q->id = i;
                cmd_q->qidx = 0;
                cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);

                cmd_q->reg_base = (uint8_t *)vaddr +
                        CMD_Q_STATUS_INCR * (i + 1);

                /* CCP queue memory */
                snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
                         "%s_%d_%s_%d_%s",
                         "ccp_dev",
                         (int)dev->id, "queue",
                         (int)cmd_q->id, "mem");
                q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
                                                  cmd_q->qsize, SOCKET_ID_ANY);
                if (q_mz == NULL) {
                        CCP_LOG_ERR("Failed to reserve queue memzone");
                        return -1;
                }
                cmd_q->qbase_addr = (void *)q_mz->addr;
                cmd_q->qbase_desc = (void *)q_mz->addr;
                cmd_q->qbase_phys_addr = q_mz->phys_addr;

                cmd_q->qcontrol = 0;
                /* init control reg to zero */
                CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
                              cmd_q->qcontrol);

                /* Disable the interrupts */
                CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
                CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
                CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);

                /* Clear the interrupts */
                CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
                              ALL_INTERRUPTS);

                /* Configure size of each virtual queue accessible to host */
                cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
                cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;

                dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
                CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
                              (uint32_t)dma_addr_lo);
                CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
                              (uint32_t)dma_addr_lo);

                dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
                cmd_q->qcontrol |= (dma_addr_hi << 16);
                CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
                              cmd_q->qcontrol);

                /* create LSB Mask map */
                if (ccp_find_lsb_regions(cmd_q, status))
                        CCP_LOG_ERR("queue doesn't have lsb regions");
                cmd_q->lsb = -1;

                rte_atomic64_init(&cmd_q->free_slots);
                rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
                /* unused slot barrier b/w H&T */
        }

        if (ccp_assign_lsbs(dev))
                CCP_LOG_ERR("Unable to assign lsb region");

        /* pre-allocate LSB slots */
        for (i = 0; i < dev->cmd_q_count; i++) {
                dev->cmd_q[i].sb_key =
                        ccp_lsb_alloc(&dev->cmd_q[i], 1);
                dev->cmd_q[i].sb_iv =
                        ccp_lsb_alloc(&dev->cmd_q[i], 1);
                dev->cmd_q[i].sb_sha =
                        ccp_lsb_alloc(&dev->cmd_q[i], 2);
                dev->cmd_q[i].sb_hmac =
                        ccp_lsb_alloc(&dev->cmd_q[i], 2);
        }

        TAILQ_INSERT_TAIL(&ccp_list, dev, next);
        return 0;
}

static void
ccp_remove_device(struct ccp_device *dev)
{
        if (dev == NULL)
                return;

        TAILQ_REMOVE(&ccp_list, dev, next);
}

static int
is_ccp_device(const char *dirname,
              const struct rte_pci_id *ccp_id,
              int *type)
{
        char filename[PATH_MAX];
        const struct rte_pci_id *id;
        uint16_t vendor, device_id;
        int i;
        unsigned long tmp;

        /* get vendor id */
        snprintf(filename, sizeof(filename), "%s/vendor", dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                return 0;
        vendor = (uint16_t)tmp;

        /* get device id */
        snprintf(filename, sizeof(filename), "%s/device", dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                return 0;
        device_id = (uint16_t)tmp;

        for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
                if (vendor == id->vendor_id &&
                    device_id == id->device_id) {
                        *type = i;
                        return 1; /* Matched device */
                }
        }
        return 0;
}

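/*
 * Read the device identity out of sysfs, wait for the matching uio node to
 * appear, take an exclusive flock() on it so only one process drives the
 * device, then mmap() the BARs through the sysfs resource%d files.
 */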
static int
ccp_probe_device(const char *dirname, uint16_t domain,
                 uint8_t bus, uint8_t devid,
                 uint8_t function, int ccp_type)
{
        struct ccp_device *ccp_dev = NULL;
        struct rte_pci_device *pci;
        char filename[PATH_MAX];
        unsigned long tmp;
        int uio_fd = -1, i, uio_num;
        char uio_devname[PATH_MAX];
        void *map_addr;

        ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
                              RTE_CACHE_LINE_SIZE);
        if (ccp_dev == NULL)
                goto fail;
        pci = &(ccp_dev->pci);

        pci->addr.domain = domain;
        pci->addr.bus = bus;
        pci->addr.devid = devid;
        pci->addr.function = function;

        /* get vendor id */
        snprintf(filename, sizeof(filename), "%s/vendor", dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                goto fail;
        pci->id.vendor_id = (uint16_t)tmp;

        /* get device id */
        snprintf(filename, sizeof(filename), "%s/device", dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                goto fail;
        pci->id.device_id = (uint16_t)tmp;

        /* get subsystem_vendor id */
        snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
                        dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                goto fail;
        pci->id.subsystem_vendor_id = (uint16_t)tmp;

        /* get subsystem_device id */
        snprintf(filename, sizeof(filename), "%s/subsystem_device",
                        dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                goto fail;
        pci->id.subsystem_device_id = (uint16_t)tmp;

        /* get class_id */
        snprintf(filename, sizeof(filename), "%s/class",
                        dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                goto fail;
        /* the least significant 24 bits are valid: class, subclass,
         * program interface
         */
        pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;

        /* parse resources */
        snprintf(filename, sizeof(filename), "%s/resource", dirname);
        if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
                goto fail;

        uio_num = ccp_find_uio_devname(dirname);
        if (uio_num < 0) {
                /*
                 * It may take time for the uio device to appear;
                 * wait here and try again.
                 */
                usleep(100000);
                uio_num = ccp_find_uio_devname(dirname);
                if (uio_num < 0)
                        goto fail;
        }
        snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%d", uio_num);

        uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
        if (uio_fd < 0)
                goto fail;
        if (flock(uio_fd, LOCK_EX | LOCK_NB))
                goto fail;

        /* Map the PCI memory resource of device */
        for (i = 0; i < PCI_MAX_RESOURCE; i++) {

                char devname[PATH_MAX];
                int res_fd;

                if (pci->mem_resource[i].phys_addr == 0)
                        continue;
                snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
                res_fd = open(devname, O_RDWR);
                if (res_fd < 0)
                        goto fail;
                map_addr = mmap(NULL, pci->mem_resource[i].len,
                                PROT_READ | PROT_WRITE,
                                MAP_SHARED, res_fd, 0);
                /* the mapping stays valid after the fd is closed */
                close(res_fd);
                if (map_addr == MAP_FAILED)
                        goto fail;

                pci->mem_resource[i].addr = map_addr;
        }

        /* device is valid, add in list */
        if (ccp_add_device(ccp_dev, ccp_type)) {
                ccp_remove_device(ccp_dev);
                goto fail;
        }

        return 0;
fail:
        CCP_LOG_ERR("CCP Device probe failed");
        if (uio_fd >= 0)
                close(uio_fd);
        if (ccp_dev)
                rte_free(ccp_dev);
        return -1;
}

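/*
 * Walk SYSFS_PCI_DEVICES, match each entry against @ccp_id and probe every
 * CCP found. Returns the number of devices successfully probed, or -1 when
 * the pci_uio module check or the sysfs scan fails.
 */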
int
ccp_probe_devices(const struct rte_pci_id *ccp_id)
{
        int dev_cnt = 0;
        int ccp_type = 0;
        struct dirent *d;
        DIR *dir;
        int ret = 0;
        int module_idx = 0;
        uint16_t domain;
        uint8_t bus, devid, function;
        char dirname[PATH_MAX];

        module_idx = ccp_check_pci_uio_module();
        if (module_idx < 0)
                return -1;

        TAILQ_INIT(&ccp_list);
        dir = opendir(SYSFS_PCI_DEVICES);
        if (dir == NULL)
                return -1;
        while ((d = readdir(dir)) != NULL) {
                if (d->d_name[0] == '.')
                        continue;
                if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
                                        &domain, &bus, &devid, &function) != 0)
                        continue;
                snprintf(dirname, sizeof(dirname), "%s/%s",
                         SYSFS_PCI_DEVICES, d->d_name);
                if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
                        printf("CCP: Detected CCP device with ID = 0x%x\n",
                               ccp_id[ccp_type].device_id);
                        ret = ccp_probe_device(dirname, domain, bus, devid,
                                               function, ccp_type);
                        if (ret == 0)
                                dev_cnt++;
                }
        }
        closedir(dir);
        return dev_cnt;
}