/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 */

#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/file.h>
#include <unistd.h>

#include <rte_hexdump.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "ccp_dev.h"
#include "ccp_pci.h"
#include "ccp_pmd_private.h"

struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
static int ccp_dev_id;

int
ccp_dev_start(struct rte_cryptodev *dev)
{
        struct ccp_private *priv = dev->data->dev_private;

        priv->last_dev = TAILQ_FIRST(&ccp_list);
        return 0;
}

static const struct rte_memzone *
ccp_queue_dma_zone_reserve(const char *queue_name,
                           uint32_t queue_size,
                           int socket_id)
{
        const struct rte_memzone *mz;

        mz = rte_memzone_lookup(queue_name);
        if (mz != NULL) {
                if (((size_t)queue_size <= mz->len) &&
                    ((socket_id == SOCKET_ID_ANY) ||
                     (socket_id == mz->socket_id))) {
                        CCP_LOG_INFO("re-use memzone already "
                                     "allocated for %s", queue_name);
                        return mz;
                }
                CCP_LOG_ERR("Incompatible memzone already "
                            "allocated %s, size %u, socket %d. "
                            "Requested size %u, socket %d",
                            queue_name, (uint32_t)mz->len,
                            mz->socket_id, queue_size, socket_id);
                return NULL;
        }

        CCP_LOG_INFO("Allocate memzone for %s, size %u on socket %d",
                     queue_name, queue_size, socket_id);

        return rte_memzone_reserve_aligned(queue_name, queue_size,
                        socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
}

/* bitmap support apis */
static inline void
ccp_set_bit(unsigned long *bitmap, int n)
{
        __sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)));
}

static inline void
ccp_clear_bit(unsigned long *bitmap, int n)
{
        __sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)));
}

static inline uint32_t
ccp_get_bit(unsigned long *bitmap, int n)
{
        return ((bitmap[WORD_OFFSET(n)] & (1UL << BIT_OFFSET(n))) != 0);
}

static inline uint32_t
ccp_ffz(unsigned long word)
{
        unsigned long first_zero;

        first_zero = __builtin_ffsl(~word);
        return first_zero ? (first_zero - 1) :
                BITS_PER_WORD;
}

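/*
 * Worked example (illustrative; assumes 64-bit unsigned long, so
 * BITS_PER_WORD == 64): ccp_ffz(0x7UL) == 3, the first clear bit above
 * the three set low bits, while ccp_ffz(~0UL) == BITS_PER_WORD, meaning
 * "no zero bit in this word".
 */
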
static inline uint32_t
ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit)
{
        uint32_t i;
        uint32_t nwords = 0;

        nwords = (limit - 1) / BITS_PER_WORD + 1;
        for (i = 0; i < nwords; i++) {
                if (addr[i] == 0UL)
                        return i * BITS_PER_WORD;
                if (addr[i] < ~(0UL))
                        break;
        }
        return (i == nwords) ? limit : i * BITS_PER_WORD + ccp_ffz(addr[i]);
}

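/*
 * Example (64-bit words): with addr = { ~0UL, 0x1UL } and limit == 128,
 * word 0 is full, word 1 has only bit 0 set, so the scan stops at word 1
 * and returns 64 + ccp_ffz(0x1) == 65.
 */
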
static void
ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
{
        unsigned long *p = map + WORD_OFFSET(start);
        const unsigned int size = start + len;
        int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD);
        unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start);

        while (len - bits_to_set >= 0) {
                *p |= mask_to_set;
                len -= bits_to_set;
                bits_to_set = BITS_PER_WORD;
                mask_to_set = ~0UL;
                p++;
        }
        if (len) {
                mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size);
                *p |= mask_to_set;
        }
}

static void
ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
{
        unsigned long *p = map + WORD_OFFSET(start);
        const unsigned int size = start + len;
        int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
        unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);

        while (len - bits_to_clear >= 0) {
                *p &= ~mask_to_clear;
                len -= bits_to_clear;
                bits_to_clear = BITS_PER_WORD;
                mask_to_clear = ~0UL;
                p++;
        }
        if (len) {
                mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
                *p &= ~mask_to_clear;
        }
}

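/*
 * Example (64-bit words): ccp_bitmap_set(map, 62, 4) sets bits 62-63 of
 * map[0] on the first loop pass and bits 0-1 of map[1] in the tail path,
 * i.e. a range may straddle word boundaries. ccp_bitmap_clear() walks the
 * same way with the masks inverted.
 */
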
static unsigned long
_ccp_find_next_bit(const unsigned long *addr,
                   unsigned long nbits,
                   unsigned long start,
                   unsigned long invert)
{
        unsigned long tmp;

        if (!nbits || start >= nbits)
                return nbits;

        tmp = addr[start / BITS_PER_WORD] ^ invert;

        /* Handle 1st word. */
        tmp &= CCP_BITMAP_FIRST_WORD_MASK(start);
        start = ccp_round_down(start, BITS_PER_WORD);

        while (!tmp) {
                start += BITS_PER_WORD;
                if (start >= nbits)
                        return nbits;

                tmp = addr[start / BITS_PER_WORD] ^ invert;
        }

        return RTE_MIN(start + (__builtin_ffsl(tmp) - 1), nbits);
}

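/*
 * The invert parameter folds two searches into one helper: XOR with 0UL
 * leaves the words unchanged (find the next set bit), while XOR with ~0UL
 * flips every bit so that zero bits appear set (find the next zero bit).
 */
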
static unsigned long
ccp_find_next_bit(const unsigned long *addr,
                  unsigned long size,
                  unsigned long offset)
{
        return _ccp_find_next_bit(addr, size, offset, 0UL);
}

static unsigned long
ccp_find_next_zero_bit(const unsigned long *addr,
                       unsigned long size,
                       unsigned long offset)
{
        return _ccp_find_next_bit(addr, size, offset, ~0UL);
}

/**
 * ccp_bitmap_find_next_zero_area - find a contiguous aligned zero area
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bit number to start searching at
 * @nr: The number of zeroed bits we're looking for
 */
static unsigned long
ccp_bitmap_find_next_zero_area(unsigned long *map,
                               unsigned long size,
                               unsigned long start,
                               unsigned int nr)
{
        unsigned long index, end, i;

again:
        index = ccp_find_next_zero_bit(map, size, start);

        end = index + nr;
        if (end > size)
                return end;
        i = ccp_find_next_bit(map, end, index);
        if (i < end) {
                start = i + 1;
                goto again;
        }
        return index;
}

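/*
 * Example: with bits 0-2 of map set and size == 64,
 * ccp_bitmap_find_next_zero_area(map, 64, 0, 2) finds the first zero bit
 * at index 3, sees no set bit in [3, 5) and returns 3. On failure the
 * function returns a value greater than size, so callers must treat any
 * return >= size as "no area found".
 */
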
static uint32_t
ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
{
        struct ccp_device *ccp;
        int start;

        /* First look at the map for the queue */
        if (cmd_q->lsb >= 0) {
                start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
                                                                 LSB_SIZE, 0,
                                                                 count);
                if (start < LSB_SIZE) {
                        ccp_bitmap_set(cmd_q->lsbmap, start, count);
                        return start + cmd_q->lsb * LSB_SIZE;
                }
        }

        /* try to get an entry from the shared blocks */
        ccp = cmd_q->dev;

        rte_spinlock_lock(&ccp->lsb_lock);

        start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
                                                    MAX_LSB_CNT * LSB_SIZE,
                                                    0, count);
        if (start < MAX_LSB_CNT * LSB_SIZE) {
                ccp_bitmap_set(ccp->lsbmap, start, count);
                rte_spinlock_unlock(&ccp->lsb_lock);
                return start * LSB_ITEM_SIZE;
        }
        CCP_LOG_ERR("NO LSBs available");

        rte_spinlock_unlock(&ccp->lsb_lock);

        return 0;
}

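/*
 * Allocation policy: a queue first carves slots out of its private LSB
 * (no locking needed, the queue owns that map) and only falls back to the
 * shared regions under lsb_lock when the private map is full. A return
 * value of 0 doubles as the failure marker, and ccp_lsb_free() below
 * accordingly treats start == 0 as "nothing to free".
 */
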
static void __rte_unused
ccp_lsb_free(struct ccp_queue *cmd_q,
             unsigned int start,
             unsigned int count)
{
        int lsbno = start / LSB_SIZE;

        if (!start)
                return;

        if (cmd_q->lsb == lsbno) {
                /* An entry from the private LSB */
                ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
        } else {
                /* From the shared LSBs */
                struct ccp_device *ccp = cmd_q->dev;

                rte_spinlock_lock(&ccp->lsb_lock);
                ccp_bitmap_clear(ccp->lsbmap, start, count);
                rte_spinlock_unlock(&ccp->lsb_lock);
        }
}

static int
ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
{
        int q_mask = 1 << cmd_q->id;
        int weight = 0;
        int j;

        /* Build a bit mask to know which LSBs this queue has access to.
         * Don't bother with segment 0 as it has special privileges.
         */
        cmd_q->lsbmask = 0;
        status >>= LSB_REGION_WIDTH;
        for (j = 1; j < MAX_LSB_CNT; j++) {
                if (status & q_mask)
                        ccp_set_bit(&cmd_q->lsbmask, j);

                status >>= LSB_REGION_WIDTH;
        }

        for (j = 0; j < MAX_LSB_CNT; j++)
                if (ccp_get_bit(&cmd_q->lsbmask, j))
                        weight++;

        CCP_LOG_INFO("Queue %d can access %d LSB regions of mask 0x%lx",
                     (int)cmd_q->id, weight, cmd_q->lsbmask);

        return weight ? 0 : -EINVAL;
}

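/*
 * The hardware status word packs one LSB_REGION_WIDTH-bit field per LSB
 * region, with bit q of a field granting queue q access to that region:
 * e.g. for queue 2, q_mask == 0x4, so region j is accessible when bit 2
 * of the j-th field is set. Region 0 is skipped above because it is
 * privileged.
 */
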
static int
ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
                             int lsb_cnt, int n_lsbs,
                             unsigned long *lsb_pub)
{
        unsigned long qlsb = 0;
        int bitno = 0;
        int qlsb_wgt = 0;
        int i, j;

        /* For each queue:
         * If the count of potential LSBs available to a queue matches the
         * ordinal given to us in lsb_cnt:
         * Copy the mask of possible LSBs for this queue into "qlsb";
         * For each bit in qlsb, see if the corresponding bit in the
         * aggregation mask is set; if so, we have a match.
         *     If we have a match, clear the bit in the aggregation to
         *     mark it as no longer available.
         *     If there is no match, clear the bit in qlsb and keep looking.
         */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct ccp_queue *cmd_q = &ccp->cmd_q[i];

                qlsb_wgt = 0;
                for (j = 0; j < MAX_LSB_CNT; j++)
                        if (ccp_get_bit(&cmd_q->lsbmask, j))
                                qlsb_wgt++;

                if (qlsb_wgt == lsb_cnt) {
                        qlsb = cmd_q->lsbmask;

                        /* __builtin_ffsl() returns 0 when qlsb is
                         * exhausted, leaving bitno at -1 and ending
                         * the search for this queue.
                         */
                        bitno = __builtin_ffsl(qlsb) - 1;
                        while (bitno >= 0 && bitno < MAX_LSB_CNT) {
                                if (ccp_get_bit(lsb_pub, bitno)) {
                                        /* We found an available LSB
                                         * that this queue can access
                                         */
                                        cmd_q->lsb = bitno;
                                        ccp_clear_bit(lsb_pub, bitno);
                                        break;
                                }
                                ccp_clear_bit(&qlsb, bitno);
                                bitno = __builtin_ffsl(qlsb) - 1;
                        }
                        if (bitno < 0 || bitno >= MAX_LSB_CNT)
                                return -EINVAL;
                        n_lsbs--;
                }
        }
        return n_lsbs;
}

/* For each queue, from the most- to least-constrained:
 * find an LSB that can be assigned to the queue. If there are N queues that
 * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
 * dedicated LSB. Remaining LSB regions become a shared resource.
 * If we have fewer LSBs than queues, all LSB regions become shared
 * resources.
 */
static int
ccp_assign_lsbs(struct ccp_device *ccp)
{
        unsigned long lsb_pub = 0, qlsb = 0;
        int n_lsbs = 0;
        int bitno;
        int i, lsb_cnt;
        int rc = 0;

        rte_spinlock_init(&ccp->lsb_lock);

        /* Create an aggregate bitmap to get a total count of available LSBs */
        for (i = 0; i < ccp->cmd_q_count; i++)
                lsb_pub |= ccp->cmd_q[i].lsbmask;

        for (i = 0; i < MAX_LSB_CNT; i++)
                if (ccp_get_bit(&lsb_pub, i))
                        n_lsbs++;

        if (n_lsbs >= ccp->cmd_q_count) {
                /* We have enough LSBs to give every queue a private LSB.
                 * Brute force search to start with the queues that are more
                 * constrained in LSB choice. When an LSB is privately
                 * assigned, it is removed from the public mask.
                 * This is an ugly N squared algorithm with some optimization.
                 */
                for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
                     lsb_cnt++) {
                        rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
                                                          &lsb_pub);
                        if (rc < 0)
                                return -EINVAL;
                        n_lsbs = rc;
                }
        }

        rc = 0;
        /* What's left of the LSBs, according to the public mask, now become
         * shared. Any zero bits in the lsb_pub mask represent an LSB region
         * that can't be used as a shared resource, so mark the LSB slots for
         * them as "in use".
         */
        qlsb = lsb_pub;
        bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
        while (bitno < MAX_LSB_CNT) {
                ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
                ccp_set_bit(&qlsb, bitno);
                bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
        }

        return rc;
}

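/*
 * Example of the most-constrained-first search: if queue 0 can only reach
 * LSB 1 (weight 1) while queue 1 can reach LSBs 1 and 2 (weight 2), the
 * lsb_cnt == 1 pass pins queue 0 to LSB 1 first, leaving LSB 2 free for
 * queue 1 on the lsb_cnt == 2 pass. Regions still set in lsb_pub after
 * the passes remain available as the shared pool; slots of unavailable
 * regions are pre-marked "in use" in ccp->lsbmap.
 */
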
static int
ccp_add_device(struct ccp_device *dev, int type)
{
        int i;
        uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
        uint64_t status;
        struct ccp_queue *cmd_q;
        const struct rte_memzone *q_mz;
        void *vaddr;

        if (dev == NULL)
                return -1;

        dev->id = ccp_dev_id++;
        dev->qidx = 0;
        vaddr = (void *)(dev->pci.mem_resource[2].addr);

        if (type == CCP_VERSION_5B) {
                CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
                CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
                for (i = 0; i < 12; i++) {
                        CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
                                      CCP_READ_REG(vaddr, TRNG_OUT_REG));
                }
                CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
                CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
                CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);

                CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
                CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);

                CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
        }
        CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);

        /* Copy the private LSB mask to the public registers */
        status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
        status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
        CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
        CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
        /* the low mask register carries 30 region bits (0x3FFFFFFF above),
         * hence the shift by 30
         */
        status = ((uint64_t)status_hi << 30) | ((uint64_t)status_lo);

        dev->cmd_q_count = 0;
        /* Find available queues */
        qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
        for (i = 0; i < MAX_HW_QUEUES; i++) {
                if (!(qmr & (1 << i)))
                        continue;
                cmd_q = &dev->cmd_q[dev->cmd_q_count++];
                cmd_q->dev = dev;
                cmd_q->id = i;
                cmd_q->qidx = 0;
                cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);

                cmd_q->reg_base = (uint8_t *)vaddr +
                        CMD_Q_STATUS_INCR * (i + 1);

                /* CCP queue memory */
                snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
                         "%s_%d_%s_%d_%s",
                         "ccp_dev",
                         (int)dev->id, "queue",
                         (int)cmd_q->id, "mem");
                q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
                                                  cmd_q->qsize, SOCKET_ID_ANY);
                if (q_mz == NULL) {
                        CCP_LOG_ERR("Failed to reserve memzone for %s",
                                    cmd_q->memz_name);
                        return -1;
                }
                cmd_q->qbase_addr = (void *)q_mz->addr;
                cmd_q->qbase_desc = (void *)q_mz->addr;
                cmd_q->qbase_phys_addr = q_mz->phys_addr;

                cmd_q->qcontrol = 0;
                /* init control reg to zero */
                CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
                              cmd_q->qcontrol);

                /* Disable the interrupts */
                CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
                CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
                CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);

                /* Clear the interrupts */
                CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
                              ALL_INTERRUPTS);

                /* Configure size of each virtual queue accessible to host */
                cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
                cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;

                dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
                CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
                              (uint32_t)dma_addr_lo);
                CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
                              (uint32_t)dma_addr_lo);

                dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
                cmd_q->qcontrol |= (dma_addr_hi << 16);
                CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
                              cmd_q->qcontrol);

                /* create LSB Mask map */
                if (ccp_find_lsb_regions(cmd_q, status))
                        CCP_LOG_ERR("queue doesn't have lsb regions");
                cmd_q->lsb = -1;

                rte_atomic64_init(&cmd_q->free_slots);
                rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
                /* one slot is left unused as a barrier between head and tail */
        }

        if (ccp_assign_lsbs(dev))
                CCP_LOG_ERR("Unable to assign lsb region");

        /* pre-allocate LSB slots */
        for (i = 0; i < dev->cmd_q_count; i++) {
                dev->cmd_q[i].sb_key =
                        ccp_lsb_alloc(&dev->cmd_q[i], 1);
                dev->cmd_q[i].sb_iv =
                        ccp_lsb_alloc(&dev->cmd_q[i], 1);
                dev->cmd_q[i].sb_sha =
                        ccp_lsb_alloc(&dev->cmd_q[i], 2);
                dev->cmd_q[i].sb_hmac =
                        ccp_lsb_alloc(&dev->cmd_q[i], 2);
        }

        TAILQ_INSERT_TAIL(&ccp_list, dev, next);
        return 0;
}

/* Currently unused: ccp_add_device() inserts a device into ccp_list only
 * on success, so the probe failure path never has a stale entry to remove.
 * Kept for symmetry with ccp_add_device().
 */
static void __rte_unused
ccp_remove_device(struct ccp_device *dev)
{
        if (dev == NULL)
                return;

        TAILQ_REMOVE(&ccp_list, dev, next);
}

static int
is_ccp_device(const char *dirname,
              const struct rte_pci_id *ccp_id,
              int *type)
{
        char filename[PATH_MAX];
        const struct rte_pci_id *id;
        uint16_t vendor, device_id;
        int i;
        unsigned long tmp;

        /* get vendor id */
        snprintf(filename, sizeof(filename), "%s/vendor", dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                return 0;
        vendor = (uint16_t)tmp;

        /* get device id */
        snprintf(filename, sizeof(filename), "%s/device", dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                return 0;
        device_id = (uint16_t)tmp;

        for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
                if (vendor == id->vendor_id &&
                    device_id == id->device_id) {
                        *type = i;
                        return 1; /* Matched device */
                }
        }
        return 0;
}

static int
ccp_probe_device(const char *dirname, uint16_t domain,
                 uint8_t bus, uint8_t devid,
                 uint8_t function, int ccp_type)
{
        struct ccp_device *ccp_dev = NULL;
        struct rte_pci_device *pci;
        char filename[PATH_MAX];
        unsigned long tmp;
        int uio_fd = -1, i, uio_num;
        char uio_devname[PATH_MAX];
        void *map_addr;

        ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
                              RTE_CACHE_LINE_SIZE);
        if (ccp_dev == NULL)
                goto fail;
        pci = &(ccp_dev->pci);

        pci->addr.domain = domain;
        pci->addr.bus = bus;
        pci->addr.devid = devid;
        pci->addr.function = function;

        /* get vendor id */
        snprintf(filename, sizeof(filename), "%s/vendor", dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                goto fail;
        pci->id.vendor_id = (uint16_t)tmp;

        /* get device id */
        snprintf(filename, sizeof(filename), "%s/device", dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                goto fail;
        pci->id.device_id = (uint16_t)tmp;

        /* get subsystem_vendor id */
        snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
                        dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                goto fail;
        pci->id.subsystem_vendor_id = (uint16_t)tmp;

        /* get subsystem_device id */
        snprintf(filename, sizeof(filename), "%s/subsystem_device",
                        dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                goto fail;
        pci->id.subsystem_device_id = (uint16_t)tmp;

        /* get class_id */
        snprintf(filename, sizeof(filename), "%s/class",
                        dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                goto fail;
        /* the least significant 24 bits are valid: class, subclass,
         * program interface
         */
        pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;

        /* parse resources */
        snprintf(filename, sizeof(filename), "%s/resource", dirname);
        if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
                goto fail;

        uio_num = ccp_find_uio_devname(dirname);
        if (uio_num < 0) {
                /*
                 * It may take time for the uio device to appear;
                 * wait here and try again.
                 */
                usleep(100000);
                uio_num = ccp_find_uio_devname(dirname);
                if (uio_num < 0)
                        goto fail;
        }
        snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);

        uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
        if (uio_fd < 0)
                goto fail;
        if (flock(uio_fd, LOCK_EX | LOCK_NB))
                goto fail;

        /* Map the PCI memory resources of the device */
        for (i = 0; i < PCI_MAX_RESOURCE; i++) {

                char devname[PATH_MAX];
                int res_fd;

                if (pci->mem_resource[i].phys_addr == 0)
                        continue;
                snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
                res_fd = open(devname, O_RDWR);
                if (res_fd < 0)
                        goto fail;
                map_addr = mmap(NULL, pci->mem_resource[i].len,
                                PROT_READ | PROT_WRITE,
                                MAP_SHARED, res_fd, 0);
                /* the mapping persists after the descriptor is closed */
                close(res_fd);
                if (map_addr == MAP_FAILED)
                        goto fail;

                pci->mem_resource[i].addr = map_addr;
        }

        /* device is valid, add it to the list; ccp_add_device() only
         * inserts on success, so nothing needs unwinding on failure
         */
        if (ccp_add_device(ccp_dev, ccp_type))
                goto fail;

        return 0;
fail:
        CCP_LOG_ERR("CCP Device probe failed");
        if (uio_fd >= 0)
                close(uio_fd);
        if (ccp_dev)
                rte_free(ccp_dev);
        return -1;
}

int
ccp_probe_devices(const struct rte_pci_id *ccp_id)
{
        int dev_cnt = 0;
        int ccp_type = 0;
        struct dirent *d;
        DIR *dir;
        int ret = 0;
        int module_idx = 0;
        uint16_t domain;
        uint8_t bus, devid, function;
        char dirname[PATH_MAX];

        module_idx = ccp_check_pci_uio_module();
        if (module_idx < 0)
                return -1;

        TAILQ_INIT(&ccp_list);
        dir = opendir(SYSFS_PCI_DEVICES);
        if (dir == NULL)
                return -1;
        while ((d = readdir(dir)) != NULL) {
                if (d->d_name[0] == '.')
                        continue;
                if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
                                        &domain, &bus, &devid, &function) != 0)
                        continue;
                snprintf(dirname, sizeof(dirname), "%s/%s",
                         SYSFS_PCI_DEVICES, d->d_name);
                if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
                        printf("CCP: Detected CCP device with ID = 0x%x\n",
                               ccp_id[ccp_type].device_id);
                        ret = ccp_probe_device(dirname, domain, bus, devid,
                                               function, ccp_type);
                        if (ret == 0)
                                dev_cnt++;
                }
        }
        closedir(dir);
        return dev_cnt;
}
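
/*
 * Typical call sequence from a PMD init path (illustrative sketch only;
 * the real PCI ID table lives in the CCP PMD, and the device ID below is
 * a placeholder, 0x1022 being the AMD vendor ID):
 *
 *     static const struct rte_pci_id ccp_pci_id[] = {
 *             { RTE_PCI_DEVICE(0x1022, 0x1456) },
 *             { .device_id = 0 },  // sentinel: vendor_id 0 ends the scan
 *     };
 *     ...
 *     if (ccp_probe_devices(ccp_pci_id) < 1)
 *             return -ENODEV;  // no CCP engine found or uio module missing
 */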