/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 */

#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/file.h>
#include <unistd.h>

#include <rte_hexdump.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "ccp_dev.h"
#include "ccp_pci.h"
#include "ccp_pmd_private.h"

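/* All CCP devices found during probe, and the id to assign to the next one. */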
struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
static int ccp_dev_id;

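/*
 * Reset the per-cryptodev round-robin pointer to the first CCP device;
 * ccp_allot_queue() advances it on every allocation.
 */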
int
ccp_dev_start(struct rte_cryptodev *dev)
{
	struct ccp_private *priv = dev->data->dev_private;

	priv->last_dev = TAILQ_FIRST(&ccp_list);
	return 0;
}

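/*
 * Pick a hardware queue that can hold at least slot_req descriptors.
 * Devices are walked round-robin starting from the one used last; within
 * a device the queues are also tried round-robin. Returns NULL when no
 * queue currently has enough free slots.
 */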
struct ccp_queue *
ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
{
	int i, ret = 0;
	struct ccp_device *dev;
	struct ccp_private *priv = cdev->data->dev_private;

	dev = TAILQ_NEXT(priv->last_dev, next);
	if (unlikely(dev == NULL))
		dev = TAILQ_FIRST(&ccp_list);
	priv->last_dev = dev;
	if (dev->qidx >= dev->cmd_q_count)
		dev->qidx = 0;
	ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
	if (ret >= slot_req)
		return &dev->cmd_q[dev->qidx];
	for (i = 0; i < dev->cmd_q_count; i++) {
		dev->qidx++;
		if (dev->qidx >= dev->cmd_q_count)
			dev->qidx = 0;
		ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
		if (ret >= slot_req)
			return &dev->cmd_q[dev->qidx];
	}
	return NULL;
}

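/*
 * Return an IOVA-contiguous memzone of at least queue_size bytes for a
 * command queue, reusing an existing memzone of the same name when it is
 * large enough and on a compatible socket.
 */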
static const struct rte_memzone *
ccp_queue_dma_zone_reserve(const char *queue_name,
			   uint32_t queue_size,
			   int socket_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(queue_name);
	if (mz != NULL) {
		if (((size_t)queue_size <= mz->len) &&
		    ((socket_id == SOCKET_ID_ANY) ||
		     (socket_id == mz->socket_id))) {
			CCP_LOG_INFO("re-use memzone already "
				     "allocated for %s", queue_name);
			return mz;
		}
		CCP_LOG_ERR("Incompatible memzone already "
			    "allocated %s, size %u, socket %d. "
			    "Requested size %u, socket %u",
			    queue_name, (uint32_t)mz->len,
			    mz->socket_id, queue_size, socket_id);
		return NULL;
	}

	CCP_LOG_INFO("Allocate memzone for %s, size %u on socket %u",
		     queue_name, queue_size, socket_id);

	return rte_memzone_reserve_aligned(queue_name, queue_size,
			socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
}

/* bitmap support apis */
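/*
 * The bitmaps below are arrays of unsigned long words: WORD_OFFSET()
 * picks the word holding bit n and BIT_OFFSET() the bit inside that word.
 * ccp_set_bit()/ccp_clear_bit() update their word atomically.
 */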
static inline void
ccp_set_bit(unsigned long *bitmap, int n)
{
	__sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)));
}

static inline void
ccp_clear_bit(unsigned long *bitmap, int n)
{
	__sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)));
}

static inline uint32_t
ccp_get_bit(unsigned long *bitmap, int n)
{
	/* use an unsigned long constant so bit offsets above 31 are valid */
	return ((bitmap[WORD_OFFSET(n)] & (1UL << BIT_OFFSET(n))) != 0);
}


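/* Index of the first zero bit in a word, or BITS_PER_WORD if all bits are set. */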
static inline uint32_t
ccp_ffz(unsigned long word)
{
	unsigned long first_zero;

	first_zero = __builtin_ffsl(~word);
	return first_zero ? (first_zero - 1) :
		BITS_PER_WORD;
}

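/* Position of the first zero bit in the map, or limit if no zero bit is found. */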
static inline uint32_t
ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit)
{
	uint32_t i;
	uint32_t nwords = 0;

	nwords = (limit - 1) / BITS_PER_WORD + 1;
	for (i = 0; i < nwords; i++) {
		if (addr[i] == 0UL)
			return i * BITS_PER_WORD;
		if (addr[i] < ~(0UL))
			break;
	}
	return (i == nwords) ? limit : i * BITS_PER_WORD + ccp_ffz(addr[i]);
}

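/*
 * ccp_bitmap_set()/ccp_bitmap_clear() operate on a run of len bits starting
 * at bit start, handling the partial first and last words separately.
 */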
static void
ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
{
	unsigned long *p = map + WORD_OFFSET(start);
	const unsigned int size = start + len;
	int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD);
	unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start);

	while (len - bits_to_set >= 0) {
		*p |= mask_to_set;
		len -= bits_to_set;
		bits_to_set = BITS_PER_WORD;
		mask_to_set = ~0UL;
		p++;
	}
	if (len) {
		mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size);
		*p |= mask_to_set;
	}
}

static void
ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
{
	unsigned long *p = map + WORD_OFFSET(start);
	const unsigned int size = start + len;
	int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
	unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);

	while (len - bits_to_clear >= 0) {
		*p &= ~mask_to_clear;
		len -= bits_to_clear;
		bits_to_clear = BITS_PER_WORD;
		mask_to_clear = ~0UL;
		p++;
	}
	if (len) {
		mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
		*p &= ~mask_to_clear;
	}
}


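/*
 * Core scan helper: with invert == 0 it returns the next set bit at or after
 * start, with invert == ~0UL the next zero bit; nbits is returned when no
 * such bit exists.
 */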
static unsigned long
_ccp_find_next_bit(const unsigned long *addr,
		   unsigned long nbits,
		   unsigned long start,
		   unsigned long invert)
{
	unsigned long tmp;

	if (!nbits || start >= nbits)
		return nbits;

	tmp = addr[start / BITS_PER_WORD] ^ invert;

	/* Handle 1st word. */
	tmp &= CCP_BITMAP_FIRST_WORD_MASK(start);
	start = ccp_round_down(start, BITS_PER_WORD);

	while (!tmp) {
		start += BITS_PER_WORD;
		if (start >= nbits)
			return nbits;

		tmp = addr[start / BITS_PER_WORD] ^ invert;
	}

	/* use the long variant so bits above 31 of a 64-bit word are found */
	return RTE_MIN(start + (__builtin_ffsl(tmp) - 1), nbits);
}

static unsigned long
ccp_find_next_bit(const unsigned long *addr,
		  unsigned long size,
		  unsigned long offset)
{
	return _ccp_find_next_bit(addr, size, offset, 0UL);
}

static unsigned long
ccp_find_next_zero_bit(const unsigned long *addr,
		       unsigned long size,
		       unsigned long offset)
{
	return _ccp_find_next_bit(addr, size, offset, ~0UL);
}

/**
 * ccp_bitmap_find_next_zero_area - find a contiguous zero area
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bit number to start searching at
 * @nr: The number of zeroed bits we're looking for
 */
static unsigned long
ccp_bitmap_find_next_zero_area(unsigned long *map,
			       unsigned long size,
			       unsigned long start,
			       unsigned int nr)
{
	unsigned long index, end, i;

again:
	index = ccp_find_next_zero_bit(map, size, start);

	end = index + nr;
	if (end > size)
		return end;
	i = ccp_find_next_bit(map, end, index);
	if (i < end) {
		start = i + 1;
		goto again;
	}
	return index;
}

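/*
 * Reserve count consecutive LSB entries for a queue: first the queue's
 * private LSB map is searched, then, under lsb_lock, the regions shared by
 * all queues of the device; 0 is returned when no run of count free entries
 * exists.
 */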
static uint32_t
ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
{
	struct ccp_device *ccp;
	int start;

	/* First look at the map for the queue */
	if (cmd_q->lsb >= 0) {
		start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
								 LSB_SIZE, 0,
								 count);
		if (start < LSB_SIZE) {
			ccp_bitmap_set(cmd_q->lsbmap, start, count);
			return start + cmd_q->lsb * LSB_SIZE;
		}
	}

	/* try to get an entry from the shared blocks */
	ccp = cmd_q->dev;

	rte_spinlock_lock(&ccp->lsb_lock);

	start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
						    MAX_LSB_CNT * LSB_SIZE,
						    0, count);
	if (start <= MAX_LSB_CNT * LSB_SIZE) {
		ccp_bitmap_set(ccp->lsbmap, start, count);
		rte_spinlock_unlock(&ccp->lsb_lock);
		return start * LSB_ITEM_SIZE;
	}
	CCP_LOG_ERR("NO LSBs available");

	rte_spinlock_unlock(&ccp->lsb_lock);

	return 0;
}

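/*
 * Release count LSB entries starting at start, either from the queue's
 * private map or, under lsb_lock, from the device-wide shared map.
 */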
static void __rte_unused
ccp_lsb_free(struct ccp_queue *cmd_q,
	     unsigned int start,
	     unsigned int count)
{
	int lsbno = start / LSB_SIZE;

	if (!start)
		return;

	if (cmd_q->lsb == lsbno) {
		/* An entry from the private LSB */
		ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
	} else {
		/* From the shared LSBs */
		struct ccp_device *ccp = cmd_q->dev;

		rte_spinlock_lock(&ccp->lsb_lock);
		ccp_bitmap_clear(ccp->lsbmap, start, count);
		rte_spinlock_unlock(&ccp->lsb_lock);
	}
}

static int
ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
{
	int q_mask = 1 << cmd_q->id;
	int weight = 0;
	int j;

	/*
	 * Build a bit mask of the LSB regions this queue has access to.
	 * Don't bother with segment 0 as it has special privileges.
	 */
	cmd_q->lsbmask = 0;
	status >>= LSB_REGION_WIDTH;
	for (j = 1; j < MAX_LSB_CNT; j++) {
		if (status & q_mask)
			ccp_set_bit(&cmd_q->lsbmask, j);

		status >>= LSB_REGION_WIDTH;
	}

	for (j = 0; j < MAX_LSB_CNT; j++)
		if (ccp_get_bit(&cmd_q->lsbmask, j))
			weight++;

	printf("Queue %d can access %d LSB regions of mask %lu\n",
	       (int)cmd_q->id, weight, cmd_q->lsbmask);

	return weight ? 0 : -EINVAL;
}

static int
ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
			     int lsb_cnt, int n_lsbs,
			     unsigned long *lsb_pub)
{
	unsigned long qlsb = 0;
	int bitno = 0;
	int qlsb_wgt = 0;
	int i, j;

	/* For each queue:
	 * If the count of potential LSBs available to a queue matches the
	 * ordinal given to us in lsb_cnt:
	 * Copy the mask of possible LSBs for this queue into "qlsb";
	 * For each bit in qlsb, see if the corresponding bit in the
	 * aggregation mask is set; if so, we have a match.
	 *     If we have a match, clear the bit in the aggregation to
	 *     mark it as no longer available.
	 *     If there is no match, clear the bit in qlsb and keep looking.
	 */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct ccp_queue *cmd_q = &ccp->cmd_q[i];

		qlsb_wgt = 0;
		for (j = 0; j < MAX_LSB_CNT; j++)
			if (ccp_get_bit(&cmd_q->lsbmask, j))
				qlsb_wgt++;

		if (qlsb_wgt == lsb_cnt) {
			qlsb = cmd_q->lsbmask;

			bitno = ffs(qlsb) - 1;
			while (bitno < MAX_LSB_CNT) {
				if (ccp_get_bit(lsb_pub, bitno)) {
					/* We found an available LSB
					 * that this queue can access
					 */
					cmd_q->lsb = bitno;
					ccp_clear_bit(lsb_pub, bitno);
					break;
				}
				ccp_clear_bit(&qlsb, bitno);
				bitno = ffs(qlsb) - 1;
			}
			if (bitno >= MAX_LSB_CNT)
				return -EINVAL;
			n_lsbs--;
		}
	}
	return n_lsbs;
}

/* For each queue, from the most- to least-constrained:
 * find an LSB that can be assigned to the queue. If there are N queues that
 * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
 * dedicated LSB. Remaining LSB regions become a shared resource.
 * If we have fewer LSBs than queues, all LSB regions become shared
 * resources.
 */
static int
ccp_assign_lsbs(struct ccp_device *ccp)
{
	unsigned long lsb_pub = 0, qlsb = 0;
	int n_lsbs = 0;
	int bitno;
	int i, lsb_cnt;
	int rc = 0;

	rte_spinlock_init(&ccp->lsb_lock);

	/* Create an aggregate bitmap to get a total count of available LSBs */
	for (i = 0; i < ccp->cmd_q_count; i++)
		lsb_pub |= ccp->cmd_q[i].lsbmask;

	for (i = 0; i < MAX_LSB_CNT; i++)
		if (ccp_get_bit(&lsb_pub, i))
			n_lsbs++;

	if (n_lsbs >= ccp->cmd_q_count) {
		/* We have enough LSBs to give every queue a private LSB.
		 * Brute force search to start with the queues that are more
		 * constrained in LSB choice. When an LSB is privately
		 * assigned, it is removed from the public mask.
		 * This is an ugly N squared algorithm with some optimization.
		 */
		for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
		     lsb_cnt++) {
			rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
							  &lsb_pub);
			if (rc < 0)
				return -EINVAL;
			n_lsbs = rc;
		}
	}

	rc = 0;
	/* What's left of the LSBs, according to the public mask, now become
	 * shared. Any zero bits in the lsb_pub mask represent an LSB region
	 * that can't be used as a shared resource, so mark the LSB slots for
	 * them as "in use".
	 */
	qlsb = lsb_pub;
	bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
	while (bitno < MAX_LSB_CNT) {
		ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
		ccp_set_bit(&qlsb, bitno);
		bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
	}

	return rc;
}

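/*
 * One-time initialization of a newly found CCP: program the global control
 * registers (devices of type CCP_VERSION_5B get extra TRNG and queue setup),
 * discover the available hardware queues from Q_MASK_REG, give each queue a
 * descriptor ring in an IOVA-contiguous memzone, work out its LSB access
 * mask and pre-allocate the LSB slots used for keys, IVs, SHA and HMAC
 * state.
 */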
static int
ccp_add_device(struct ccp_device *dev, int type)
{
	int i;
	uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
	uint64_t status;
	struct ccp_queue *cmd_q;
	const struct rte_memzone *q_mz;
	void *vaddr;

	if (dev == NULL)
		return -1;

	dev->id = ccp_dev_id++;
	dev->qidx = 0;
	vaddr = (void *)(dev->pci.mem_resource[2].addr);

	if (type == CCP_VERSION_5B) {
		CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
		CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
		for (i = 0; i < 12; i++) {
			CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
				      CCP_READ_REG(vaddr, TRNG_OUT_REG));
		}
		CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
		CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
		CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);

		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);

		CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
	}
	CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);

	/* Copy the private LSB mask to the public registers */
	status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
	status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
	status = ((uint64_t)status_hi << 30) | ((uint64_t)status_lo);

	dev->cmd_q_count = 0;
	/* Find available queues */
	qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
	for (i = 0; i < MAX_HW_QUEUES; i++) {
		if (!(qmr & (1 << i)))
			continue;
		cmd_q = &dev->cmd_q[dev->cmd_q_count++];
		cmd_q->dev = dev;
		cmd_q->id = i;
		cmd_q->qidx = 0;
		cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);

		cmd_q->reg_base = (uint8_t *)vaddr +
			CMD_Q_STATUS_INCR * (i + 1);

		/* CCP queue memory */
		snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
			 "%s_%d_%s_%d_%s",
			 "ccp_dev",
			 (int)dev->id, "queue",
			 (int)cmd_q->id, "mem");
		q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
						  cmd_q->qsize, SOCKET_ID_ANY);
		cmd_q->qbase_addr = (void *)q_mz->addr;
		cmd_q->qbase_desc = (void *)q_mz->addr;
		cmd_q->qbase_phys_addr = q_mz->phys_addr;

		cmd_q->qcontrol = 0;
		/* init control reg to zero */
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
			      cmd_q->qcontrol);

		/* Disable the interrupts */
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
		CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
		CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);

		/* Clear the interrupts */
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
			      ALL_INTERRUPTS);

		/* Configure size of each virtual queue accessible to host */
		cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
		cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;

		dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
			      (uint32_t)dma_addr_lo);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
			      (uint32_t)dma_addr_lo);

		dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
		cmd_q->qcontrol |= (dma_addr_hi << 16);
		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
			      cmd_q->qcontrol);

		/* create LSB Mask map */
		if (ccp_find_lsb_regions(cmd_q, status))
			CCP_LOG_ERR("queue doesn't have lsb regions");
		cmd_q->lsb = -1;

		rte_atomic64_init(&cmd_q->free_slots);
		rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
		/* unused slot barrier b/w H&T */
	}

	if (ccp_assign_lsbs(dev))
		CCP_LOG_ERR("Unable to assign lsb region");

	/* pre-allocate LSB slots */
	for (i = 0; i < dev->cmd_q_count; i++) {
		dev->cmd_q[i].sb_key =
			ccp_lsb_alloc(&dev->cmd_q[i], 1);
		dev->cmd_q[i].sb_iv =
			ccp_lsb_alloc(&dev->cmd_q[i], 1);
		dev->cmd_q[i].sb_sha =
			ccp_lsb_alloc(&dev->cmd_q[i], 2);
		dev->cmd_q[i].sb_hmac =
			ccp_lsb_alloc(&dev->cmd_q[i], 2);
	}

	TAILQ_INSERT_TAIL(&ccp_list, dev, next);
	return 0;
}

static void
ccp_remove_device(struct ccp_device *dev)
{
	if (dev == NULL)
		return;

	TAILQ_REMOVE(&ccp_list, dev, next);
}

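/*
 * Read the vendor and device ids from a device's sysfs directory and check
 * them against the PMD's PCI id table; on a match, return 1 and report the
 * matching table index through *type.
 */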
static int
is_ccp_device(const char *dirname,
	      const struct rte_pci_id *ccp_id,
	      int *type)
{
	char filename[PATH_MAX];
	const struct rte_pci_id *id;
	uint16_t vendor, device_id;
	int i;
	unsigned long tmp;

	/* get vendor id */
	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
		return 0;
	vendor = (uint16_t)tmp;

	/* get device id */
	snprintf(filename, sizeof(filename), "%s/device", dirname);
	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
		return 0;
	device_id = (uint16_t)tmp;

	for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
		if (vendor == id->vendor_id &&
		    device_id == id->device_id) {
			*type = i;
			return 1; /* Matched device */
		}
	}
	return 0;
}

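/*
 * Bring up a single CCP at the given PCI address: read its ids and resources
 * from sysfs, grab and lock the matching /dev/uio* node, mmap the memory
 * resources and hand the device to ccp_add_device().
 */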
static int
ccp_probe_device(const char *dirname, uint16_t domain,
		 uint8_t bus, uint8_t devid,
		 uint8_t function, int ccp_type)
{
	struct ccp_device *ccp_dev = NULL;
	struct rte_pci_device *pci;
	char filename[PATH_MAX];
	unsigned long tmp;
	int uio_fd = -1, i, uio_num;
	char uio_devname[PATH_MAX];
	void *map_addr;

	ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
			      RTE_CACHE_LINE_SIZE);
	if (ccp_dev == NULL)
		goto fail;
	pci = &(ccp_dev->pci);

	pci->addr.domain = domain;
	pci->addr.bus = bus;
	pci->addr.devid = devid;
	pci->addr.function = function;

	/* get vendor id */
	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
		goto fail;
	pci->id.vendor_id = (uint16_t)tmp;

	/* get device id */
	snprintf(filename, sizeof(filename), "%s/device", dirname);
	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
		goto fail;
	pci->id.device_id = (uint16_t)tmp;

	/* get subsystem_vendor id */
	snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
			dirname);
	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
		goto fail;
	pci->id.subsystem_vendor_id = (uint16_t)tmp;

	/* get subsystem_device id */
	snprintf(filename, sizeof(filename), "%s/subsystem_device",
			dirname);
	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
		goto fail;
	pci->id.subsystem_device_id = (uint16_t)tmp;

	/* get class_id */
	snprintf(filename, sizeof(filename), "%s/class",
			dirname);
	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
		goto fail;
	/* only the low 24 bits are valid: class, subclass, program interface */
	pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;

	/* parse resources */
	snprintf(filename, sizeof(filename), "%s/resource", dirname);
	if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
		goto fail;

	uio_num = ccp_find_uio_devname(dirname);
	if (uio_num < 0) {
		/*
		 * It may take time for the uio device to appear;
		 * wait here and try again.
		 */
		usleep(100000);
		uio_num = ccp_find_uio_devname(dirname);
		if (uio_num < 0)
			goto fail;
	}
	snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);

	uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
	if (uio_fd < 0)
		goto fail;
	if (flock(uio_fd, LOCK_EX | LOCK_NB))
		goto fail;

	/* Map the PCI memory resources of the device */
	for (i = 0; i < PCI_MAX_RESOURCE; i++) {

		char devname[PATH_MAX];
		int res_fd;

		if (pci->mem_resource[i].phys_addr == 0)
			continue;
		snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
		res_fd = open(devname, O_RDWR);
		if (res_fd < 0)
			goto fail;
		map_addr = mmap(NULL, pci->mem_resource[i].len,
				PROT_READ | PROT_WRITE,
				MAP_SHARED, res_fd, 0);
		/* the mapping stays valid once established, so drop the fd */
		close(res_fd);
		if (map_addr == MAP_FAILED)
			goto fail;

		pci->mem_resource[i].addr = map_addr;
	}

	/* device is valid, add in list */
	if (ccp_add_device(ccp_dev, ccp_type)) {
		ccp_remove_device(ccp_dev);
		goto fail;
	}

	return 0;
fail:
	CCP_LOG_ERR("CCP Device probe failed");
	if (uio_fd >= 0)
		close(uio_fd);
	if (ccp_dev)
		rte_free(ccp_dev);
	return -1;
}

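/*
 * Walk SYSFS_PCI_DEVICES, probe every PCI function that matches the id
 * table and return the number of CCP devices successfully brought up.
 */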
int
ccp_probe_devices(const struct rte_pci_id *ccp_id)
{
	int dev_cnt = 0;
	int ccp_type = 0;
	struct dirent *d;
	DIR *dir;
	int ret = 0;
	int module_idx = 0;
	uint16_t domain;
	uint8_t bus, devid, function;
	char dirname[PATH_MAX];

	module_idx = ccp_check_pci_uio_module();
	if (module_idx < 0)
		return -1;

	TAILQ_INIT(&ccp_list);
	dir = opendir(SYSFS_PCI_DEVICES);
	if (dir == NULL)
		return -1;
	while ((d = readdir(dir)) != NULL) {
		if (d->d_name[0] == '.')
			continue;
		if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
					&domain, &bus, &devid, &function) != 0)
			continue;
		snprintf(dirname, sizeof(dirname), "%s/%s",
			     SYSFS_PCI_DEVICES, d->d_name);
		if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
			printf("CCP : Detected CCP device with ID = 0x%x\n",
			       ccp_id[ccp_type].device_id);
			ret = ccp_probe_device(dirname, domain, bus, devid,
					       function, ccp_type);
			if (ret == 0)
				dev_cnt++;
		}
	}
	closedir(dir);
	return dev_cnt;
}