drivers/crypto/ccp/ccp_dev.c
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 */

#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/types.h>
#include <sys/file.h>
#include <unistd.h>

#include <rte_hexdump.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "ccp_dev.h"
#include "ccp_pci.h"
#include "ccp_pmd_private.h"

int iommu_mode;
struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
static int ccp_dev_id;

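/* Remember the head of the device list as the starting point for
 * round-robin queue allotment in ccp_allot_queue().
 */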
int
ccp_dev_start(struct rte_cryptodev *dev)
{
        struct ccp_private *priv = dev->data->dev_private;

        priv->last_dev = TAILQ_FIRST(&ccp_list);
        return 0;
}

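/* Pick a command queue with at least slot_req free descriptor slots:
 * advance round-robin to the next device, then scan that device's queues
 * starting from its last-used index. Returns NULL when no queue has room.
 */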
struct ccp_queue *
ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
{
        int i, ret = 0;
        struct ccp_device *dev;
        struct ccp_private *priv = cdev->data->dev_private;

        dev = TAILQ_NEXT(priv->last_dev, next);
        if (unlikely(dev == NULL))
                dev = TAILQ_FIRST(&ccp_list);
        priv->last_dev = dev;
        if (dev->qidx >= dev->cmd_q_count)
                dev->qidx = 0;
        ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
        if (ret >= slot_req)
                return &dev->cmd_q[dev->qidx];
        for (i = 0; i < dev->cmd_q_count; i++) {
                dev->qidx++;
                if (dev->qidx >= dev->cmd_q_count)
                        dev->qidx = 0;
                ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
                if (ret >= slot_req)
                        return &dev->cmd_q[dev->qidx];
        }
        return NULL;
}

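/* Fetch one 32-bit value from the hardware TRNG. Each device is polled up
 * to CCP_MAX_TRNG_RETRIES times; a zero read means entropy was not yet
 * available. Returns 0 on success, -1 if every device ran out of retries.
 */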
int
ccp_read_hwrng(uint32_t *value)
{
        struct ccp_device *dev;

        TAILQ_FOREACH(dev, &ccp_list, next) {
                void *vaddr = (void *)(dev->pci.mem_resource[2].addr);

                while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) {
                        *value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
                        if (*value) {
                                dev->hwrng_retries = 0;
                                return 0;
                        }
                }
                dev->hwrng_retries = 0;
        }
        return -1;
}

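/* Reserve an IOVA-contiguous memzone for a queue's descriptor ring,
 * aligned to its own size, re-using an existing compatible memzone when
 * one with the same name is already present.
 */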
static const struct rte_memzone *
ccp_queue_dma_zone_reserve(const char *queue_name,
                           uint32_t queue_size,
                           int socket_id)
{
        const struct rte_memzone *mz;

        mz = rte_memzone_lookup(queue_name);
        if (mz != NULL) {
                if (((size_t)queue_size <= mz->len) &&
                    ((socket_id == SOCKET_ID_ANY) ||
                     (socket_id == mz->socket_id))) {
                        CCP_LOG_INFO("re-use memzone already "
                                     "allocated for %s", queue_name);
                        return mz;
                }
                CCP_LOG_ERR("Incompatible memzone already "
                            "allocated %s, size %u, socket %d. "
                            "Requested size %u, socket %u",
                            queue_name, (uint32_t)mz->len,
                            mz->socket_id, queue_size, socket_id);
                return NULL;
        }

        CCP_LOG_INFO("Allocate memzone for %s, size %u on socket %u",
                     queue_name, queue_size, socket_id);

        return rte_memzone_reserve_aligned(queue_name, queue_size,
                        socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
}

/* bitmap support apis */
static inline void
ccp_set_bit(unsigned long *bitmap, int n)
{
        __sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)));
}

static inline void
ccp_clear_bit(unsigned long *bitmap, int n)
{
        __sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)));
}

static inline uint32_t
ccp_get_bit(unsigned long *bitmap, int n)
{
        return ((bitmap[WORD_OFFSET(n)] & (1UL << BIT_OFFSET(n))) != 0);
}

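/* Return the index of the first zero bit in word, or BITS_PER_WORD when
 * every bit is set; e.g. ccp_ffz(0x7UL) == 3.
 */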
static inline uint32_t
ccp_ffz(unsigned long word)
{
        unsigned long first_zero;

        first_zero = __builtin_ffsl(~word);
        return first_zero ? (first_zero - 1) :
                BITS_PER_WORD;
}

static inline uint32_t
ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit)
{
        uint32_t i;
        uint32_t nwords = 0;

        nwords = (limit - 1) / BITS_PER_WORD + 1;
        for (i = 0; i < nwords; i++) {
                if (addr[i] == 0UL)
                        return i * BITS_PER_WORD;
                if (addr[i] < ~(0UL))
                        break;
        }
        return (i == nwords) ? limit : i * BITS_PER_WORD + ccp_ffz(addr[i]);
}

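/* Multi-word variants that set/clear a run of len bits starting at start;
 * e.g. ccp_bitmap_set(map, 3, 2) sets bits 3 and 4. Unlike ccp_set_bit()/
 * ccp_clear_bit() above these are not atomic; paths touching the shared
 * LSB map serialize with the device lsb_lock.
 */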
static void
ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
{
        unsigned long *p = map + WORD_OFFSET(start);
        const unsigned int size = start + len;
        int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD);
        unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start);

        while (len - bits_to_set >= 0) {
                *p |= mask_to_set;
                len -= bits_to_set;
                bits_to_set = BITS_PER_WORD;
                mask_to_set = ~0UL;
                p++;
        }
        if (len) {
                mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size);
                *p |= mask_to_set;
        }
}

static void
ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
{
        unsigned long *p = map + WORD_OFFSET(start);
        const unsigned int size = start + len;
        int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
        unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);

        while (len - bits_to_clear >= 0) {
                *p &= ~mask_to_clear;
                len -= bits_to_clear;
                bits_to_clear = BITS_PER_WORD;
                mask_to_clear = ~0UL;
                p++;
        }
        if (len) {
                mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
                *p &= ~mask_to_clear;
        }
}

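/* Core of the next-set/next-zero-bit searches: each word is XORed with
 * invert (0 to look for a set bit, ~0UL to look for a zero bit), so one
 * loop serves both wrappers below.
 */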
static unsigned long
_ccp_find_next_bit(const unsigned long *addr,
                   unsigned long nbits,
                   unsigned long start,
                   unsigned long invert)
{
        unsigned long tmp;

        if (!nbits || start >= nbits)
                return nbits;

        tmp = addr[start / BITS_PER_WORD] ^ invert;

        /* Handle 1st word. */
        tmp &= CCP_BITMAP_FIRST_WORD_MASK(start);
        start = ccp_round_down(start, BITS_PER_WORD);

        while (!tmp) {
                start += BITS_PER_WORD;
                if (start >= nbits)
                        return nbits;

                tmp = addr[start / BITS_PER_WORD] ^ invert;
        }

        return RTE_MIN(start + (__builtin_ffsl(tmp) - 1), nbits);
}

static unsigned long
ccp_find_next_bit(const unsigned long *addr,
                  unsigned long size,
                  unsigned long offset)
{
        return _ccp_find_next_bit(addr, size, offset, 0UL);
}

static unsigned long
ccp_find_next_zero_bit(const unsigned long *addr,
                       unsigned long size,
                       unsigned long offset)
{
        return _ccp_find_next_bit(addr, size, offset, ~0UL);
}

/**
 * bitmap_find_next_zero_area - find a contiguous aligned zero area
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 */
static unsigned long
ccp_bitmap_find_next_zero_area(unsigned long *map,
                               unsigned long size,
                               unsigned long start,
                               unsigned int nr)
{
        unsigned long index, end, i;

again:
        index = ccp_find_next_zero_bit(map, size, start);

        end = index + nr;
        if (end > size)
                return end;
        i = ccp_find_next_bit(map, end, index);
        if (i < end) {
                start = i + 1;
                goto again;
        }
        return index;
}

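/* Allocate count contiguous LSB slots: first from the queue's private LSB
 * region, then, under lsb_lock, from the regions shared across the device.
 * Returns 0 when no run of count free slots exists.
 */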
static uint32_t
ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
{
        struct ccp_device *ccp;
        int start;

        /* First look at the map for the queue */
        if (cmd_q->lsb >= 0) {
                start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
                                                                 LSB_SIZE, 0,
                                                                 count);
                if (start < LSB_SIZE) {
                        ccp_bitmap_set(cmd_q->lsbmap, start, count);
                        return start + cmd_q->lsb * LSB_SIZE;
                }
        }

        /* try to get an entry from the shared blocks */
        ccp = cmd_q->dev;

        rte_spinlock_lock(&ccp->lsb_lock);

        start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
                                                    MAX_LSB_CNT * LSB_SIZE,
                                                    0, count);
        if (start <= MAX_LSB_CNT * LSB_SIZE) {
                ccp_bitmap_set(ccp->lsbmap, start, count);
                rte_spinlock_unlock(&ccp->lsb_lock);
                return start * LSB_ITEM_SIZE;
        }
        CCP_LOG_ERR("NO LSBs available");

        rte_spinlock_unlock(&ccp->lsb_lock);

        return 0;
}

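/* Return count LSB slots starting at start to the map they came from,
 * private or shared; start == 0 marks a failed allocation and is a no-op.
 */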
static void __rte_unused
ccp_lsb_free(struct ccp_queue *cmd_q,
             unsigned int start,
             unsigned int count)
{
        int lsbno = start / LSB_SIZE;

        if (!start)
                return;

        if (cmd_q->lsb == lsbno) {
                /* An entry from the private LSB */
                ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
        } else {
                /* From the shared LSBs */
                struct ccp_device *ccp = cmd_q->dev;

                rte_spinlock_lock(&ccp->lsb_lock);
                ccp_bitmap_clear(ccp->lsbmap, start, count);
                rte_spinlock_unlock(&ccp->lsb_lock);
        }
}

static int
ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
{
        int q_mask = 1 << cmd_q->id;
        int weight = 0;
        int j;

        /* Build a bit mask to know which LSBs this queue has access to.
         * Don't bother with segment 0 as it has special privileges.
         */
        cmd_q->lsbmask = 0;
        status >>= LSB_REGION_WIDTH;
        for (j = 1; j < MAX_LSB_CNT; j++) {
                if (status & q_mask)
                        ccp_set_bit(&cmd_q->lsbmask, j);

                status >>= LSB_REGION_WIDTH;
        }

        for (j = 0; j < MAX_LSB_CNT; j++)
                if (ccp_get_bit(&cmd_q->lsbmask, j))
                        weight++;

        printf("Queue %d can access %d LSB regions of mask %lu\n",
               (int)cmd_q->id, weight, cmd_q->lsbmask);

        return weight ? 0 : -EINVAL;
}

static int
ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
                             int lsb_cnt, int n_lsbs,
                             unsigned long *lsb_pub)
{
        unsigned long qlsb = 0;
        int bitno = 0;
        int qlsb_wgt = 0;
        int i, j;

        /* For each queue:
         * If the count of potential LSBs available to a queue matches the
         * ordinal given to us in lsb_cnt:
         * Copy the mask of possible LSBs for this queue into "qlsb";
         * For each bit in qlsb, see if the corresponding bit in the
         * aggregation mask is set; if so, we have a match.
         *     If we have a match, clear the bit in the aggregation to
         *     mark it as no longer available.
         *     If there is no match, clear the bit in qlsb and keep looking.
         */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct ccp_queue *cmd_q = &ccp->cmd_q[i];

                qlsb_wgt = 0;
                for (j = 0; j < MAX_LSB_CNT; j++)
                        if (ccp_get_bit(&cmd_q->lsbmask, j))
                                qlsb_wgt++;

                if (qlsb_wgt == lsb_cnt) {
                        qlsb = cmd_q->lsbmask;

                        bitno = __builtin_ffsl(qlsb) - 1;
                        while (bitno >= 0 && bitno < MAX_LSB_CNT) {
                                if (ccp_get_bit(lsb_pub, bitno)) {
                                        /* We found an available LSB
                                         * that this queue can access
                                         */
                                        cmd_q->lsb = bitno;
                                        ccp_clear_bit(lsb_pub, bitno);
                                        break;
                                }
                                ccp_clear_bit(&qlsb, bitno);
                                bitno = __builtin_ffsl(qlsb) - 1;
                        }
                        if (bitno < 0 || bitno >= MAX_LSB_CNT)
                                return -EINVAL;
                        n_lsbs--;
                }
        }
        return n_lsbs;
}

/* For each queue, from the most- to least-constrained:
 * find an LSB that can be assigned to the queue. If there are N queues that
 * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
 * dedicated LSB. Remaining LSB regions become a shared resource.
 * If we have fewer LSBs than queues, all LSB regions become shared
 * resources.
 */
static int
ccp_assign_lsbs(struct ccp_device *ccp)
{
        unsigned long lsb_pub = 0, qlsb = 0;
        int n_lsbs = 0;
        int bitno;
        int i, lsb_cnt;
        int rc = 0;

        rte_spinlock_init(&ccp->lsb_lock);

        /* Create an aggregate bitmap to get a total count of available LSBs */
        for (i = 0; i < ccp->cmd_q_count; i++)
                lsb_pub |= ccp->cmd_q[i].lsbmask;

        for (i = 0; i < MAX_LSB_CNT; i++)
                if (ccp_get_bit(&lsb_pub, i))
                        n_lsbs++;

        if (n_lsbs >= ccp->cmd_q_count) {
                /* We have enough LSBs to give every queue a private LSB.
                 * Brute force search to start with the queues that are more
                 * constrained in LSB choice. When an LSB is privately
                 * assigned, it is removed from the public mask.
                 * This is an ugly N squared algorithm with some optimization.
                 */
                for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
                     lsb_cnt++) {
                        rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
                                                          &lsb_pub);
                        if (rc < 0)
                                return -EINVAL;
                        n_lsbs = rc;
                }
        }

        rc = 0;
        /* What's left of the LSBs, according to the public mask, now become
         * shared. Any zero bits in the lsb_pub mask represent an LSB region
         * that can't be used as a shared resource, so mark the LSB slots for
         * them as "in use".
         */
        qlsb = lsb_pub;
        bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
        while (bitno < MAX_LSB_CNT) {
                ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
                ccp_set_bit(&qlsb, bitno);
                bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
        }

        return rc;
}

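/* One-time init of a CCP: configure and seed the TRNG on v5b parts,
 * publish the LSB access mask, then for every queue advertised in
 * Q_MASK_REG set up its MMIO registers and descriptor ring, record which
 * LSB regions it can reach, and finally assign private/shared LSB slots.
 */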
static int
ccp_add_device(struct ccp_device *dev, int type)
{
        int i;
        uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
        uint64_t status;
        struct ccp_queue *cmd_q;
        const struct rte_memzone *q_mz;
        void *vaddr;

        if (dev == NULL)
                return -1;

        dev->id = ccp_dev_id++;
        dev->qidx = 0;
        vaddr = (void *)(dev->pci.mem_resource[2].addr);

        if (type == CCP_VERSION_5B) {
                CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
                CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
                for (i = 0; i < 12; i++) {
                        CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
                                      CCP_READ_REG(vaddr, TRNG_OUT_REG));
                }
                CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
                CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
                CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);

                CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
                CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);

                CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
        }
        CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x0);

        /* Copy the private LSB mask to the public registers */
        status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
        status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
        CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
        CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
        status = ((uint64_t)status_hi << 30) | ((uint64_t)status_lo);

        dev->cmd_q_count = 0;
        /* Find available queues */
        qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
        for (i = 0; i < MAX_HW_QUEUES; i++) {
                if (!(qmr & (1 << i)))
                        continue;
                cmd_q = &dev->cmd_q[dev->cmd_q_count++];
                cmd_q->dev = dev;
                cmd_q->id = i;
                cmd_q->qidx = 0;
                cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);

                cmd_q->reg_base = (uint8_t *)vaddr +
                        CMD_Q_STATUS_INCR * (i + 1);

                /* CCP queue memory */
                snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
                         "%s_%d_%s_%d_%s",
                         "ccp_dev",
                         (int)dev->id, "queue",
                         (int)cmd_q->id, "mem");
                q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
                                                  cmd_q->qsize, SOCKET_ID_ANY);
                cmd_q->qbase_addr = (void *)q_mz->addr;
                cmd_q->qbase_desc = (void *)q_mz->addr;
                cmd_q->qbase_phys_addr = q_mz->iova;

                cmd_q->qcontrol = 0;
                /* init control reg to zero */
                CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
                              cmd_q->qcontrol);

                /* Disable the interrupts */
                CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
                CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
                CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);

                /* Clear the interrupts */
                CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
                              ALL_INTERRUPTS);

                /* Configure size of each virtual queue accessible to host */
                cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
                cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;

                dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
                CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
                              (uint32_t)dma_addr_lo);
                CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
                              (uint32_t)dma_addr_lo);

                dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
                cmd_q->qcontrol |= (dma_addr_hi << 16);
                CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
                              cmd_q->qcontrol);

                /* create LSB Mask map */
                if (ccp_find_lsb_regions(cmd_q, status))
                        CCP_LOG_ERR("queue doesn't have lsb regions");
                cmd_q->lsb = -1;

                rte_atomic64_init(&cmd_q->free_slots);
                rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
                /* one slot stays unused as a barrier between head and tail */
        }

        if (ccp_assign_lsbs(dev))
                CCP_LOG_ERR("Unable to assign lsb region");

        /* pre-allocate LSB slots */
        for (i = 0; i < dev->cmd_q_count; i++) {
                dev->cmd_q[i].sb_key =
                        ccp_lsb_alloc(&dev->cmd_q[i], 1);
                dev->cmd_q[i].sb_iv =
                        ccp_lsb_alloc(&dev->cmd_q[i], 1);
                dev->cmd_q[i].sb_sha =
                        ccp_lsb_alloc(&dev->cmd_q[i], 2);
                dev->cmd_q[i].sb_hmac =
                        ccp_lsb_alloc(&dev->cmd_q[i], 2);
        }

        TAILQ_INSERT_TAIL(&ccp_list, dev, next);
        return 0;
}

static void
ccp_remove_device(struct ccp_device *dev)
{
        if (dev == NULL)
                return;

        TAILQ_REMOVE(&ccp_list, dev, next);
}

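/* Check whether the sysfs directory dirname describes a CCP: compare its
 * vendor/device ids against the ccp_id table and, on a match, return 1
 * with *type set to the index of the matching entry.
 */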
static int
is_ccp_device(const char *dirname,
              const struct rte_pci_id *ccp_id,
              int *type)
{
        char filename[PATH_MAX];
        const struct rte_pci_id *id;
        uint16_t vendor, device_id;
        int i;
        unsigned long tmp;

        /* get vendor id */
        snprintf(filename, sizeof(filename), "%s/vendor", dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                return 0;
        vendor = (uint16_t)tmp;

        /* get device id */
        snprintf(filename, sizeof(filename), "%s/device", dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                return 0;
        device_id = (uint16_t)tmp;

        for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
                if (vendor == id->vendor_id &&
                    device_id == id->device_id) {
                        *type = i;
                        return 1; /* Matched device */
                }
        }
        return 0;
}

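/* Fill an rte_pci_device from sysfs for the CCP at the given PCI address,
 * pick the kernel driver type that matches the detected iommu_mode, map
 * the device and register it via ccp_add_device().
 */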
static int
ccp_probe_device(const char *dirname, uint16_t domain,
                 uint8_t bus, uint8_t devid,
                 uint8_t function, int ccp_type)
{
        struct ccp_device *ccp_dev = NULL;
        struct rte_pci_device *pci;
        char filename[PATH_MAX];
        unsigned long tmp;

        ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
                              RTE_CACHE_LINE_SIZE);
        if (ccp_dev == NULL)
                goto fail;
        pci = &(ccp_dev->pci);

        pci->addr.domain = domain;
        pci->addr.bus = bus;
        pci->addr.devid = devid;
        pci->addr.function = function;

        /* get vendor id */
        snprintf(filename, sizeof(filename), "%s/vendor", dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                goto fail;
        pci->id.vendor_id = (uint16_t)tmp;

        /* get device id */
        snprintf(filename, sizeof(filename), "%s/device", dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                goto fail;
        pci->id.device_id = (uint16_t)tmp;

        /* get subsystem_vendor id */
        snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
                        dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                goto fail;
        pci->id.subsystem_vendor_id = (uint16_t)tmp;

        /* get subsystem_device id */
        snprintf(filename, sizeof(filename), "%s/subsystem_device",
                        dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                goto fail;
        pci->id.subsystem_device_id = (uint16_t)tmp;

        /* get class_id */
        snprintf(filename, sizeof(filename), "%s/class",
                        dirname);
        if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
                goto fail;
        /* the least significant 24 bits are valid: class, subclass,
         * program interface
         */
        pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;

        /* parse resources */
        snprintf(filename, sizeof(filename), "%s/resource", dirname);
        if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
                goto fail;
        if (iommu_mode == 2)
                pci->kdrv = RTE_PCI_KDRV_VFIO;
        else if (iommu_mode == 0)
                pci->kdrv = RTE_PCI_KDRV_IGB_UIO;
        else if (iommu_mode == 1)
                pci->kdrv = RTE_PCI_KDRV_UIO_GENERIC;

        if (rte_pci_map_device(pci) != 0)
                goto fail;

        /* device is valid, add in list */
        if (ccp_add_device(ccp_dev, ccp_type)) {
                ccp_remove_device(ccp_dev);
                goto fail;
        }

        return 0;
fail:
        CCP_LOG_ERR("CCP Device probe failed");
        if (ccp_dev)
                rte_free(ccp_dev);
        return -1;
}

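/* Scan SYSFS_PCI_DEVICES for devices that match the ccp_id table, probe
 * each one, and return the number of CCPs successfully brought up.
 */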
int
ccp_probe_devices(const struct rte_pci_id *ccp_id)
{
        int dev_cnt = 0;
        int ccp_type = 0;
        struct dirent *d;
        DIR *dir;
        int ret = 0;
        int module_idx = 0;
        uint16_t domain;
        uint8_t bus, devid, function;
        char dirname[PATH_MAX];

        module_idx = ccp_check_pci_uio_module();
        if (module_idx < 0)
                return -1;

        iommu_mode = module_idx;
        TAILQ_INIT(&ccp_list);
        dir = opendir(SYSFS_PCI_DEVICES);
        if (dir == NULL)
                return -1;
        while ((d = readdir(dir)) != NULL) {
                if (d->d_name[0] == '.')
                        continue;
                if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
                                        &domain, &bus, &devid, &function) != 0)
                        continue;
                snprintf(dirname, sizeof(dirname), "%s/%s",
                             SYSFS_PCI_DEVICES, d->d_name);
                if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
                        printf("CCP : Detected CCP device with ID = 0x%x\n",
                               ccp_id[ccp_type].device_id);
                        ret = ccp_probe_device(dirname, domain, bus, devid,
                                               function, ccp_type);
                        if (ret == 0)
                                dev_cnt++;
                }
        }
        closedir(dir);
        return dev_cnt;
}