From: Ravi Kumar
Date: Mon, 19 Mar 2018 12:23:36 +0000 (-0400)
Subject: crypto/ccp: support device init
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=ef4b04f87fa6388f781cadf3735b2d06bf34b424;p=dpdk.git

crypto/ccp: support device init

The CCP PMD is a virtual crypto PMD that schedules work across the
actual CCP hardware engines available underneath it. The PMD manages
all devices on its own. It supports the CCP_5a and CCP_5b versions of
the crypto engine, and this patch adds support for initializing and
using such devices.

Signed-off-by: Ravi Kumar
---

diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 70d9559e63..6697cf5051 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -26,5 +26,8 @@ EXPORT_MAP := rte_pmd_ccp_version.map

 # library source files
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c

 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
new file mode 100644
index 0000000000..724b5bd004
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -0,0 +1,754 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/file.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+
+#include "ccp_dev.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
+static int ccp_dev_id;
+
+static const struct rte_memzone *
+ccp_queue_dma_zone_reserve(const char *queue_name,
+			   uint32_t queue_size,
+			   int socket_id)
+{
+	const struct rte_memzone *mz;
+
+	mz = rte_memzone_lookup(queue_name);
+	if (mz != NULL) {
+		if (((size_t)queue_size <= mz->len) &&
+		    ((socket_id == SOCKET_ID_ANY) ||
+		     (socket_id == mz->socket_id))) {
+			CCP_LOG_INFO("re-use memzone already "
+				     "allocated for %s", queue_name);
+			return mz;
+		}
+		CCP_LOG_ERR("Incompatible memzone already "
+			    "allocated %s, size %u, socket %d. "
+			    "Requested size %u, socket %u",
+			    queue_name, (uint32_t)mz->len,
+			    mz->socket_id, queue_size, socket_id);
+		return NULL;
+	}
+
+	CCP_LOG_INFO("Allocate memzone for %s, size %u on socket %u",
+		     queue_name, queue_size, socket_id);
+
+	return rte_memzone_reserve_aligned(queue_name, queue_size,
+			socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
+}
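
For illustration, a minimal sketch of how a caller might use the helper
above; the queue name and size are hypothetical, and error handling is
reduced to a NULL check:

	const struct rte_memzone *mz;

	/* Reserve (or re-use) an IOVA-contiguous zone for one queue ring. */
	mz = ccp_queue_dma_zone_reserve("ccp_dev_0_queue_0_mem", 65536,
					SOCKET_ID_ANY);
	if (mz == NULL)
		return -ENOMEM;	/* lookup and reservation both failed */
	/* mz->addr is the virtual base; mz->phys_addr the bus address. */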

+/* bitmap support apis */
+static inline void
+ccp_set_bit(unsigned long *bitmap, int n)
+{
+	__sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)));
+}
+
+static inline void
+ccp_clear_bit(unsigned long *bitmap, int n)
+{
+	__sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)));
+}
+
+static inline uint32_t
+ccp_get_bit(unsigned long *bitmap, int n)
+{
+	return ((bitmap[WORD_OFFSET(n)] & (1UL << BIT_OFFSET(n))) != 0);
+}
+
+static inline uint32_t
+ccp_ffz(unsigned long word)
+{
+	unsigned long first_zero;
+
+	first_zero = __builtin_ffsl(~word);
+	return first_zero ? (first_zero - 1) :
+		BITS_PER_WORD;
+}
+
+static inline uint32_t
+ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit)
+{
+	uint32_t i;
+	uint32_t nwords = 0;
+
+	nwords = (limit - 1) / BITS_PER_WORD + 1;
+	for (i = 0; i < nwords; i++) {
+		if (addr[i] == 0UL)
+			return i * BITS_PER_WORD;
+		if (addr[i] < ~(0UL))
+			break;
+	}
+	return (i == nwords) ? limit :
+	       i * BITS_PER_WORD + ccp_ffz(addr[i]);
+}
+
+static void
+ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
+{
+	unsigned long *p = map + WORD_OFFSET(start);
+	const unsigned int size = start + len;
+	int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD);
+	unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+	while (len - bits_to_set >= 0) {
+		*p |= mask_to_set;
+		len -= bits_to_set;
+		bits_to_set = BITS_PER_WORD;
+		mask_to_set = ~0UL;
+		p++;
+	}
+	if (len) {
+		mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size);
+		*p |= mask_to_set;
+	}
+}
+
+static void
+ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
+{
+	unsigned long *p = map + WORD_OFFSET(start);
+	const unsigned int size = start + len;
+	int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
+	unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+	while (len - bits_to_clear >= 0) {
+		*p &= ~mask_to_clear;
+		len -= bits_to_clear;
+		bits_to_clear = BITS_PER_WORD;
+		mask_to_clear = ~0UL;
+		p++;
+	}
+	if (len) {
+		mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
+		*p &= ~mask_to_clear;
+	}
+}
+
+static unsigned long
+_ccp_find_next_bit(const unsigned long *addr,
+		   unsigned long nbits,
+		   unsigned long start,
+		   unsigned long invert)
+{
+	unsigned long tmp;
+
+	if (!nbits || start >= nbits)
+		return nbits;
+
+	tmp = addr[start / BITS_PER_WORD] ^ invert;
+
+	/* Handle 1st word. */
+	tmp &= CCP_BITMAP_FIRST_WORD_MASK(start);
+	start = ccp_round_down(start, BITS_PER_WORD);
+
+	while (!tmp) {
+		start += BITS_PER_WORD;
+		if (start >= nbits)
+			return nbits;
+
+		tmp = addr[start / BITS_PER_WORD] ^ invert;
+	}
+
+	return RTE_MIN(start + (__builtin_ffsl(tmp) - 1), nbits);
+}
+
+static unsigned long
+ccp_find_next_bit(const unsigned long *addr,
+		  unsigned long size,
+		  unsigned long offset)
+{
+	return _ccp_find_next_bit(addr, size, offset, 0UL);
+}
+
+static unsigned long
+ccp_find_next_zero_bit(const unsigned long *addr,
+		       unsigned long size,
+		       unsigned long offset)
+{
+	return _ccp_find_next_bit(addr, size, offset, ~0UL);
+}
+
+/**
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ */
+static unsigned long
+ccp_bitmap_find_next_zero_area(unsigned long *map,
+			       unsigned long size,
+			       unsigned long start,
+			       unsigned int nr)
+{
+	unsigned long index, end, i;
+
+again:
+	index = ccp_find_next_zero_bit(map, size, start);
+
+	end = index + nr;
+	if (end > size)
+		return end;
+	i = ccp_find_next_bit(map, end, index);
+	if (i < end) {
+		start = i + 1;
+		goto again;
+	}
+	return index;
+}
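
To make the allocator contract concrete: ccp_bitmap_find_next_zero_area()
returns the start index of a hole on success and a value past the bitmap
size on failure, so callers bound-check the result before claiming the
area. An illustrative (hypothetical) use:

	unsigned long map[CCP_BITMAP_SIZE(LSB_SIZE)] = {0};
	uint32_t start;

	ccp_bitmap_set(map, 0, 5);	/* entries 0..4 busy */
	start = (uint32_t)ccp_bitmap_find_next_zero_area(map, LSB_SIZE, 0, 2);
	if (start < LSB_SIZE)		/* here: start == 5 */
		ccp_bitmap_set(map, start, 2);	/* claim the 2-entry hole */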

+static uint32_t
+ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
+{
+	struct ccp_device *ccp;
+	int start;
+
+	/* First look at the map for the queue */
+	if (cmd_q->lsb >= 0) {
+		start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
+								 LSB_SIZE, 0,
+								 count);
+		if (start < LSB_SIZE) {
+			ccp_bitmap_set(cmd_q->lsbmap, start, count);
+			return start + cmd_q->lsb * LSB_SIZE;
+		}
+	}
+
+	/* try to get an entry from the shared blocks */
+	ccp = cmd_q->dev;
+
+	rte_spinlock_lock(&ccp->lsb_lock);
+
+	start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
+							 MAX_LSB_CNT * LSB_SIZE,
+							 0, count);
+	if (start <= MAX_LSB_CNT * LSB_SIZE) {
+		ccp_bitmap_set(ccp->lsbmap, start, count);
+		rte_spinlock_unlock(&ccp->lsb_lock);
+		return start * LSB_ITEM_SIZE;
+	}
+	CCP_LOG_ERR("NO LSBs available");
+
+	rte_spinlock_unlock(&ccp->lsb_lock);
+
+	return 0;
+}
+
+static void __rte_unused
+ccp_lsb_free(struct ccp_queue *cmd_q,
+	     unsigned int start,
+	     unsigned int count)
+{
+	int lsbno = start / LSB_SIZE;
+
+	if (!start)
+		return;
+
+	if (cmd_q->lsb == lsbno) {
+		/* An entry from the private LSB */
+		ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+	} else {
+		/* From the shared LSBs */
+		struct ccp_device *ccp = cmd_q->dev;
+
+		rte_spinlock_lock(&ccp->lsb_lock);
+		ccp_bitmap_clear(ccp->lsbmap, start, count);
+		rte_spinlock_unlock(&ccp->lsb_lock);
+	}
+}
+
+static int
+ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
+{
+	int q_mask = 1 << cmd_q->id;
+	int weight = 0;
+	int j;
+
+	/* Build a bit mask to know which LSBs this queue has access to.
+	 * Don't bother with segment 0 as it has special privileges.
+	 */
+	cmd_q->lsbmask = 0;
+	status >>= LSB_REGION_WIDTH;
+	for (j = 1; j < MAX_LSB_CNT; j++) {
+		if (status & q_mask)
+			ccp_set_bit(&cmd_q->lsbmask, j);
+
+		status >>= LSB_REGION_WIDTH;
+	}
+
+	for (j = 0; j < MAX_LSB_CNT; j++)
+		if (ccp_get_bit(&cmd_q->lsbmask, j))
+			weight++;
+
+	printf("Queue %d can access %d LSB regions of mask %lu\n",
+	       (int)cmd_q->id, weight, cmd_q->lsbmask);
+
+	return weight ? 0 : -EINVAL;
+}
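
The status word packs one 5-bit access field per LSB region; bit q of each
field tells whether queue q may use that region. A self-contained sketch of
the same decode ccp_find_lsb_regions() performs (the status value is made
up):

	#include <stdio.h>
	#include <stdint.h>

	#define LSB_REGION_WIDTH 5
	#define MAX_LSB_CNT      8

	int main(void)
	{
		uint64_t status = 0x3DEF7BDEF7ULL; /* hypothetical register */
		int q, j;

		for (q = 0; q < 5; q++) {	/* one pass per hw queue */
			uint64_t s = status >> LSB_REGION_WIDTH; /* skip 0 */
			unsigned long mask = 0;

			for (j = 1; j < MAX_LSB_CNT; j++) {
				if (s & (1u << q))
					mask |= 1UL << j;
				s >>= LSB_REGION_WIDTH;
			}
			printf("queue %d lsbmask %#lx (%d regions)\n",
			       q, mask, __builtin_popcountl(mask));
		}
		return 0;
	}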

+static int
+ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
+			     int lsb_cnt, int n_lsbs,
+			     unsigned long *lsb_pub)
+{
+	unsigned long qlsb = 0;
+	int bitno = 0;
+	int qlsb_wgt = 0;
+	int i, j;
+
+	/* For each queue:
+	 * If the count of potential LSBs available to a queue matches the
+	 * ordinal given to us in lsb_cnt:
+	 * Copy the mask of possible LSBs for this queue into "qlsb";
+	 * For each bit in qlsb, see if the corresponding bit in the
+	 * aggregation mask is set; if so, we have a match.
+	 * If we have a match, clear the bit in the aggregation to
+	 * mark it as no longer available.
+	 * If there is no match, clear the bit in qlsb and keep looking.
+	 */
+	for (i = 0; i < ccp->cmd_q_count; i++) {
+		struct ccp_queue *cmd_q = &ccp->cmd_q[i];
+
+		qlsb_wgt = 0;
+		for (j = 0; j < MAX_LSB_CNT; j++)
+			if (ccp_get_bit(&cmd_q->lsbmask, j))
+				qlsb_wgt++;
+
+		if (qlsb_wgt == lsb_cnt) {
+			qlsb = cmd_q->lsbmask;
+
+			bitno = __builtin_ffsl(qlsb) - 1;
+			while (bitno < MAX_LSB_CNT) {
+				if (ccp_get_bit(lsb_pub, bitno)) {
+					/* We found an available LSB
+					 * that this queue can access
+					 */
+					cmd_q->lsb = bitno;
+					ccp_clear_bit(lsb_pub, bitno);
+					break;
+				}
+				ccp_clear_bit(&qlsb, bitno);
+				bitno = __builtin_ffsl(qlsb) - 1;
+			}
+			if (bitno >= MAX_LSB_CNT)
+				return -EINVAL;
+			n_lsbs--;
+		}
+	}
+	return n_lsbs;
+}
+
+/* For each queue, from the most- to least-constrained:
+ * find an LSB that can be assigned to the queue. If there are N queues that
+ * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
+ * dedicated LSB. Remaining LSB regions become a shared resource.
+ * If we have fewer LSBs than queues, all LSB regions become shared
+ * resources.
+ */
+static int
+ccp_assign_lsbs(struct ccp_device *ccp)
+{
+	unsigned long lsb_pub = 0, qlsb = 0;
+	int n_lsbs = 0;
+	int bitno;
+	int i, lsb_cnt;
+	int rc = 0;
+
+	rte_spinlock_init(&ccp->lsb_lock);
+
+	/* Create an aggregate bitmap to get a total count of available LSBs */
+	for (i = 0; i < ccp->cmd_q_count; i++)
+		lsb_pub |= ccp->cmd_q[i].lsbmask;
+
+	for (i = 0; i < MAX_LSB_CNT; i++)
+		if (ccp_get_bit(&lsb_pub, i))
+			n_lsbs++;
+
+	if (n_lsbs >= ccp->cmd_q_count) {
+		/* We have enough LSBs to give every queue a private LSB.
+		 * Brute force search to start with the queues that are more
+		 * constrained in LSB choice. When an LSB is privately
+		 * assigned, it is removed from the public mask.
+		 * This is an ugly N squared algorithm with some optimization.
+		 */
+		for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
+		     lsb_cnt++) {
+			rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt,
+							  n_lsbs, &lsb_pub);
+			if (rc < 0)
+				return -EINVAL;
+			n_lsbs = rc;
+		}
+	}
+
+	rc = 0;
+	/* What's left of the LSBs, according to the public mask, now become
+	 * shared. Any zero bits in the lsb_pub mask represent an LSB region
+	 * that can't be used as a shared resource, so mark the LSB slots for
+	 * them as "in use".
+	 */
+	qlsb = lsb_pub;
+	bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+	while (bitno < MAX_LSB_CNT) {
+		ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
+		ccp_set_bit(&qlsb, bitno);
+		bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+	}
+
+	return rc;
+}

+static int
+ccp_add_device(struct ccp_device *dev, int type)
+{
+	int i;
+	uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
+	uint64_t status;
+	struct ccp_queue *cmd_q;
+	const struct rte_memzone *q_mz;
+	void *vaddr;
+
+	if (dev == NULL)
+		return -1;
+
+	dev->id = ccp_dev_id++;
+	dev->qidx = 0;
+	vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+	if (type == CCP_VERSION_5B) {
+		CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
+		CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
+		for (i = 0; i < 12; i++) {
+			CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
+				      CCP_READ_REG(vaddr, TRNG_OUT_REG));
+		}
+		CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
+		CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
+		CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);
+
+		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
+		CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);
+
+		CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
+	}
+	CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);
+
+	/* Copy the private LSB mask to the public registers */
+	status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
+	status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
+	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
+	CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
+	status = ((uint64_t)status_hi << 30) | (uint64_t)status_lo;
+
+	dev->cmd_q_count = 0;
+	/* Find available queues */
+	qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
+	for (i = 0; i < MAX_HW_QUEUES; i++) {
+		if (!(qmr & (1 << i)))
+			continue;
+		cmd_q = &dev->cmd_q[dev->cmd_q_count++];
+		cmd_q->dev = dev;
+		cmd_q->id = i;
+		cmd_q->qidx = 0;
+		cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+
+		cmd_q->reg_base = (uint8_t *)vaddr +
+			CMD_Q_STATUS_INCR * (i + 1);
+
+		/* CCP queue memory */
+		snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
+			 "%s_%d_%s_%d_%s",
+			 "ccp_dev",
+			 (int)dev->id, "queue",
+			 (int)cmd_q->id, "mem");
+		q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
+						  cmd_q->qsize, SOCKET_ID_ANY);
+		if (q_mz == NULL) {
+			CCP_LOG_ERR("queue %d memzone unavailable",
+				    (int)cmd_q->id);
+			return -1;
+		}
+		cmd_q->qbase_addr = (void *)q_mz->addr;
+		cmd_q->qbase_desc = (void *)q_mz->addr;
+		cmd_q->qbase_phys_addr = q_mz->phys_addr;
+
+		cmd_q->qcontrol = 0;
+		/* init control reg to zero */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol);
+
+		/* Disable the interrupts */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
+		CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
+		CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);
+
+		/* Clear the interrupts */
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
+			      ALL_INTERRUPTS);
+
+		/* Configure size of each virtual queue accessible to host */
+		cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
+		cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;
+
+		dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+			      (uint32_t)dma_addr_lo);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
+			      (uint32_t)dma_addr_lo);
+
+		dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
+		cmd_q->qcontrol |= (dma_addr_hi << 16);
+		CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+			      cmd_q->qcontrol);
+
+		/* create LSB Mask map */
+		if (ccp_find_lsb_regions(cmd_q, status))
+			CCP_LOG_ERR("queue doesn't have lsb regions");
+		cmd_q->lsb = -1;
+
+		rte_atomic64_init(&cmd_q->free_slots);
+		rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
+		/* one slot is kept unused as a barrier between head and tail */
+	}
+
+	if (ccp_assign_lsbs(dev))
+		CCP_LOG_ERR("Unable to assign lsb region");
+
+	/* pre-allocate LSB slots */
+	for (i = 0; i < dev->cmd_q_count; i++) {
+		dev->cmd_q[i].sb_key = ccp_lsb_alloc(&dev->cmd_q[i], 1);
+		dev->cmd_q[i].sb_iv = ccp_lsb_alloc(&dev->cmd_q[i], 1);
+		dev->cmd_q[i].sb_sha = ccp_lsb_alloc(&dev->cmd_q[i], 2);
+		dev->cmd_q[i].sb_hmac = ccp_lsb_alloc(&dev->cmd_q[i], 2);
+	}
+
+	TAILQ_INSERT_TAIL(&ccp_list, dev, next);
+	return 0;
+}
+
+static void
+ccp_remove_device(struct ccp_device *dev)
+{
+	if (dev == NULL)
+		return;
+
+	TAILQ_REMOVE(&ccp_list, dev, next);
+}
+
+static int
+is_ccp_device(const char *dirname,
+	      const struct rte_pci_id *ccp_id,
+	      int *type)
+{
+	char filename[PATH_MAX];
+	const struct rte_pci_id *id;
+	uint16_t vendor, device_id;
+	int i;
+	unsigned long tmp;
+
+	/* get vendor id */
+	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		return 0;
+	vendor = (uint16_t)tmp;
+
+	/* get device id */
+	snprintf(filename, sizeof(filename), "%s/device", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		return 0;
+	device_id = (uint16_t)tmp;
+
+	for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
+		if (vendor == id->vendor_id &&
+		    device_id == id->device_id) {
+			*type = i;
+			return 1; /* Matched device */
+		}
+	}
+	return 0;
+}

+static int
+ccp_probe_device(const char *dirname, uint16_t domain,
+		 uint8_t bus, uint8_t devid,
+		 uint8_t function, int ccp_type)
+{
+	struct ccp_device *ccp_dev = NULL;
+	struct rte_pci_device *pci;
+	char filename[PATH_MAX];
+	unsigned long tmp;
+	int uio_fd = -1, i, uio_num;
+	char uio_devname[PATH_MAX];
+	void *map_addr;
+
+	ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
+			      RTE_CACHE_LINE_SIZE);
+	if (ccp_dev == NULL)
+		goto fail;
+	pci = &(ccp_dev->pci);
+
+	pci->addr.domain = domain;
+	pci->addr.bus = bus;
+	pci->addr.devid = devid;
+	pci->addr.function = function;
+
+	/* get vendor id */
+	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.vendor_id = (uint16_t)tmp;
+
+	/* get device id */
+	snprintf(filename, sizeof(filename), "%s/device", dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.device_id = (uint16_t)tmp;
+
+	/* get subsystem_vendor id */
+	snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
+		 dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.subsystem_vendor_id = (uint16_t)tmp;
+
+	/* get subsystem_device id */
+	snprintf(filename, sizeof(filename), "%s/subsystem_device",
+		 dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	pci->id.subsystem_device_id = (uint16_t)tmp;
+
+	/* get class_id */
+	snprintf(filename, sizeof(filename), "%s/class",
+		 dirname);
+	if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+		goto fail;
+	/* the least significant 24 bits are valid:
+	 * class, subclass, program interface
+	 */
+	pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
+
+	/* parse resources */
+	snprintf(filename, sizeof(filename), "%s/resource", dirname);
+	if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
+		goto fail;
+
+	uio_num = ccp_find_uio_devname(dirname);
+	if (uio_num < 0) {
+		/*
+		 * It may take time for the uio device to appear;
+		 * wait here and try again.
+		 */
+		usleep(100000);
+		uio_num = ccp_find_uio_devname(dirname);
+		if (uio_num < 0)
+			goto fail;
+	}
+	snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);
+
+	uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
+	if (uio_fd < 0)
+		goto fail;
+	if (flock(uio_fd, LOCK_EX | LOCK_NB))
+		goto fail;
+
+	/* Map the PCI memory resources of the device */
+	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+		char devname[PATH_MAX];
+		int res_fd;
+
+		if (pci->mem_resource[i].phys_addr == 0)
+			continue;
+		snprintf(devname, sizeof(devname), "%s/resource%d",
+			 dirname, i);
+		res_fd = open(devname, O_RDWR);
+		if (res_fd < 0)
+			goto fail;
+		map_addr = mmap(NULL, pci->mem_resource[i].len,
+				PROT_READ | PROT_WRITE,
+				MAP_SHARED, res_fd, 0);
+		close(res_fd); /* the mapping persists after close() */
+		if (map_addr == MAP_FAILED)
+			goto fail;
+
+		pci->mem_resource[i].addr = map_addr;
+	}
+
+	/* device is valid, add it to the list */
+	if (ccp_add_device(ccp_dev, ccp_type)) {
+		ccp_remove_device(ccp_dev);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	CCP_LOG_ERR("CCP Device probe failed");
+	if (uio_fd >= 0)
+		close(uio_fd);
+	if (ccp_dev)
+		rte_free(ccp_dev);
+	return -1;
+}
+
+int
+ccp_probe_devices(const struct rte_pci_id *ccp_id)
+{
+	int dev_cnt = 0;
+	int ccp_type = 0;
+	struct dirent *d;
+	DIR *dir;
+	int ret = 0;
+	int module_idx = 0;
+	uint16_t domain;
+	uint8_t bus, devid, function;
+	char dirname[PATH_MAX];
+
+	module_idx = ccp_check_pci_uio_module();
+	if (module_idx < 0)
+		return -1;
+
+	TAILQ_INIT(&ccp_list);
+	dir = opendir(SYSFS_PCI_DEVICES);
+	if (dir == NULL)
+		return -1;
+	while ((d = readdir(dir)) != NULL) {
+		if (d->d_name[0] == '.')
+			continue;
+		if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
+					      &domain, &bus, &devid,
+					      &function) != 0)
+			continue;
+		snprintf(dirname, sizeof(dirname), "%s/%s",
+			 SYSFS_PCI_DEVICES, d->d_name);
+		if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
+			printf("CCP : Detected CCP device with ID = 0x%x\n",
+			       ccp_id[ccp_type].device_id);
+			ret = ccp_probe_device(dirname, domain, bus, devid,
+					       function, ccp_type);
+			if (ret == 0)
+				dev_cnt++;
+		}
+	}
+	closedir(dir);
+	return dev_cnt;
+}
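
Putting the pieces together, the PMD layer below drives the scan roughly as
follows; the table here is a shortened, hypothetical copy of the one
defined in rte_ccp_pmd.c:

	static const struct rte_pci_id ids[] = {
		{ RTE_PCI_DEVICE(0x1022, 0x1456) },	/* AMD CCP-5a */
		{ .device_id = 0 },			/* terminator */
	};
	int cnt;

	cnt = ccp_probe_devices(ids);	/* walks sysfs, locks uio, maps BARs */
	if (cnt <= 0)
		return -ENODEV;		/* no usable CCP engine found */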
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
new file mode 100644
index 0000000000..f594e91686
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_DEV_H_
+#define _CCP_DEV_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/queue.h>
+
+#include <rte_atomic.h>
+#include <rte_bus_pci.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+
+/**< CCP specific */
+#define MAX_HW_QUEUES 5
+
+/**< CCP Register Mappings */
+#define Q_MASK_REG 0x000
+#define TRNG_OUT_REG 0x00c
+
+/* CCP Version 5 Specifics */
+#define CMD_QUEUE_MASK_OFFSET 0x00
+#define CMD_QUEUE_PRIO_OFFSET 0x04
+#define CMD_REQID_CONFIG_OFFSET 0x08
+#define CMD_CMD_TIMEOUT_OFFSET 0x10
+#define LSB_PUBLIC_MASK_LO_OFFSET 0x18
+#define LSB_PUBLIC_MASK_HI_OFFSET 0x1C
+#define LSB_PRIVATE_MASK_LO_OFFSET 0x20
+#define LSB_PRIVATE_MASK_HI_OFFSET 0x24
+
+#define CMD_Q_CONTROL_BASE 0x0000
+#define CMD_Q_TAIL_LO_BASE 0x0004
+#define CMD_Q_HEAD_LO_BASE 0x0008
+#define CMD_Q_INT_ENABLE_BASE 0x000C
+#define CMD_Q_INTERRUPT_STATUS_BASE 0x0010
+
+#define CMD_Q_STATUS_BASE 0x0100
+#define CMD_Q_INT_STATUS_BASE 0x0104
+
+#define CMD_CONFIG_0_OFFSET 0x6000
+#define CMD_TRNG_CTL_OFFSET 0x6008
+#define CMD_AES_MASK_OFFSET 0x6010
+#define CMD_CLK_GATE_CTL_OFFSET 0x603C
+
+/* Address offset between two virtual queue registers */
+#define CMD_Q_STATUS_INCR 0x1000
+
+/* Bit masks */
+#define CMD_Q_RUN 0x1
+#define CMD_Q_SIZE 0x1F
+#define CMD_Q_SHIFT 3
+#define COMMANDS_PER_QUEUE 2048
+
+#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \
+			CMD_Q_SIZE)
+#define Q_DESC_SIZE sizeof(struct ccp_desc)
+#define Q_SIZE(n) (COMMANDS_PER_QUEUE * (n))
+
+#define INT_COMPLETION 0x1
+#define INT_ERROR 0x2
+#define INT_QUEUE_STOPPED 0x4
+#define ALL_INTERRUPTS (INT_COMPLETION | \
+			INT_ERROR | \
+			INT_QUEUE_STOPPED)
+
+#define LSB_REGION_WIDTH 5
+#define MAX_LSB_CNT 8
+
+#define LSB_SIZE 16
+#define LSB_ITEM_SIZE 32
+#define SLSB_MAP_SIZE (MAX_LSB_CNT * LSB_SIZE)
+
+/* bitmap */
+enum {
+	BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
+};
+
+#define WORD_OFFSET(b) ((b) / BITS_PER_WORD)
+#define BIT_OFFSET(b) ((b) % BITS_PER_WORD)
+
+#define CCP_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define CCP_BITMAP_SIZE(nr) \
+	CCP_DIV_ROUND_UP(nr, CHAR_BIT * sizeof(unsigned long))
+
+#define CCP_BITMAP_FIRST_WORD_MASK(start) \
+	(~0UL << ((start) & (BITS_PER_WORD - 1)))
+#define CCP_BITMAP_LAST_WORD_MASK(nbits) \
+	(~0UL >> (-(nbits) & (BITS_PER_WORD - 1)))
+
+#define __ccp_round_mask(x, y) ((typeof(x))((y) - 1))
+#define ccp_round_down(x, y) ((x) & ~__ccp_round_mask(x, y))
+
+/** CCP register read/write helpers */
+
+static inline void ccp_pci_reg_write(void *base, int offset,
+				     uint32_t value)
+{
+	volatile void *reg_addr = ((uint8_t *)base + offset);
+
+	rte_write32((rte_cpu_to_le_32(value)), reg_addr);
+}
+
+static inline uint32_t ccp_pci_reg_read(void *base, int offset)
+{
+	volatile void *reg_addr = ((uint8_t *)base + offset);
+
+	return rte_le_to_cpu_32(rte_read32(reg_addr));
+}
+
+#define CCP_READ_REG(hw_addr, reg_offset) \
+	ccp_pci_reg_read(hw_addr, reg_offset)
+
+#define CCP_WRITE_REG(hw_addr, reg_offset, value) \
+	ccp_pci_reg_write(hw_addr, reg_offset, value)
+
+TAILQ_HEAD(ccp_list, ccp_device);
+
+extern struct ccp_list ccp_list;
+
+/**
+ * CCP device version
+ */
+enum ccp_device_version {
+	CCP_VERSION_5A = 0,
+	CCP_VERSION_5B,
+};
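
A quick sanity check of the queue-size encoding: COMMANDS_PER_QUEUE is
2048 = 2^11, so ffs(2048) is 12 and QUEUE_SIZE_VAL is (12 - 2) & 0x1F = 10,
while Q_SIZE(Q_DESC_SIZE) is 2048 * 32 = 64 KB of descriptor ring per
queue. The helpers above wrap rte_read32()/rte_write32() with little-endian
conversion; a sketch of programming a control register (cmd_q is assumed to
be an initialized queue):

	uint32_t qcontrol = 0;

	qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;	/* ring-size field */
	CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, qcontrol);
	qcontrol = CCP_READ_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE);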

+/**
+ * A structure describing a CCP command queue.
+ */
+struct ccp_queue {
+	struct ccp_device *dev;
+	char memz_name[RTE_MEMZONE_NAMESIZE];
+
+	rte_atomic64_t free_slots;
+	/**< available free slots updated from enq/deq calls */
+
+	/* Queue identifier */
+	uint64_t id;	/**< queue id */
+	uint64_t qidx;	/**< queue index */
+	uint64_t qsize;	/**< queue size */
+
+	/* Queue address */
+	struct ccp_desc *qbase_desc;
+	void *qbase_addr;
+	phys_addr_t qbase_phys_addr;
+	/**< queue-page registers addr */
+	void *reg_base;
+
+	uint32_t qcontrol;
+	/**< queue ctrl reg */
+
+	int lsb;
+	/**< lsb region assigned to queue */
+	unsigned long lsbmask;
+	/**< lsb regions queue can access */
+	unsigned long lsbmap[CCP_BITMAP_SIZE(LSB_SIZE)];
+	/**< all lsb resources which queue is using */
+	uint32_t sb_key;
+	/**< lsb assigned for key */
+	uint32_t sb_iv;
+	/**< lsb assigned for iv */
+	uint32_t sb_sha;
+	/**< lsb assigned for sha ctx */
+	uint32_t sb_hmac;
+	/**< lsb assigned for hmac ctx */
+} __rte_cache_aligned;
+
+/**
+ * A structure describing a CCP device.
+ */
+struct ccp_device {
+	TAILQ_ENTRY(ccp_device) next;
+	int id;
+	/**< ccp dev id on platform */
+	struct ccp_queue cmd_q[MAX_HW_QUEUES];
+	/**< ccp queue */
+	int cmd_q_count;
+	/**< no. of ccp queues */
+	struct rte_pci_device pci;
+	/**< ccp pci identifier */
+	unsigned long lsbmap[CCP_BITMAP_SIZE(SLSB_MAP_SIZE)];
+	/**< shared lsb mask of ccp */
+	rte_spinlock_t lsb_lock;
+	/**< protection for shared lsb region allocation */
+	int qidx;
+	/**< current queue index */
+} __rte_cache_aligned;
+
+/**
+ * descriptor for version 5 CCP commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory
+ * type
+ * word 6: low 32 bits of key pointer
+ * word 7: upper 16 bits of key pointer; key memory type
+ */
+struct dword0 {
+	uint32_t soc:1;
+	uint32_t ioc:1;
+	uint32_t rsvd1:1;
+	uint32_t init:1;
+	uint32_t eom:1;
+	uint32_t function:15;
+	uint32_t engine:4;
+	uint32_t prot:1;
+	uint32_t rsvd2:7;
+};
+
+struct dword3 {
+	uint32_t src_hi:16;
+	uint32_t src_mem:2;
+	uint32_t lsb_cxt_id:8;
+	uint32_t rsvd1:5;
+	uint32_t fixed:1;
+};
+
+union dword4 {
+	uint32_t dst_lo;	/* NON-SHA */
+	uint32_t sha_len_lo;	/* SHA */
+};
+
+union dword5 {
+	struct {
+		uint32_t dst_hi:16;
+		uint32_t dst_mem:2;
+		uint32_t rsvd1:13;
+		uint32_t fixed:1;
+	} fields;
+	uint32_t sha_len_hi;
+};
+
+struct dword7 {
+	uint32_t key_hi:16;
+	uint32_t key_mem:2;
+	uint32_t rsvd1:14;
+};
+
+struct ccp_desc {
+	struct dword0 dw0;
+	uint32_t length;
+	uint32_t src_lo;
+	struct dword3 dw3;
+	union dword4 dw4;
+	union dword5 dw5;
+	uint32_t key_lo;
+	struct dword7 dw7;
+};
+
+static inline uint32_t
+low32_value(unsigned long addr)
+{
+	return ((uint64_t)addr) & 0x0ffffffff;
+}
+
+static inline uint32_t
+high32_value(unsigned long addr)
+{
+	return ((uint64_t)addr >> 32) & 0x00000ffff;
+}
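
For illustration, filling the source-address words of one descriptor from a
physical buffer address; the values are hypothetical, and the memory-type
encoding (0 taken here to mean system memory) as well as the
engine/function fields belong to later patches:

	struct ccp_desc desc;
	phys_addr_t src = 0x123456789abcULL;	/* example bus address */

	memset(&desc, 0, sizeof(desc));
	desc.length = 4096;			/* word 1: source length */
	desc.src_lo = low32_value(src);		/* word 2 */
	desc.dw3.src_hi = high32_value(src);	/* word 3: upper 16 bits */
	desc.dw3.src_mem = 0;			/* assumed: system memory */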

+/**
+ * Detect ccp platform and initialize all ccp devices
+ *
+ * @param ccp_id rte_pci_id list for supported CCP devices
+ * @return number of successfully initialized CCP devices
+ */
+int ccp_probe_devices(const struct rte_pci_id *ccp_id);
+
+#endif /* _CCP_DEV_H_ */
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
new file mode 100644
index 0000000000..59152ca5d1
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+
+#include "ccp_pci.h"
+
+static const char * const uio_module_names[] = {
+	"igb_uio",
+	"uio_pci_generic",
+};
+
+int
+ccp_check_pci_uio_module(void)
+{
+	FILE *fp;
+	int i;
+	char buf[BUFSIZ];
+	const int n = sizeof(uio_module_names) / sizeof(uio_module_names[0]);
+
+	fp = fopen(PROC_MODULES, "r");
+	if (fp == NULL)
+		return -1;
+	for (i = 0; i < n; i++) {
+		while (fgets(buf, sizeof(buf), fp) != NULL) {
+			if (!strncmp(buf, uio_module_names[i],
+				     strlen(uio_module_names[i]))) {
+				fclose(fp);
+				return i;
+			}
+		}
+		rewind(fp);
+	}
+	fclose(fp);
+	printf("Insert igb_uio or uio_pci_generic kernel module(s)\n");
+	return -1; /* uio not inserted */
+}
+
+/*
+ * split up a pci address into its constituent parts.
+ */
+int
+ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+			  uint8_t *bus, uint8_t *devid, uint8_t *function)
+{
+	/* first split on ':' */
+	union splitaddr {
+		struct {
+			char *domain;
+			char *bus;
+			char *devid;
+			char *function;
+		};
+		char *str[PCI_FMT_NVAL];
+		/* last element-separator is "." not ":" */
+	} splitaddr;
+
+	char *buf_copy = strndup(buf, bufsize);
+
+	if (buf_copy == NULL)
+		return -1;
+
+	if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+	    != PCI_FMT_NVAL - 1)
+		goto error;
+	/* final split is on '.' between devid and function */
+	splitaddr.function = strchr(splitaddr.devid, '.');
+	if (splitaddr.function == NULL)
+		goto error;
+	*splitaddr.function++ = '\0';
+
+	/* now convert to int values */
+	errno = 0;
+	*domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
+	*bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
+	*devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
+	*function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
+	if (errno != 0)
+		goto error;
+
+	free(buf_copy); /* free the copy made with strndup */
+	return 0;
+error:
+	free(buf_copy);
+	return -1;
+}
+
+int
+ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val)
+{
+	FILE *f;
+	char buf[BUFSIZ];
+	char *end = NULL;
+
+	f = fopen(filename, "r");
+	if (f == NULL)
+		return -1;
+	if (fgets(buf, sizeof(buf), f) == NULL) {
+		fclose(f);
+		return -1;
+	}
+	*val = strtoul(buf, &end, 0);
+	if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
+		fclose(f);
+		return -1;
+	}
+	fclose(f);
+	return 0;
+}
+
+/** IO resource type: */
+#define IORESOURCE_IO 0x00000100
+#define IORESOURCE_MEM 0x00000200
+
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified)
+ */
+static int
+ccp_pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+				 uint64_t *end_addr, uint64_t *flags)
+{
+	union pci_resource_info {
+		struct {
+			char *phys_addr;
+			char *end_addr;
+			char *flags;
+		};
+		char *ptrs[PCI_RESOURCE_FMT_NVAL];
+	} res_info;
+
+	if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3)
+		return -1;
+	errno = 0;
+	*phys_addr = strtoull(res_info.phys_addr, NULL, 16);
+	*end_addr = strtoull(res_info.end_addr, NULL, 16);
+	*flags = strtoull(res_info.flags, NULL, 16);
+	if (errno != 0)
+		return -1;
+
+	return 0;
+}
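
A sample line from a PCI "resource" sysfs file and what the parser above
extracts from it (the addresses are made up):

	char line[] =
		"0x00000000f7300000 0x00000000f73fffff 0x0000000000040200";
	uint64_t phys, end, flags;

	if (ccp_pci_parse_one_sysfs_resource(line, sizeof(line),
					     &phys, &end, &flags) == 0) {
		/* phys = 0xf7300000, len = end - phys + 1 = 1 MB,
		 * flags & IORESOURCE_MEM set: a mappable memory BAR
		 */
	}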

+/* parse the "resource" sysfs file */
+int
+ccp_pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+{
+	FILE *fp;
+	char buf[BUFSIZ];
+	int i;
+	uint64_t phys_addr, end_addr, flags;
+
+	fp = fopen(filename, "r");
+	if (fp == NULL)
+		return -1;
+
+	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+		if (fgets(buf, sizeof(buf), fp) == NULL)
+			goto error;
+		if (ccp_pci_parse_one_sysfs_resource(buf, sizeof(buf),
+				&phys_addr, &end_addr, &flags) < 0)
+			goto error;
+
+		if (flags & IORESOURCE_MEM) {
+			dev->mem_resource[i].phys_addr = phys_addr;
+			dev->mem_resource[i].len = end_addr - phys_addr + 1;
+			/* not mapped for now */
+			dev->mem_resource[i].addr = NULL;
+		}
+	}
+	fclose(fp);
+	return 0;
+
+error:
+	fclose(fp);
+	return -1;
+}
+
+int
+ccp_find_uio_devname(const char *dirname)
+{
+	DIR *dir;
+	struct dirent *e;
+	char dirname_uio[PATH_MAX];
+	unsigned int uio_num;
+	int ret = -1;
+
+	/* depending on kernel version, uio can be located in uio/uioX
+	 * or uio:uioX
+	 */
+	snprintf(dirname_uio, sizeof(dirname_uio), "%s/uio", dirname);
+	dir = opendir(dirname_uio);
+	if (dir == NULL) {
+		/* retry with the parent directory; the layout differs
+		 * across kernel versions
+		 */
+		dir = opendir(dirname);
+		if (dir == NULL)
+			return -1;
+	}
+
+	/* take the first file starting with "uio" */
+	while ((e = readdir(dir)) != NULL) {
+		/* format could be uio%d ...*/
+		int shortprefix_len = sizeof("uio") - 1;
+		/* ... or uio:uio%d */
+		int longprefix_len = sizeof("uio:uio") - 1;
+		char *endptr;
+
+		if (strncmp(e->d_name, "uio", 3) != 0)
+			continue;
+
+		/* first try uio%d */
+		errno = 0;
+		uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
+		if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+			ret = uio_num;
+			break;
+		}
+
+		/* then try uio:uio%d */
+		errno = 0;
+		uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
+		if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+			ret = uio_num;
+			break;
+		}
+	}
+	closedir(dir);
+	return ret;
+}
diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h
new file mode 100644
index 0000000000..7ed3bac406
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_PCI_H_
+#define _CCP_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_bus_pci.h>
+
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+#define PROC_MODULES "/proc/modules"
+
+int ccp_check_pci_uio_module(void);
+
+int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+			      uint8_t *bus, uint8_t *devid, uint8_t *function);
+
+int ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val);
+
+int ccp_pci_parse_sysfs_resource(const char *filename,
+				 struct rte_pci_device *dev);
+
+int ccp_find_uio_devname(const char *dirname);
+
+#endif /* _CCP_PCI_H_ */
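
For illustration, parsing a canonical sysfs device-directory name with the
helper declared above (the address is an example):

	uint16_t domain;
	uint8_t bus, devid, function;
	const char addr[] = "0000:03:00.2";

	if (ccp_parse_pci_addr_format(addr, sizeof(addr), &domain,
				      &bus, &devid, &function) == 0) {
		/* domain = 0x0000, bus = 0x03, devid = 0x00, function = 2 */
	}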
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
new file mode 100644
index 0000000000..099eb219d9
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <rte_cryptodev_pmd.h>
+
+struct rte_cryptodev_ops ccp_ops = {
+	.dev_configure		= NULL,
+	.dev_start		= NULL,
+	.dev_stop		= NULL,
+	.dev_close		= NULL,
+
+	.stats_get		= NULL,
+	.stats_reset		= NULL,
+
+	.dev_infos_get		= NULL,
+
+	.queue_pair_setup	= NULL,
+	.queue_pair_release	= NULL,
+	.queue_pair_start	= NULL,
+	.queue_pair_stop	= NULL,
+	.queue_pair_count	= NULL,
+
+	.session_get_size	= NULL,
+	.session_configure	= NULL,
+	.session_clear		= NULL,
+};
+
+struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
new file mode 100644
index 0000000000..0da9b92552
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_PMD_PRIVATE_H_
+#define _CCP_PMD_PRIVATE_H_
+
+#include <rte_cryptodev.h>
+
+#define CRYPTODEV_NAME_CCP_PMD crypto_ccp
+
+#define CCP_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+		__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_CCP_DEBUG
+#define CCP_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+		__func__, __LINE__, ## args)
+
+#define CCP_LOG_DBG(fmt, args...) \
+	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+		RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+		__func__, __LINE__, ## args)
+#else
+#define CCP_LOG_INFO(fmt, args...)
+#define CCP_LOG_DBG(fmt, args...)
+#endif
+
+/**< Maximum queue pairs supported by CCP PMD */
+#define CCP_PMD_MAX_QUEUE_PAIRS 1
+#define CCP_NB_MAX_DESCRIPTORS 1024
+#define CCP_MAX_BURST 64
+
+/* private data structure for each CCP crypto device */
+struct ccp_private {
+	unsigned int max_nb_qpairs;	/**< Max number of queue pairs */
+	unsigned int max_nb_sessions;	/**< Max number of sessions */
+	uint8_t crypto_num_dev;		/**< Number of working crypto devices */
+};
+
+/**< device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *ccp_pmd_ops;
+
+uint16_t
+ccp_cpu_pmd_enqueue_burst(void *queue_pair,
+			  struct rte_crypto_op **ops,
+			  uint16_t nb_ops);
+uint16_t
+ccp_cpu_pmd_dequeue_burst(void *queue_pair,
+			  struct rte_crypto_op **ops,
+			  uint16_t nb_ops);
+
+#endif /* _CCP_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
index bc8c449df5..8afdb91438 100644
--- a/drivers/crypto/ccp/rte_ccp_pmd.c
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -2,23 +2,170 @@
  * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
  */

+#include <rte_bus_pci.h>
 #include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_config.h>
 #include <rte_cryptodev.h>
 #include <rte_cryptodev_pmd.h>
+#include <rte_pci.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+
+#include "ccp_dev.h"
+#include "ccp_pmd_private.h"
+
+/**
+ * Global flag indicating whether the CCP PMD has already been initialized.
+ */
+static unsigned int ccp_pmd_init_done;

 uint8_t ccp_cryptodev_driver_id;

+static uint16_t
+ccp_pmd_enqueue_burst(void *queue_pair __rte_unused,
+		      struct rte_crypto_op **ops __rte_unused,
+		      uint16_t nb_ops __rte_unused)
+{
+	uint16_t enq_cnt = 0;
+
+	return enq_cnt;
+}
+
+static uint16_t
+ccp_pmd_dequeue_burst(void *queue_pair __rte_unused,
+		      struct rte_crypto_op **ops __rte_unused,
+		      uint16_t nb_ops __rte_unused)
+{
+	uint16_t nb_dequeued = 0;
+
+	return nb_dequeued;
+}
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id ccp_pci_id[] = {
+	{
+		RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
+	},
+	{
+		RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
+	},
+	{.device_id = 0},
+};
+
 /** Remove ccp pmd */
 static int
-cryptodev_ccp_remove(struct rte_vdev_device *dev __rte_unused)
+cryptodev_ccp_remove(struct rte_vdev_device *dev)
 {
+	const char *name;
+
+	ccp_pmd_init_done = 0;
+	name = rte_vdev_device_name(dev);
+	if (name == NULL)
+		return -EINVAL;
+
+	RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
+		name, rte_socket_id());
+
 	return 0;
 }

+/** Create crypto device */
+static int
+cryptodev_ccp_create(const char *name,
+		     struct rte_vdev_device *vdev,
+		     struct rte_cryptodev_pmd_init_params *init_params)
+{
+	struct rte_cryptodev *dev;
+	struct ccp_private *internals;
+	uint8_t cryptodev_cnt = 0;
+
+	if (init_params->name[0] == '\0')
+		snprintf(init_params->name, sizeof(init_params->name),
+			 "%s", name);
+
+	dev = rte_cryptodev_pmd_create(init_params->name,
+				       &vdev->device,
+				       init_params);
+	if (dev == NULL) {
+		CCP_LOG_ERR("failed to create cryptodev vdev");
+		goto init_error;
+	}
+
+	cryptodev_cnt = ccp_probe_devices(ccp_pci_id);
+
+	if (cryptodev_cnt == 0) {
+		CCP_LOG_ERR("failed to detect CCP crypto device");
+		goto init_error;
+	}
+
+	printf("CCP : Crypto device count = %d\n", cryptodev_cnt);
+	dev->driver_id = ccp_cryptodev_driver_id;
+
+	/* register rx/tx burst functions for data path */
+	dev->dev_ops = ccp_pmd_ops;
+	dev->enqueue_burst = ccp_pmd_enqueue_burst;
+	dev->dequeue_burst = ccp_pmd_dequeue_burst;
+
+	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			RTE_CRYPTODEV_FF_HW_ACCELERATED |
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+	internals = dev->data->dev_private;
+
+	internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+	internals->max_nb_sessions = init_params->max_nb_sessions;
+	internals->crypto_num_dev = cryptodev_cnt;
+
+	return 0;
+
+init_error:
+	CCP_LOG_ERR("driver %s: %s() failed",
+		    init_params->name, __func__);
+	cryptodev_ccp_remove(vdev);
+
+	return -EFAULT;
+}
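
For context, once the driver is registered an application can instantiate
the virtual device at runtime, which lands in cryptodev_ccp_probe() below;
the device-name string comes from CRYPTODEV_NAME_CCP_PMD and the argument
shown is one of the standard cryptodev init parameters (a sketch, not part
of the patch):

	/* EAL invokes cryptodev_ccp_probe() for the new vdev */
	if (rte_vdev_init("crypto_ccp", "max_nb_queue_pairs=1") < 0)
		rte_exit(EXIT_FAILURE, "cannot create crypto_ccp vdev\n");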
"Max number of sessions = %d\n", + init_params.max_nb_sessions); + + rc = cryptodev_ccp_create(name, vdev, &init_params); + if (rc) + return rc; + ccp_pmd_init_done = 1; return 0; }