1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
13 #include <rte_bus_pci.h>
14 #include <rte_atomic.h>
15 #include <rte_byteorder.h>
18 #include <rte_spinlock.h>
19 #include <rte_crypto_sym.h>
20 #include <rte_cryptodev.h>
23 #define MAX_HW_QUEUES 5
24 #define CCP_MAX_TRNG_RETRIES 10
26 /**< CCP Register Mappings */
27 #define Q_MASK_REG 0x000
28 #define TRNG_OUT_REG 0x00c
30 /* CCP Version 5 Specifics */
31 #define CMD_QUEUE_MASK_OFFSET 0x00
32 #define CMD_QUEUE_PRIO_OFFSET 0x04
33 #define CMD_REQID_CONFIG_OFFSET 0x08
34 #define CMD_CMD_TIMEOUT_OFFSET 0x10
35 #define LSB_PUBLIC_MASK_LO_OFFSET 0x18
36 #define LSB_PUBLIC_MASK_HI_OFFSET 0x1C
37 #define LSB_PRIVATE_MASK_LO_OFFSET 0x20
38 #define LSB_PRIVATE_MASK_HI_OFFSET 0x24
40 #define CMD_Q_CONTROL_BASE 0x0000
41 #define CMD_Q_TAIL_LO_BASE 0x0004
42 #define CMD_Q_HEAD_LO_BASE 0x0008
43 #define CMD_Q_INT_ENABLE_BASE 0x000C
44 #define CMD_Q_INTERRUPT_STATUS_BASE 0x0010
46 #define CMD_Q_STATUS_BASE 0x0100
47 #define CMD_Q_INT_STATUS_BASE 0x0104
49 #define CMD_CONFIG_0_OFFSET 0x6000
50 #define CMD_TRNG_CTL_OFFSET 0x6008
51 #define CMD_AES_MASK_OFFSET 0x6010
52 #define CMD_CLK_GATE_CTL_OFFSET 0x603C
54 /* Address offset between two virtual queue registers */
55 #define CMD_Q_STATUS_INCR 0x1000
59 #define CMD_Q_SIZE 0x1F
61 #define COMMANDS_PER_QUEUE 2048
63 #define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \
65 #define Q_DESC_SIZE sizeof(struct ccp_desc)
66 #define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n))
68 #define INT_COMPLETION 0x1
70 #define INT_QUEUE_STOPPED 0x4
71 #define ALL_INTERRUPTS (INT_COMPLETION| \
75 #define LSB_REGION_WIDTH 5
79 #define LSB_ITEM_SIZE 32
80 #define SLSB_MAP_SIZE (MAX_LSB_CNT * LSB_SIZE)
82 /* General CCP Defines */
84 #define CCP_SB_BYTES 32
88 BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
91 #define WORD_OFFSET(b) ((b) / BITS_PER_WORD)
92 #define BIT_OFFSET(b) ((b) % BITS_PER_WORD)
94 #define CCP_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
95 #define CCP_BITMAP_SIZE(nr) \
96 CCP_DIV_ROUND_UP(nr, CHAR_BIT * sizeof(unsigned long))
98 #define CCP_BITMAP_FIRST_WORD_MASK(start) \
99 (~0UL << ((start) & (BITS_PER_WORD - 1)))
100 #define CCP_BITMAP_LAST_WORD_MASK(nbits) \
101 (~0UL >> (-(nbits) & (BITS_PER_WORD - 1)))
103 #define __ccp_round_mask(x, y) ((typeof(x))((y)-1))
104 #define ccp_round_down(x, y) ((x) & ~__ccp_round_mask(x, y))
106 /** CCP registers Write/Read */
/**
 * Write a 32-bit value to a CCP MMIO register.
 *
 * Converts @value to little endian (the CCP register byte order) and
 * stores it through a volatile pointer at @base + @offset.
 *
 * @param base mapped base address of the CCP register region
 * @param offset byte offset of the target register
 * @param value host-order value to write
 */
static inline void ccp_pci_reg_write(void *base, int offset,
				     uint32_t value)
{
	volatile void *reg_addr = ((uint8_t *)base + offset);

	rte_write32((rte_cpu_to_le_32(value)), reg_addr);
}
/**
 * Read a 32-bit value from a CCP MMIO register.
 *
 * Loads through a volatile pointer at @base + @offset and converts the
 * little-endian register contents to host byte order.
 *
 * @param base mapped base address of the CCP register region
 * @param offset byte offset of the target register
 * @return register contents in host byte order
 */
static inline uint32_t ccp_pci_reg_read(void *base, int offset)
{
	volatile void *reg_addr = ((uint8_t *)base + offset);

	return rte_le_to_cpu_32(rte_read32(reg_addr));
}
123 #define CCP_READ_REG(hw_addr, reg_offset) \
124 ccp_pci_reg_read(hw_addr, reg_offset)
126 #define CCP_WRITE_REG(hw_addr, reg_offset, value) \
127 ccp_pci_reg_write(hw_addr, reg_offset, value)
129 TAILQ_HEAD(ccp_list, ccp_device);
131 extern struct ccp_list ccp_list;
136 enum ccp_device_version {
142 * A structure describing a CCP command queue.
145 struct ccp_device *dev;
146 char memz_name[RTE_MEMZONE_NAMESIZE];
148 rte_atomic64_t free_slots;
149 /**< available free slots updated from enq/deq calls */
151 /* Queue identifier */
152 uint64_t id; /**< queue id */
153 uint64_t qidx; /**< queue index */
154 uint64_t qsize; /**< queue size */
157 struct ccp_desc *qbase_desc;
159 phys_addr_t qbase_phys_addr;
160 /**< queue-page registers addr */
164 /**< queue ctrl reg */
167 /**< lsb region assigned to queue */
168 unsigned long lsbmask;
169 /**< lsb regions queue can access */
170 unsigned long lsbmap[CCP_BITMAP_SIZE(LSB_SIZE)];
171 /**< all lsb resources which queue is using */
173 /**< lsb assigned for queue */
175 /**< lsb assigned for iv */
177 /**< lsb assigned for sha ctx */
179 /**< lsb assigned for hmac ctx */
180 } ____cacheline_aligned;
183 * A structure describing a CCP device.
186 TAILQ_ENTRY(ccp_device) next;
188 /**< ccp dev id on platform */
189 struct ccp_queue cmd_q[MAX_HW_QUEUES];
192 /**< no. of ccp Queues */
193 struct rte_pci_device pci;
194 /**< ccp pci identifier */
195 unsigned long lsbmap[CCP_BITMAP_SIZE(SLSB_MAP_SIZE)];
196 /**< shared lsb mask of ccp */
197 rte_spinlock_t lsb_lock;
198 /**< protection for shared lsb region allocation */
200 /**< current queue index */
202 /**< retry counter for CCP TRNG */
203 } __rte_cache_aligned;
205 /**< CCP H/W engine related */
207 * ccp_engine - CCP operation identifiers
209 * @CCP_ENGINE_AES: AES operation
210 * @CCP_ENGINE_XTS_AES: 128-bit XTS AES operation
211 * @CCP_ENGINE_3DES: DES/3DES operation
212 * @CCP_ENGINE_SHA: SHA operation
213 * @CCP_ENGINE_RSA: RSA operation
214 * @CCP_ENGINE_PASSTHRU: pass-through operation
215 * @CCP_ENGINE_ZLIB_DECOMPRESS: unused
216 * @CCP_ENGINE_ECC: ECC operation
220 CCP_ENGINE_XTS_AES_128,
225 CCP_ENGINE_ZLIB_DECOMPRESS,
230 /* Passthru engine */
232 * ccp_passthru_bitwise - type of bitwise passthru operation
234 * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed
235 * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask
236 * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask
237 * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask
238 * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask
enum ccp_passthru_bitwise {
	CCP_PASSTHRU_BITWISE_NOOP = 0,
	CCP_PASSTHRU_BITWISE_AND,
	CCP_PASSTHRU_BITWISE_OR,
	CCP_PASSTHRU_BITWISE_XOR,
	CCP_PASSTHRU_BITWISE_MASK,
	CCP_PASSTHRU_BITWISE__LAST,	/* sentinel: number of bitwise modes */
};
250 * ccp_passthru_byteswap - type of byteswap passthru operation
252 * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed
253 * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words
254 * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words
enum ccp_passthru_byteswap {
	CCP_PASSTHRU_BYTESWAP_NOOP = 0,
	CCP_PASSTHRU_BYTESWAP_32BIT,
	CCP_PASSTHRU_BYTESWAP_256BIT,
	CCP_PASSTHRU_BYTESWAP__LAST,	/* sentinel: number of byteswap modes */
};
266 struct ccp_passthru {
267 phys_addr_t src_addr;
268 phys_addr_t dest_addr;
269 enum ccp_passthru_bitwise bit_mod;
270 enum ccp_passthru_byteswap byte_swap;
275 /* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
323 * descriptor for version 5 CCP commands
325 * word 0: function; engine; control bits
326 * word 1: length of source data
327 * word 2: low 32 bits of source pointer
328 * word 3: upper 16 bits of source pointer; source memory type
329 * word 4: low 32 bits of destination pointer
330 * word 5: upper 16 bits of destination pointer; destination memory
332 * word 6: low 32 bits of key pointer
333 * word 7: upper 16 bits of key pointer; key memory type
341 uint32_t function:15;
350 uint32_t lsb_cxt_id:8;
356 uint32_t dst_lo; /* NON-SHA */
357 uint32_t sha_len_lo; /* SHA */
389 * cmd id to follow order
397 CCP_CMD_NOT_SUPPORTED,
/**
 * Extract the low 32 bits of an address.
 *
 * Used to fill the low dword of the CCP descriptor source/destination/
 * key pointer fields (descriptor words 2, 4 and 6).
 *
 * @param addr address value
 * @return bits [31:0] of addr
 */
static inline uint32_t
low32_value(unsigned long addr)
{
	return ((uint64_t)addr) & 0x0ffffffff;
}
/**
 * Extract the upper address bits used by CCP descriptors.
 *
 * CCP v5 descriptors carry only the upper 16 bits of a pointer
 * (descriptor words 3, 5 and 7), so despite the name this returns
 * bits [47:32] of the address, masked to 16 bits.
 *
 * @param addr address value
 * @return bits [47:32] of addr
 */
static inline uint32_t
high32_value(unsigned long addr)
{
	return ((uint64_t)addr >> 32) & 0x00000ffff;
}
415 int ccp_dev_start(struct rte_cryptodev *dev);
418 * Detect ccp platform and initialize all ccp devices
420 * @param ccp_id rte_pci_id list for supported CCP devices
421 * @return no. of successfully initialized CCP devices
423 int ccp_probe_devices(const struct rte_pci_id *ccp_id);
426 * allocate a ccp command queue
428 * @dev rte crypto device
429 * @param slot_req number of required slots
430 * @return allotted CCP queue on success otherwise NULL
432 struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
437 * @param trng_value data pointer to write RNG value
438 * @return 0 on success otherwise -1
440 int ccp_read_hwrng(uint32_t *trng_value);
442 #endif /* _CCP_DEV_H_ */