return NULL;
}
+/*
+ * Read one 32-bit word from the CCP hardware TRNG.
+ *
+ * Walks every probed CCP device on ccp_list and polls its TRNG output
+ * register up to CCP_MAX_TRNG_RETRIES times; the first non-zero word
+ * read is stored through 'value'.
+ *
+ * value: out-parameter receiving the random word on success.
+ * Returns 0 on success, -1 if no device produced a non-zero word.
+ */
+int
+ccp_read_hwrng(uint32_t *value)
+{
+ struct ccp_device *dev;
+
+ TAILQ_FOREACH(dev, &ccp_list, next) {
+ /* BAR 2 of the CCP PCI device — presumably the register
+ * block containing TRNG_OUT_REG; confirm against probe code.
+ */
+ void *vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+ /* hwrng_retries is per-device state; it is zeroed on every
+ * exit path below, so each call starts a fresh retry budget.
+ * NOTE(review): not thread-safe if multiple callers share a
+ * device — confirm callers serialize access.
+ */
+ while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) {
+ *value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
+ /* A zero read is treated as "no entropy yet": retry. */
+ if (*value) {
+ dev->hwrng_retries = 0;
+ return 0;
+ }
+ }
+ /* Budget exhausted on this device: reset counter and try
+ * the next device on the list.
+ */
+ dev->hwrng_retries = 0;
+ }
+ return -1;
+}
+
static const struct rte_memzone *
ccp_queue_dma_zone_reserve(const char *queue_name,
uint32_t queue_size,
/**< CCP specific */
#define MAX_HW_QUEUES 5
+#define CCP_MAX_TRNG_RETRIES 10
/**< CCP Register Mappings */
#define Q_MASK_REG 0x000
/**< protection for shared lsb region allocation */
int qidx;
/**< current queue index */
+ int hwrng_retries;
+ /**< retry counter for CCP TRNG */
} __rte_cache_aligned;
/**< CCP H/W engine related */
*/
struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
+/**
+ * Read a 32-bit value from the CCP hardware RNG.
+ *
+ * @param trng_value pointer to the location where the RNG value is written
+ * @return 0 on success, -1 otherwise
+ */
+int ccp_read_hwrng(uint32_t *trng_value);
+
#endif /* _CCP_DEV_H_ */