#include "qat_logs.h"
#include "qat_sym_session.h"
#include "qat_sym.h"
+#include "qat_qp.h"
#include "adf_transport_access_macros.h"
#define BYTE_LENGTH 8
/** Creates a context in either AES or DES in ECB mode
 *  Depends on openssl libcrypto
 */
-static inline uint32_t
-adf_modulo(uint32_t data, uint32_t shift);
-
-static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
- struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp);
static inline uint32_t
-qat_bpicipher_preprocess(struct qat_session *ctx,
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
struct rte_crypto_op *op)
{
int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
}
static inline uint32_t
-qat_bpicipher_postprocess(struct qat_session *ctx,
+qat_bpicipher_postprocess(struct qat_sym_session *ctx,
struct rte_crypto_op *op)
{
int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
return sym_op->cipher.data.length - last_block_len;
}
-static inline void
-txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
- WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
- q->hw_queue_number, q->tail);
- q->nb_pending_requests = 0;
- q->csr_tail = q->tail;
-}
-
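+/*
+ * Cryptodev enqueue callback: the descriptor ring and CSR tail handling has
+ * moved to the generic qat_enqueue_op_burst(), so this is now just a thin
+ * type-adapting wrapper.
+ */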
uint16_t
-qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
- register struct qat_queue *queue;
- struct qat_qp *tmp_qp = (struct qat_qp *)qp;
- register uint32_t nb_ops_sent = 0;
- register struct rte_crypto_op **cur_op = ops;
- register int ret;
- uint16_t nb_ops_possible = nb_ops;
- register uint8_t *base_addr;
- register uint32_t tail;
- int overflow;
-
- if (unlikely(nb_ops == 0))
- return 0;
-
- /* read params used a lot in main loop into registers */
- queue = &(tmp_qp->tx_q);
- base_addr = (uint8_t *)queue->base_addr;
- tail = queue->tail;
-
- /* Find how many can actually fit on the ring */
- tmp_qp->inflights16 += nb_ops;
- overflow = tmp_qp->inflights16 - queue->max_inflights;
- if (overflow > 0) {
- tmp_qp->inflights16 -= overflow;
- nb_ops_possible = nb_ops - overflow;
- if (nb_ops_possible == 0)
- return 0;
- }
-
- while (nb_ops_sent != nb_ops_possible) {
- ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
- tmp_qp->op_cookies[tail / queue->msg_size], tmp_qp);
- if (ret != 0) {
- tmp_qp->stats.enqueue_err_count++;
- /*
- * This message cannot be enqueued,
- * decrease number of ops that wasn't sent
- */
- tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
- if (nb_ops_sent == 0)
- return 0;
- goto kick_tail;
- }
-
- tail = adf_modulo(tail + queue->msg_size, queue->modulo);
- nb_ops_sent++;
- cur_op++;
- }
-kick_tail:
- queue->tail = tail;
- tmp_qp->stats.enqueued_count += nb_ops_sent;
- queue->nb_pending_requests += nb_ops_sent;
- if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
- queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
- txq_write_tail(tmp_qp, queue);
- }
- return nb_ops_sent;
+ return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
}
-static inline
-void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
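+/*
+ * Turn one firmware response back into its originating rte_crypto_op:
+ * check the status reported by the device, run BPI cipher post-processing
+ * when the session requires it, and hand the op back via *op.
+ */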
+int
+qat_sym_process_response(void **op, uint8_t *resp,
+ __rte_unused void *op_cookie,
+ __rte_unused enum qat_device_gen qat_dev_gen)
{
- uint32_t old_head, new_head;
- uint32_t max_head;
- old_head = q->csr_head;
- new_head = q->head;
- max_head = qp->nb_descriptors * q->msg_size;
+ struct icp_qat_fw_comn_resp *resp_msg =
+ (struct icp_qat_fw_comn_resp *)resp;
+ struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
- /* write out free descriptors */
- void *cur_desc = (uint8_t *)q->base_addr + old_head;
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+#endif
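+	/* map the firmware return status onto the crypto op status */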
- if (new_head < old_head) {
- memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
- memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
+ if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status)) {
+
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
- memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
+ struct qat_sym_session *sess = (struct qat_sym_session *)
+ get_session_private_data(
+ rx_op->sym->session,
+ cryptodev_qat_driver_id);
+
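+		/* BPI (DOCSIS) ciphers: post-process the trailing partial block */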
+ if (sess->bpi_ctx)
+ qat_bpicipher_postprocess(sess, rx_op);
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
- q->nb_processed_responses = 0;
- q->csr_head = new_head;
+ *op = (void *)rx_op;
- /* write current head to CSR */
- WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
- q->hw_queue_number, new_head);
+ return 0;
}
+
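+/*
+ * Cryptodev dequeue callback: a thin wrapper around the generic
+ * qat_dequeue_op_burst(); the response ring handling that used to live
+ * here has moved to the common queue-pair code.
+ */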
uint16_t
-qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
- struct qat_queue *rx_queue, *tx_queue;
- struct qat_qp *tmp_qp = (struct qat_qp *)qp;
- uint32_t msg_counter = 0;
- struct rte_crypto_op *rx_op;
- struct icp_qat_fw_comn_resp *resp_msg;
- uint32_t head;
-
- rx_queue = &(tmp_qp->rx_q);
- tx_queue = &(tmp_qp->tx_q);
- head = rx_queue->head;
- resp_msg = (struct icp_qat_fw_comn_resp *)
- ((uint8_t *)rx_queue->base_addr + head);
-
- while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
- msg_counter != nb_ops) {
- rx_op = (struct rte_crypto_op *)(uintptr_t)
- (resp_msg->opaque_data);
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
- rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
- sizeof(struct icp_qat_fw_comn_resp));
-#endif
- if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
- ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
- resp_msg->comn_hdr.comn_status)) {
- rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- struct qat_session *sess = (struct qat_session *)
- get_session_private_data(
- rx_op->sym->session,
- cryptodev_qat_driver_id);
-
- if (sess->bpi_ctx)
- qat_bpicipher_postprocess(sess, rx_op);
- rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- }
-
- head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
- resp_msg = (struct icp_qat_fw_comn_resp *)
- ((uint8_t *)rx_queue->base_addr + head);
- *ops = rx_op;
- ops++;
- msg_counter++;
- }
- if (msg_counter > 0) {
- rx_queue->head = head;
- tmp_qp->stats.dequeued_count += msg_counter;
- rx_queue->nb_processed_responses += msg_counter;
- tmp_qp->inflights16 -= msg_counter;
-
- if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
- rxq_free_desc(tmp_qp, rx_queue);
- }
- /* also check if tail needs to be advanced */
- if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
- tx_queue->tail != tx_queue->csr_tail) {
- txq_write_tail(tmp_qp, tx_queue);
- }
- return msg_counter;
+ return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
}
static inline int
iv_length);
}
-static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
- struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
+
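+/*
+ * Build a QAT firmware request descriptor in out_msg from a symmetric
+ * crypto op.  Unlike the old qat_write_hw_desc_entry(), the queue pair is
+ * not needed here; only the device generation and the per-op cookie are
+ * passed in.  Returns 0 on success or -EINVAL if the op or session cannot
+ * be handled.
+ */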
+int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie, enum qat_device_gen qat_dev_gen)
{
int ret = 0;
- struct qat_session *ctx;
+ struct qat_sym_session *ctx;
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
uint32_t min_ofs = 0;
uint64_t src_buf_start = 0, dst_buf_start = 0;
uint8_t do_sgl = 0;
+ struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
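+	/* the per-op cookie provides the SGL table storage used for chained mbufs */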
+ struct qat_sym_op_cookie *cookie =
+ (struct qat_sym_op_cookie *)op_cookie;
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
return -EINVAL;
}
- ctx = (struct qat_session *)get_session_private_data(
+ ctx = (struct qat_sym_session *)get_session_private_data(
op->sym->session, cryptodev_qat_driver_id);
if (unlikely(ctx == NULL)) {
return -EINVAL;
}
- if (unlikely(ctx->min_qat_dev_gen > qp->qat_dev_gen)) {
+ if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
return -EINVAL;
ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
QAT_COMN_PTR_TYPE_SGL);
ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
- &qat_op_cookie->qat_sgl_list_src,
+ &cookie->qat_sgl_list_src,
qat_req->comn_mid.src_length);
if (ret) {
PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
if (likely(op->sym->m_dst == NULL))
qat_req->comn_mid.dest_data_addr =
qat_req->comn_mid.src_data_addr =
- qat_op_cookie->qat_sgl_src_phys_addr;
+ cookie->qat_sgl_src_phys_addr;
else {
ret = qat_sgl_fill_array(op->sym->m_dst,
dst_buf_start,
- &qat_op_cookie->qat_sgl_list_dst,
+ &cookie->qat_sgl_list_dst,
qat_req->comn_mid.dst_length);
if (ret) {
}
qat_req->comn_mid.src_data_addr =
- qat_op_cookie->qat_sgl_src_phys_addr;
+ cookie->qat_sgl_src_phys_addr;
qat_req->comn_mid.dest_data_addr =
- qat_op_cookie->qat_sgl_dst_phys_addr;
+ cookie->qat_sgl_dst_phys_addr;
}
} else {
qat_req->comn_mid.src_data_addr = src_buf_start;
return 0;
}
-static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
-{
- uint32_t div = data >> shift;
- uint32_t mult = div << shift;
-
- return data - mult;
-}
-void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
+void qat_sym_stats_get(struct rte_cryptodev *dev,
struct rte_cryptodev_stats *stats)
{
int i;
}
}
-void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
+void qat_sym_stats_reset(struct rte_cryptodev *dev)
{
int i;
struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);