#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
#define ADF_NUM_BUNDLES_PER_DEV 1
-#define ADF_NUM_SYM_QPS_PER_BUNDLE 2
+/* Maximum number of qps for any service type */
+#define ADF_MAX_QPS_PER_BUNDLE 4
#define ADF_RING_DIR_TX 0
#define ADF_RING_DIR_RX 1
#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
ADF_RING_CSR_INT_FLAG_AND_COL, value)
-#endif
+
+#endif /* ADF_TRANSPORT_ACCESS_MACROS_H */
QAT_GEN2,
};
+enum qat_service_type {
+ QAT_SERVICE_ASYMMETRIC = 0,
+ QAT_SERVICE_SYMMETRIC,
+ QAT_SERVICE_COMPRESSION,
+ QAT_SERVICE_INVALID
+};
+#define QAT_MAX_SERVICES (QAT_SERVICE_INVALID)
+
/**< Common struct for scatter-gather list operations */
struct qat_alg_buf {
uint32_t len;
#include "qat_device.h"
#include "adf_transport_access_macros.h"
+#include "qat_qp.h"
+
+/* Hardware device information per generation */
+__extension__
+struct qat_gen_hw_data qp_gen_config[] = {
+ [QAT_GEN1] = {
+ .dev_gen = QAT_GEN1,
+ .qp_hw_data = qat_gen1_qps,
+ },
+ [QAT_GEN2] = {
+ .dev_gen = QAT_GEN2,
+ .qp_hw_data = qat_gen1_qps,
+ /* gen2 has same ring layout as gen1 */
+ },
+};
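
For reference, a minimal sketch (not part of the patch) of the lookup this table enables: index by device generation, then by service type, to reach the per-bundle qp table defined in qat_qp.c:

	/* Illustrative only: resolve the symmetric qp table for a gen1 device */
	const struct qat_qp_hw_data *sym_qps =
		qp_gen_config[QAT_GEN1].qp_hw_data[QAT_SERVICE_SYMMETRIC];
	int nb_sym_qps = qat_qps_per_service(sym_qps, QAT_SERVICE_SYMMETRIC);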
int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
__rte_unused struct rte_cryptodev_config *config)
void qat_dev_info_get(struct rte_cryptodev *dev,
		struct rte_cryptodev_info *info)
{
struct qat_pmd_private *internals = dev->data->dev_private;
+ const struct qat_qp_hw_data *sym_hw_qps =
+ qp_gen_config[internals->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_SYMMETRIC];
PMD_INIT_FUNC_TRACE();
if (info != NULL) {
info->max_nb_queue_pairs =
- ADF_NUM_SYM_QPS_PER_BUNDLE *
- ADF_NUM_BUNDLES_PER_DEV;
+ qat_qps_per_service(sym_hw_qps, QAT_SERVICE_SYMMETRIC);
info->feature_flags = dev->feature_flags;
info->capabilities = internals->qat_dev_capabilities;
info->sym.max_nb_sessions = internals->max_nb_sessions;
#include <rte_bus_pci.h>
#include "qat_common.h"
#include "qat_logs.h"
+#include "adf_transport_access_macros.h"
+#include "qat_qp.h"
extern uint8_t cryptodev_qat_driver_id;
/**< Device ID for this instance */
};
+struct qat_gen_hw_data {
+ enum qat_device_gen dev_gen;
+ const struct qat_qp_hw_data (*qp_hw_data)[ADF_MAX_QPS_PER_BUNDLE];
+};
+
+extern struct qat_gen_hw_data qp_gen_config[];
+
int qat_dev_config(struct rte_cryptodev *dev,
struct rte_cryptodev_config *config);
int qat_dev_start(struct rte_cryptodev *dev);
#include "qat_logs.h"
#include "qat_qp.h"
-#include "qat_sym.h"
-
+#include "qat_device.h"
#include "adf_transport_access_macros.h"
+
#define ADF_MAX_DESC 4096
#define ADF_MIN_DESC 128
ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
(ADF_ARB_REG_SLOT * index), value)
+__extension__
+const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
+ [ADF_MAX_QPS_PER_BUNDLE] = {
+ /* queue pairs which provide an asymmetric crypto service */
+ [QAT_SERVICE_ASYMMETRIC] = {
+ {
+ .service_type = QAT_SERVICE_ASYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 0,
+ .rx_ring_num = 8,
+ .tx_msg_size = 64,
+ .rx_msg_size = 32,
+	}, {
+		.service_type = QAT_SERVICE_ASYMMETRIC,
+		.hw_bundle_num = 0,
+ .tx_ring_num = 1,
+ .rx_ring_num = 9,
+ .tx_msg_size = 64,
+ .rx_msg_size = 32,
+ }, {
+ .service_type = QAT_SERVICE_INVALID,
+ }, {
+ .service_type = QAT_SERVICE_INVALID,
+ }
+ },
+ /* queue pairs which provide a symmetric crypto service */
+ [QAT_SERVICE_SYMMETRIC] = {
+ {
+ .service_type = QAT_SERVICE_SYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 2,
+ .rx_ring_num = 10,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ },
+ {
+ .service_type = QAT_SERVICE_SYMMETRIC,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 3,
+ .rx_ring_num = 11,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ }, {
+ .service_type = QAT_SERVICE_INVALID,
+ }, {
+ .service_type = QAT_SERVICE_INVALID,
+ }
+ },
+ /* queue pairs which provide a compression service */
+ [QAT_SERVICE_COMPRESSION] = {
+ {
+ .service_type = QAT_SERVICE_COMPRESSION,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 6,
+ .rx_ring_num = 14,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ }, {
+ .service_type = QAT_SERVICE_COMPRESSION,
+ .hw_bundle_num = 0,
+ .tx_ring_num = 7,
+ .rx_ring_num = 15,
+ .tx_msg_size = 128,
+ .rx_msg_size = 32,
+ }, {
+ .service_type = QAT_SERVICE_INVALID,
+ }, {
+ .service_type = QAT_SERVICE_INVALID,
+ }
+ }
+};
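
Every valid entry above pairs tx ring n with rx ring n + 8, matching the gen1 bundle layout of 16 rings (tx 0-7, rx 8-15). A hypothetical helper, not part of the patch, that makes the pairing explicit:

	/* Hypothetical: derive the rx ring paired with a gen1 tx ring */
	static inline uint8_t
	qat_gen1_rx_ring_for_tx(uint8_t tx_ring_num)
	{
		return tx_ring_num + 8; /* rx rings occupy the upper half of the bundle */
	}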
+
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
uint32_t queue_size_bytes);
static void qat_queue_delete(struct qat_queue *queue);
static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);
+
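+/* Count entries of the given service type in a per-bundle qp table,
+ * scaled by the number of bundles on the device.
+ */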
+int qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
+ enum qat_service_type service)
+{
+ int i, count;
+
+ for (i = 0, count = 0; i < ADF_MAX_QPS_PER_BUNDLE; i++)
+ if (qp_hw_data[i].service_type == service)
+ count++;
+ return count * ADF_NUM_BUNDLES_PER_DEV;
+}
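
Worked example: with qat_gen1_qps above and ADF_NUM_BUNDLES_PER_DEV set to 1, each service has two valid entries, so the count resolves to 2:

	int n = qat_qps_per_service(qat_gen1_qps[QAT_SERVICE_SYMMETRIC],
			QAT_SERVICE_SYMMETRIC); /* n == 2 */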
+
static const struct rte_memzone *
queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
int socket_id)
struct rte_pci_device *pci_dev = qat_dev->pci_dev;
int ret = 0;
uint16_t desc_size = (dir == ADF_RING_DIR_TX ?
- qp_conf->tx_msg_size : qp_conf->rx_msg_size);
+ qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size);
uint32_t queue_size_bytes = (qp_conf->nb_descriptors)*(desc_size);
- queue->hw_bundle_number = qp_conf->hw_bundle_num;
+ queue->hw_bundle_number = qp_conf->hw->hw_bundle_num;
queue->hw_queue_number = (dir == ADF_RING_DIR_TX ?
- qp_conf->tx_ring_num : qp_conf->rx_ring_num);
+ qp_conf->hw->tx_ring_num : qp_conf->hw->rx_ring_num);
if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
#define _QAT_QP_H_
#include "qat_common.h"
-#include "qat_device.h"
+#include <rte_cryptodev_pmd.h>
+#include "adf_transport_access_macros.h"
#define QAT_CSR_HEAD_WRITE_THRESH 32U
/* number of requests to accumulate before writing head CSR */
/**
- * Structure with data needed for creation of queue pair.
+ * Hardware data for one queue pair, fixed per service type and
+ * device generation.
 */
-struct qat_qp_config {
+struct qat_qp_hw_data {
+ enum qat_service_type service_type;
uint8_t hw_bundle_num;
uint8_t tx_ring_num;
uint8_t rx_ring_num;
uint16_t tx_msg_size;
uint16_t rx_msg_size;
+};
+/**
+ * Structure with data needed for creation of queue pair.
+ */
+struct qat_qp_config {
+ const struct qat_qp_hw_data *hw;
uint32_t nb_descriptors;
uint32_t cookie_size;
int socket_id;
/**< qat device this qp is on */
} __rte_cache_aligned;
+extern const struct qat_qp_hw_data qat_gen1_qps[][ADF_MAX_QPS_PER_BUNDLE];
+
uint16_t
qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);
qat_qp_setup(struct qat_pmd_private *qat_dev,
struct qat_qp **qp_addr, uint16_t queue_pair_id,
struct qat_qp_config *qat_qp_conf);
+
+int
+qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
+ enum qat_service_type service);
#endif /* _QAT_QP_H_ */
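
A minimal sketch, assuming a gen1 device, of how the two-level config introduced here is filled before calling qat_qp_setup() (qat_sym_qp_setup() below does the equivalent per qp_id):

	/* Illustrative only: wire one hw table entry into a qp config;
	 * build_request/process_response callbacks omitted in this sketch.
	 */
	struct qat_qp_config conf = {
		.hw = &qat_gen1_qps[QAT_SERVICE_SYMMETRIC][0],
		.nb_descriptors = 512, /* within ADF_MIN_DESC..ADF_MAX_DESC */
		.socket_id = 0,
	};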
#include "qat_sym.h"
#include "qat_qp.h"
#include "adf_transport_access_macros.h"
+#include "qat_device.h"
#define BYTE_LENGTH 8
/* bpi is only used for partial blocks of DES and AES
*/
#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
-#define ADF_SYM_TX_RING_DESC_SIZE 128
-#define ADF_SYM_RX_RING_DESC_SIZE 32
-#define ADF_SYM_TX_QUEUE_STARTOFF 2
-/* Offset from bundle start to 1st Sym Tx queue */
-#define ADF_SYM_RX_QUEUE_STARTOFF 10
-
/** Encrypt a single partial block
* Depends on openssl libcrypto
* Uses ECB+XOR to do CFB encryption, same result, more performant
PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
}
-
-
int qat_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
{
PMD_DRV_LOG(DEBUG, "Release sym qp %u on device %d",
queue_pair_id, dev->data->dev_id);
+
return qat_qp_release((struct qat_qp **)
&(dev->data->queue_pairs[queue_pair_id]));
}
int ret = 0;
uint32_t i;
struct qat_qp_config qat_qp_conf;
+
struct qat_qp **qp_addr =
(struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
struct qat_pmd_private *qat_private = dev->data->dev_private;
+ const struct qat_qp_hw_data *sym_hw_qps =
+ qp_gen_config[qat_private->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_SYMMETRIC];
+ const struct qat_qp_hw_data *qp_hw_data = sym_hw_qps + qp_id;
/* If qp is already in use free ring memory and qp metadata. */
if (*qp_addr != NULL) {
if (ret < 0)
return ret;
}
- if (qp_id >= (ADF_NUM_SYM_QPS_PER_BUNDLE *
- ADF_NUM_BUNDLES_PER_DEV)) {
+ if (qp_id >= qat_qps_per_service(sym_hw_qps, QAT_SERVICE_SYMMETRIC)) {
PMD_DRV_LOG(ERR, "qp_id %u invalid for this device", qp_id);
return -EINVAL;
}
- qat_qp_conf.hw_bundle_num = (qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE);
- qat_qp_conf.tx_ring_num = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
- ADF_SYM_TX_QUEUE_STARTOFF;
- qat_qp_conf.rx_ring_num = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
- ADF_SYM_RX_QUEUE_STARTOFF;
- qat_qp_conf.tx_msg_size = ADF_SYM_TX_RING_DESC_SIZE;
- qat_qp_conf.rx_msg_size = ADF_SYM_RX_RING_DESC_SIZE;
+ qat_qp_conf.hw = qp_hw_data;
qat_qp_conf.build_request = qat_sym_build_request;
qat_qp_conf.process_response = qat_sym_process_response;
qat_qp_conf.cookie_size = sizeof(struct qat_sym_op_cookie);
}
return ret;
-
}