#include <rte_prefetch.h>
#include "qat_logs.h"
-#include "qat_qp.h"
#include "qat_device.h"
+#include "qat_qp.h"
+#include "qat_sym.h"
#include "adf_transport_access_macros.h"
__extension__
const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES]
- [ADF_MAX_QPS_PER_BUNDLE] = {
+ [ADF_MAX_QPS_ON_ANY_SERVICE] = {
/* queue pairs which provide an asymmetric crypto service */
[QAT_SERVICE_ASYMMETRIC] = {
{
}, {
.service_type = QAT_SERVICE_ASYMMETRIC,
+ .hw_bundle_num = 0,
.tx_ring_num = 1,
.rx_ring_num = 9,
.tx_msg_size = 64,
.rx_msg_size = 32,
- }, {
- .service_type = QAT_SERVICE_INVALID,
- }, {
- .service_type = QAT_SERVICE_INVALID,
}
},
/* queue pairs which provide a symmetric crypto service */
.rx_ring_num = 11,
.tx_msg_size = 128,
.rx_msg_size = 32,
- }, {
- .service_type = QAT_SERVICE_INVALID,
- }, {
- .service_type = QAT_SERVICE_INVALID,
}
},
/* queue pairs which provide a compression service */
.rx_ring_num = 15,
.tx_msg_size = 128,
.rx_msg_size = 32,
- }, {
- .service_type = QAT_SERVICE_INVALID,
- }, {
- .service_type = QAT_SERVICE_INVALID,
}
}
};
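+/*
+ * Illustrative lookup: the hw configuration for symmetric-crypto queue
+ * pair 'qp_id' (a caller-chosen index) would be taken from
+ *	&qat_gen1_qps[QAT_SERVICE_SYMMETRIC][qp_id]
+ */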
static int qat_qp_check_queue_alignment(uint64_t phys_addr,
uint32_t queue_size_bytes);
static void qat_queue_delete(struct qat_queue *queue);
-static int qat_queue_create(struct qat_pmd_private *qat_dev,
+static int qat_queue_create(struct qat_pci_device *qat_dev,
struct qat_queue *queue, struct qat_qp_config *, uint8_t dir);
static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
uint32_t *queue_size_for_csr);
static void adf_configure_queues(struct qat_qp *queue);
-static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
-static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock);
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock);
int qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
		enum qat_service_type service)
{
int i, count;
- for (i = 0, count = 0; i < ADF_MAX_QPS_PER_BUNDLE; i++)
+ for (i = 0, count = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++)
if (qp_hw_data[i].service_type == service)
count++;
- return count * ADF_NUM_BUNDLES_PER_DEV;
+ return count;
}
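+/*
+ * Illustrative use: a service can size its queue-pair array from the
+ * gen1 table, e.g.
+ *	qat_qps_per_service(qat_gen1_qps[QAT_SERVICE_SYMMETRIC],
+ *			QAT_SERVICE_SYMMETRIC);
+ */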
static const struct rte_memzone *
socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
}
-int qat_qp_setup(struct qat_pmd_private *qat_dev,
+int qat_qp_setup(struct qat_pci_device *qat_dev,
struct qat_qp **qp_addr,
uint16_t queue_pair_id,
struct qat_qp_config *qat_qp_conf)
char op_cookie_pool_name[RTE_RING_NAMESIZE];
uint32_t i;
- PMD_DRV_LOG(DEBUG, "Setup qp %u on device %d gen %d",
- queue_pair_id, qat_dev->dev_id, qat_dev->qat_dev_gen);
+ PMD_DRV_LOG(DEBUG, "Setup qp %u on qat pci device %d gen %d",
+ queue_pair_id, qat_dev->qat_dev_id, qat_dev->qat_dev_gen);
if ((qat_qp_conf->nb_descriptors > ADF_MAX_DESC) ||
(qat_qp_conf->nb_descriptors < ADF_MIN_DESC)) {
}
adf_configure_queues(qp);
- adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
+ adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr,
+ &qat_dev->arb_csr_lock);
- snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_%s_qp_op_%d_%hu",
- pci_dev->driver->driver.name, qat_qp_conf->service_str,
- qat_dev->dev_id, queue_pair_id);
+ snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE,
+ "%s%d_cookies_%s_qp%hu",
+ pci_dev->driver->driver.name, qat_dev->qat_dev_id,
+ qat_qp_conf->service_str, queue_pair_id);
+ PMD_DRV_LOG(DEBUG, "cookiepool: %s", op_cookie_pool_name);
qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
if (qp->op_cookie_pool == NULL)
qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
qp->qat_dev_gen = qat_dev->qat_dev_gen;
qp->build_request = qat_qp_conf->build_request;
- qp->process_response = qat_qp_conf->process_response;
qp->qat_dev = qat_dev;
PMD_DRV_LOG(DEBUG, "QP setup complete: id: %d, cookiepool: %s",
return 0;
create_err:
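+	/*
+	 * Unwind partial setup: the cookie pool and cookie array may or
+	 * may not have been allocated at this point.
+	 */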
+ if (qp->op_cookie_pool)
+ rte_mempool_free(qp->op_cookie_pool);
+ rte_free(qp->op_cookies);
rte_free(qp);
return -EFAULT;
}
}
PMD_DRV_LOG(DEBUG, "Free qp on qat_pci device %d",
- qp->qat_dev->dev_id);
+ qp->qat_dev->qat_dev_id);
/* Don't free memory if there are still responses to be processed */
if (qp->inflights16 == 0) {
return -EAGAIN;
}
- adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
+ adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr,
+ &qp->qat_dev->arb_csr_lock);
for (i = 0; i < qp->nb_descriptors; i++)
rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
}
static int
-qat_queue_create(struct qat_pmd_private *qat_dev, struct qat_queue *queue,
+qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue,
struct qat_qp_config *qp_conf, uint8_t dir)
{
uint64_t queue_base;
* Allocate a memzone for the queue - create a unique name.
*/
snprintf(queue->memz_name, sizeof(queue->memz_name),
- "%s_%s_%s_%d_%d_%d",
- pci_dev->driver->driver.name, qp_conf->service_str,
- "qp_mem", qat_dev->dev_id,
+ "%s_%d_%s_%s_%d_%d",
+ pci_dev->driver->driver.name, qat_dev->qat_dev_id,
+ qp_conf->service_str, "qp_mem",
queue->hw_bundle_number, queue->hw_queue_number);
qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
qp_conf->socket_id);
queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
ADF_BYTES_TO_MSG_SIZE(desc_size));
- queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
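+	/*
+	 * Ring sizes are powers of two, so offsets into the ring can be
+	 * wrapped with a bitwise mask instead of a divide; see adf_modulo().
+	 */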
+ queue->modulo_mask = (1 << ADF_RING_SIZE_MODULO(queue->queue_size)) - 1;
if (queue->max_inflights < 2) {
PMD_DRV_LOG(ERR, "Invalid num inflights");
queue->hw_queue_number, queue_base);
PMD_DRV_LOG(DEBUG, "RING: Name:%s, size in CSR: %u, in bytes %u,"
- " nb msgs %u, msg_size %u, max_inflights %u modulo %u",
+ " nb msgs %u, msg_size %u, max_inflights %u modulo mask %u",
queue->memz_name,
queue->queue_size, queue_size_bytes,
qp_conf->nb_descriptors, desc_size,
- queue->max_inflights, queue->modulo);
+ queue->max_inflights, queue->modulo_mask);
return 0;
return -EINVAL;
}
-static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
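+/*
+ * The ring-arbitration enable CSR is per bundle, with one bit per tx ring,
+ * and is shared between queue pairs, so the caller-supplied spinlock (the
+ * device's arb_csr_lock) serialises the read-modify-write sequences below.
+ */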
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock)
{
uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
(ADF_ARB_REG_SLOT *
txq->hw_bundle_number);
uint32_t value;
PMD_INIT_FUNC_TRACE();
+
+ rte_spinlock_lock(lock);
value = ADF_CSR_RD(base_addr, arb_csr_offset);
value |= (0x01 << txq->hw_queue_number);
ADF_CSR_WR(base_addr, arb_csr_offset, value);
+ rte_spinlock_unlock(lock);
}
-static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr,
+ rte_spinlock_t *lock)
{
uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
(ADF_ARB_REG_SLOT *
txq->hw_bundle_number);
uint32_t value;
PMD_INIT_FUNC_TRACE();
+
+ rte_spinlock_lock(lock);
value = ADF_CSR_RD(base_addr, arb_csr_offset);
- value ^= (0x01 << txq->hw_queue_number);
+ value &= ~(0x01 << txq->hw_queue_number);
ADF_CSR_WR(base_addr, arb_csr_offset, value);
+ rte_spinlock_unlock(lock);
}
static void adf_configure_queues(struct qat_qp *qp)
queue->hw_queue_number, queue_config);
}
-
-static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
+static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask)
{
- uint32_t div = data >> shift;
- uint32_t mult = div << shift;
-
- return data - mult;
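+	/* modulo_mask is the power-of-two ring size in bytes minus one,
+	 * so the AND wraps 'data' (a byte offset) back into the ring. */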
+ return data & modulo_mask;
}
static inline void
goto kick_tail;
}
- tail = adf_modulo(tail + queue->msg_size, queue->modulo);
+ tail = adf_modulo(tail + queue->msg_size, queue->modulo_mask);
ops++;
nb_ops_sent++;
}
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
resp_counter != nb_ops) {
- tmp_qp->process_response(ops, resp_msg,
- tmp_qp->op_cookies[head / rx_queue->msg_size],
- tmp_qp->qat_dev_gen);
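+		/* Hand the firmware response at 'head' to the symmetric
+		 * crypto service, which fills in the op's status. */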
+ qat_sym_process_response(ops, resp_msg);
- head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
+ head = adf_modulo(head + rx_queue->msg_size,
+ rx_queue->modulo_mask);
resp_msg = (uint8_t *)rx_queue->base_addr + head;
ops++;