			qat_sym_process_response(ops, resp_msg);
		else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION)
			qat_comp_process_response(ops, resp_msg,
-				&tmp_qp->stats.dequeue_err_count);
+				tmp_qp->op_cookies[head / rx_queue->msg_size],
+				&tmp_qp->stats.dequeue_err_count);
		else if (tmp_qp->service_type == QAT_SERVICE_ASYMMETRIC) {
#ifdef BUILD_QAT_ASYM
			qat_asym_process_response(ops, resp_msg,
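
The new second argument is worth a note: `head` is a byte offset into the response ring, not an index, so the cookie for the completing descriptor is recovered by dividing by the fixed message size. A minimal sketch of that mapping, with illustrative names rather than the driver's exact structures:

#include <stdint.h>

struct ring_view {
	uint32_t msg_size;	/* bytes per hardware message */
	void **op_cookies;	/* one pre-allocated cookie per slot */
};

static inline void *
cookie_at_head(const struct ring_view *r, uint32_t head)
{
	/* head advances in msg_size steps, so this division yields
	 * the descriptor index, which doubles as the cookie index. */
	return r->op_cookies[head / r->msg_size];
}
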
__rte_weak int
qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
+		void *op_cookie __rte_unused,
		uint64_t *dequeue_err_count __rte_unused)
{
	return 0;
}
/* Needed for weak function */
int
qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
-		uint64_t *dequeue_err_count);
+		void *op_cookie __rte_unused,
+		uint64_t *dequeue_err_count __rte_unused);

#endif /* _QAT_QP_H_ */
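
The `__rte_weak` stub above exists so the common queue-pair code links even in builds without the compression PMD; when the PMD is compiled in, its strong definition of `qat_comp_process_response()` wins at link time. A sketch of the pattern using the GCC attribute that `__rte_weak` wraps; the two definitions live in separate translation units and the names are illustrative:

#include <stdint.h>

/* common.c -- always built: placeholder so the call site resolves */
__attribute__((weak)) int
handle_response(void **op, uint8_t *resp)
{
	(void)op;
	(void)resp;
	return 0;	/* nothing to do without the feature linked in */
}

/* pmd.c -- built only when the feature is enabled: the strong
 * definition silently replaces the weak one at link time. */
int
handle_response(void **op, uint8_t *resp)
{
	/* real processing of *resp into *op would go here */
	(void)resp;
	*op = NULL;
	return 0;
}
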
			rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
	}

+	if (unlikely(rte_pktmbuf_pkt_len(op->m_dst) < QAT_MIN_OUT_BUF_SIZE)) {
+		/* QAT does not support destination buffers smaller than
+		 * QAT_MIN_OUT_BUF_SIZE. Propagate the error by converting
+		 * this request into a null one and let the response
+		 * handler report the status stored in the cookie.
+		 */
+		QAT_DP_LOG(WARNING, "QAT destination buffer too small - resend with larger buffer");
+		comp_req->comn_hdr.service_type = ICP_QAT_FW_COMN_REQ_NULL;
+		comp_req->comn_hdr.service_cmd_id = ICP_QAT_FW_NULL_REQ_SERV_ID;
+		cookie->error = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+	}
+
#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
	QAT_DP_LOG(DEBUG, "Direction: %s",
		qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
}
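
Rather than rejecting the enqueue outright, the build step converts the descriptor into a firmware null request and parks the error in the per-op cookie. The operation still occupies its ring slot, so the strict 1:1 request/response pairing survives and the caller learns of the failure through the normal dequeue path. The shape of the pattern, with illustrative types standing in for the firmware header:

#include <stdint.h>

struct sketch_cookie { uint8_t error; };	/* 0 means "no error" */
struct sketch_msg { int is_null_req; };	/* stands in for the FW header */

static void
defer_error(struct sketch_msg *msg, struct sketch_cookie *ck, uint8_t status)
{
	msg->is_null_req = 1;	/* device acknowledges it without doing work */
	ck->error = status;	/* picked up when this slot is dequeued */
}
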
int
-qat_comp_process_response(void **op, uint8_t *resp, uint64_t *dequeue_err_count)
+qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
+		uint64_t *dequeue_err_count)
{
	struct icp_qat_fw_comp_resp *resp_msg =
			(struct icp_qat_fw_comp_resp *)resp;
+	struct qat_comp_op_cookie *cookie =
+			(struct qat_comp_op_cookie *)op_cookie;
	struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
			(resp_msg->opaque_data);
	struct qat_comp_xform *qat_xform = (struct qat_comp_xform *)
			(rx_op->private_xform);
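
Worth noting in the unchanged context: `rx_op` comes back out of `resp_msg->opaque_data`, a 64-bit field the driver fills with the op pointer at enqueue time and the device echoes verbatim in the response, so no table lookup is needed to re-associate completions. A compressed sketch of that round trip, with illustrative structs:

#include <stdint.h>

struct req { uint64_t opaque_data; };
struct resp { uint64_t opaque_data; };	/* echoed back by the device */

static void
enqueue_side(struct req *r, void *op)
{
	r->opaque_data = (uint64_t)(uintptr_t)op;	/* stash the pointer */
}

static void *
dequeue_side(const struct resp *p)
{
	return (void *)(uintptr_t)p->opaque_data;	/* recover it */
}
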
			sizeof(struct icp_qat_fw_comp_resp));
#endif

+	if (unlikely(cookie->error)) {
+		rx_op->status = cookie->error;
+		cookie->error = 0;
+		++(*dequeue_err_count);
+		rx_op->debug_status = 0;
+		rx_op->consumed = 0;
+		rx_op->produced = 0;
+		*op = (void *)rx_op;
+		return 0;
+	}
+
	if (likely(qat_xform->qat_comp_request_type
			!= QAT_COMP_REQUEST_DECOMPRESS)) {
		if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
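
From the application's side the deferred error looks like any other failed operation: it surfaces as the status set above, with `consumed` and `produced` forced to zero. A usage sketch against DPDK's public compressdev API; `BURST`, `dev_id` and `qp_id` are assumed to be set up elsewhere:

#include <rte_compressdev.h>

struct rte_comp_op *ops[BURST];
uint16_t i, n = rte_compressdev_dequeue_burst(dev_id, qp_id, ops, BURST);

for (i = 0; i < n; i++) {
	if (ops[i]->status == RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED) {
		/* nothing was consumed or produced: reallocate a
		 * larger m_dst and resubmit the operation */
	}
}
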
/* fallback to fixed compression threshold */
#define QAT_FALLBACK_THLD ((uint32_t)(RTE_PMD_QAT_COMP_IM_BUFFER_SIZE / 1.1))

+#define QAT_MIN_OUT_BUF_SIZE 46
+
enum qat_comp_request_type {
	QAT_COMP_REQUEST_FIXED_COMP_STATELESS,
	QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS,
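
The 46-byte floor is a device-side constraint on the destination buffer; requests below it are the ones diverted to the null-request path above. If an application layers its own minimum on top, a compile-time guard keeps the two in sync. A hypothetical sketch: `qat_comp.h` is internal to the PMD, so the floor is mirrored locally here, and `APP_MIN_DST_SZ` is an invented application constant:

/* hypothetical application-side guard */
#define APP_QAT_MIN_OUT_BUF_SIZE 46	/* mirrors QAT_MIN_OUT_BUF_SIZE */
#define APP_MIN_DST_SZ 64
_Static_assert(APP_MIN_DST_SZ >= APP_QAT_MIN_OUT_BUF_SIZE,
	"destination buffers must meet the QAT minimum");
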
	phys_addr_t qat_sgl_src_phys_addr;
	phys_addr_t qat_sgl_dst_phys_addr;
	/* dynamically created SGLs */
+	uint8_t error;
	uint8_t socket_id;
	uint16_t src_nb_elems;
	uint16_t dst_nb_elems;
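
A `uint8_t` is enough here because the rte_comp status values are small enums, and the field doubles as a boolean: `RTE_COMP_OP_STATUS_SUCCESS` is 0 in rte_comp.h, so `if (cookie->error)` reads as "a failure status was parked". Stated as a checkable assumption:

#include <rte_comp.h>

/* the deferral scheme relies on "success == 0" so a zeroed error
 * byte means "no parked status" */
_Static_assert(RTE_COMP_OP_STATUS_SUCCESS == 0,
	"cookie->error uses 0 as the no-error sentinel");
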
		enum qat_device_gen qat_dev_gen __rte_unused);

int
-qat_comp_process_response(void **op, uint8_t *resp,
+qat_comp_process_response(void **op, uint8_t *resp, void *op_cookie,
		uint64_t *dequeue_err_count);

int
		cookie->dst_nb_elems = QAT_PMD_COMP_SGL_DEF_SEGMENTS;
		cookie->socket_id = dev->data->socket_id;
+
+		cookie->error = 0;
	}

	return ret;
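
Clearing `error` at queue-pair setup matters because cookies are reused across operations: a stale status from a previous occupant of the slot would otherwise be mistaken for a fresh deferred error. Dequeue also re-zeroes the field after reporting, as shown earlier, so each slot returns to a clean state. The lifecycle in miniature, with illustrative names:

#include <stdint.h>

struct slot { uint8_t error; };

static void setup(struct slot *s) { s->error = 0; }	/* clean slate */
static void fail_enqueue(struct slot *s, uint8_t st) { s->error = st; }

static uint8_t
collect(struct slot *s)
{
	uint8_t st = s->error;
	s->error = 0;	/* mirror of the dequeue-side reset */
	return st;	/* 0 unless an error was parked */
}
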