uint8_t reserved[0x80];
};
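+/*
+ * HW match tuple: 16-bit match length, 16-bit start pointer and a
+ * 32-bit rule id, 8 bytes per match in the output buffer.
+ */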
+struct mlx5_ifc_regexp_match_tuple_bits {
+ uint8_t length[0x10];
+ uint8_t start_ptr[0x10];
+ uint8_t rule_id[0x20];
+};
+
/* Adding direct verbs to data-path. */
/* CQ sequence number mask. */
__mlx5_16_bit_off(typ, fld))); \
} while (0)
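+/*
+ * Volatile variant of MLX5_GET for fields the HW updates asynchronously,
+ * such as the regex response metadata and match tuples.
+ */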
+#define MLX5_GET_VOLATILE(typ, p, fld) \
+ ((rte_be_to_cpu_32(*((volatile __be32 *)(p) +\
+ __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
+ __mlx5_mask(typ, fld))
#define MLX5_GET(typ, p, fld) \
((rte_be_to_cpu_32(*((__be32 *)(p) +\
__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
#define MLX5_REGEX_WQE_METADATA_OFFSET 16
#define MLX5_REGEX_WQE_GATHER_OFFSET 32
#define MLX5_REGEX_WQE_SCATTER_OFFSET 48
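+/* Byte offset of the response metadata within a job's metadata buffer. */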
+#define MLX5_REGEX_METADATA_OFF 32
static inline uint32_t
sq_size_get(struct mlx5_regex_sq *sq)
{
return (1U << sq->log_nb_desc);
}
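+/* Like sq_size_get(), the CQ size is kept as log2 of the descriptor count. */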
+static inline uint32_t
+cq_size_get(struct mlx5_regex_cq *cq)
+{
+ return (1U << cq->log_nb_desc);
+}
+
struct mlx5_regex_job {
uint64_t user_id;
uint8_t *input;
static inline uint32_t
job_id_get(uint32_t qid, size_t sq_size, size_t index) {
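+ /* sq_size is a power of two, so mask instead of modulo. */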
- return qid * sq_size + index % sq_size;
+ return qid * sq_size + (index & (sq_size - 1));
}
uint16_t
return i;
}
+#define MLX5_REGEX_RESP_SZ 8
+
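+/*
+ * Fill one rte_regex_ops from the response metadata and match tuples
+ * the HW wrote for a completed job.
+ */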
+static inline void
+extract_result(struct rte_regex_ops *op, struct mlx5_regex_job *job)
+{
+ size_t j, offset;
+ op->user_id = job->user_id;
+ op->nb_matches = MLX5_GET_VOLATILE(regexp_metadata, job->metadata +
+ MLX5_REGEX_METADATA_OFF,
+ match_count);
+ op->nb_actual_matches = MLX5_GET_VOLATILE(regexp_metadata,
+ job->metadata +
+ MLX5_REGEX_METADATA_OFF,
+ detected_match_count);
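+ /* Each match tuple takes MLX5_REGEX_RESP_SZ bytes in the output buffer. */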
+ for (j = 0; j < op->nb_matches; j++) {
+ offset = MLX5_REGEX_RESP_SZ * j;
+ op->matches[j].rule_id =
+ MLX5_GET_VOLATILE(regexp_match_tuple,
+ (job->output + offset), rule_id);
+ op->matches[j].start_offset =
+ MLX5_GET_VOLATILE(regexp_match_tuple,
+ (job->output + offset), start_ptr);
+ op->matches[j].len =
+ MLX5_GET_VOLATILE(regexp_match_tuple,
+ (job->output + offset), length);
+ }
+}
+
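+/*
+ * Return the CQE at the current consumer index if it is owned by SW,
+ * or NULL when there is no new completion or the completion is in error.
+ */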
+static inline volatile struct mlx5_cqe *
+poll_one(struct mlx5_regex_cq *cq)
+{
+ volatile struct mlx5_cqe *cqe;
+ size_t next_cqe_offset;
+
+ next_cqe_offset = (cq->ci & (cq_size_get(cq) - 1));
+ cqe = (volatile struct mlx5_cqe *)(cq->cqe + next_cqe_offset);
+ rte_cio_wmb();
+
+ int ret = check_cqe(cqe, cq_size_get(cq), cq->ci);
+
+ if (unlikely(ret == MLX5_CQE_STATUS_ERR)) {
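+ /* The queue index is not known here; 0 is logged as a placeholder. */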
+ DRV_LOG(ERR, "Completion with error on qp 0x%x", 0);
+ return NULL;
+ }
+
+ if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN))
+ return NULL;
+
+ return cqe;
+}
+
+/**
+ * DPDK callback for dequeue.
+ *
+ * @param dev
+ * Pointer to the regex dev structure.
+ * @param qp_id
+ * The queue to dequeue the traffic from.
+ * @param ops
+ * Array of pre-allocated ops structures to be filled with dequeued results.
+ * @param nb_ops
+ * Number of ops in ops parameter.
+ *
+ * @return
+ * Number of operations successfully dequeued (<= nb_ops).
+ */
+uint16_t
+mlx5_regexdev_dequeue(struct rte_regexdev *dev, uint16_t qp_id,
+ struct rte_regex_ops **ops, uint16_t nb_ops)
+{
+ struct mlx5_regex_priv *priv = dev->data->dev_private;
+ struct mlx5_regex_qp *queue = &priv->qps[qp_id];
+ struct mlx5_regex_cq *cq = &queue->cq;
+ volatile struct mlx5_cqe *cqe;
+ size_t i = 0;
+
+ while ((cqe = poll_one(cq))) {
+ uint16_t wq_counter
+ = (rte_be_to_cpu_16(cqe->wqe_counter) + 1) &
+ MLX5_REGEX_MAX_WQE_INDEX;
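+ /* The SQ index is carried back in the CQE reserved area. */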
+ size_t sqid = cqe->rsvd3[2];
+ struct mlx5_regex_sq *sq = &queue->sqs[sqid];
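+ /* Drain every job completed on this SQ up to the reported counter. */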
+ while (sq->ci != wq_counter) {
+ if (unlikely(i == nb_ops)) {
+ /* Out of ops: leave cq->ci unchanged so this CQE is reprocessed on the next call. */
+ goto out;
+ }
+ uint32_t job_id = job_id_get(sqid, sq_size_get(sq),
+ sq->ci);
+ extract_result(ops[i], &queue->jobs[job_id]);
+ sq->ci = (sq->ci + 1) & MLX5_REGEX_MAX_WQE_INDEX;
+ i++;
+ }
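+ /* Advance the CQ consumer index and update the doorbell record. */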
+ cq->ci = (cq->ci + 1) & 0xffffff;
+ rte_wmb();
+ cq->dbr[0] = rte_cpu_to_be_32(cq->ci);
+ queue->free_sqs |= (1 << sqid);
+ }
+
+out:
+ queue->ci += i;
+ return i;
+}
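+
+/*
+ * Usage sketch (illustrative only, not part of this patch): applications
+ * reach this callback through the generic regexdev burst API, assuming
+ * ops[] was pre-allocated with room for the expected matches:
+ *
+ *	struct rte_regex_ops *ops[32];
+ *	uint16_t nb = rte_regexdev_dequeue_burst(dev_id, qp_id, ops, 32);
+ *
+ * On return, ops[0..nb-1] carry user_id, nb_matches and the match tuples.
+ */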
+
static void
setup_sqs(struct mlx5_regex_qp *queue)
{