regex/mlx5: support dequeue
authorYuval Avnery <yuvalav@mellanox.com>
Mon, 20 Jul 2020 06:26:16 +0000 (06:26 +0000)
committerThomas Monjalon <thomas@monjalon.net>
Tue, 21 Jul 2020 17:04:05 +0000 (19:04 +0200)
Implement dequeue function for the regex API.

Signed-off-by: Yuval Avnery <yuvalav@mellanox.com>
Acked-by: Ori Kam <orika@mellanox.com>
drivers/common/mlx5/mlx5_prm.h
drivers/regex/mlx5/mlx5_regex.c
drivers/regex/mlx5/mlx5_regex.h
drivers/regex/mlx5/mlx5_regex_control.c
drivers/regex/mlx5/mlx5_regex_fastpath.c

index bfbc58b..c05891f 100644 (file)
@@ -409,6 +409,12 @@ struct mlx5_ifc_regexp_metadata_bits {
        uint8_t reserved[0x80];
 };
 
/* Layout of one match tuple reported by the RegEx engine, in the PRM
 * bit-field convention (array sizes are bit widths, not bytes). */
struct mlx5_ifc_regexp_match_tuple_bits {
	uint8_t length[0x10];    /* Match length — presumably bytes; confirm vs PRM. */
	uint8_t start_ptr[0x10]; /* Match start offset within the job input. */
	uint8_t rule_id[0x20];   /* Id of the rule that produced the match. */
};
+
 /* Adding direct verbs to data-path. */
 
 /* CQ sequence number mask. */
@@ -605,6 +611,10 @@ typedef uint8_t u8;
                                  __mlx5_16_bit_off(typ, fld))); \
        } while (0)
 
/* Same field extraction as MLX5_GET() below, but the containing dword is
 * loaded through a volatile pointer so the compiler cannot cache or elide
 * the read.  Needed when the field is written asynchronously by hardware
 * (e.g. job metadata / match output in the regex data path). */
#define MLX5_GET_VOLATILE(typ, p, fld) \
	((rte_be_to_cpu_32(*((volatile __be32 *)(p) +\
	__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
	__mlx5_mask(typ, fld))
 #define MLX5_GET(typ, p, fld) \
        ((rte_be_to_cpu_32(*((__be32 *)(p) +\
        __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
index 68438a6..4cfd9d0 100644 (file)
@@ -166,6 +166,7 @@ mlx5_regex_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        }
        priv->regexdev->dev_ops = &mlx5_regexdev_ops;
        priv->regexdev->enqueue = mlx5_regexdev_enqueue;
+       priv->regexdev->dequeue = mlx5_regexdev_dequeue;
        priv->regexdev->device = (struct rte_device *)pci_dev;
        priv->regexdev->data->dev_private = priv;
        priv->regexdev->state = RTE_REGEXDEV_READY;
index 43468f6..217a1a4 100644 (file)
@@ -34,6 +34,7 @@ struct mlx5_regex_cq {
        uint32_t dbr_umem; /* Door bell record umem id. */
        volatile struct mlx5_cqe *cqe; /* The CQ ring buffer. */
        struct mlx5dv_devx_umem *cqe_umem; /* CQ buffer umem. */
+       size_t ci;
        uint32_t *dbr;
 };
 
@@ -105,5 +106,7 @@ int mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind,
 int mlx5_regexdev_setup_fastpath(struct mlx5_regex_priv *priv, uint32_t qp_id);
 uint16_t mlx5_regexdev_enqueue(struct rte_regexdev *dev, uint16_t qp_id,
                       struct rte_regex_ops **ops, uint16_t nb_ops);
+uint16_t mlx5_regexdev_dequeue(struct rte_regexdev *dev, uint16_t qp_id,
+                      struct rte_regex_ops **ops, uint16_t nb_ops);
 
 #endif /* MLX5_REGEX_H */
index f144c69..faafb76 100644 (file)
@@ -120,6 +120,7 @@ regex_ctrl_create_cq(struct mlx5_regex_priv *priv, struct mlx5_regex_cq *cq)
        cq->cqe_umem = mlx5_glue->devx_umem_reg(priv->ctx, buf,
                                                sizeof(struct mlx5_cqe) *
                                                cq_size, 7);
+       cq->ci = 0;
        if (!cq->cqe_umem) {
                DRV_LOG(ERR, "Can't register cqe mem.");
                rte_errno  = ENOMEM;
index 699cc03..d8af2bf 100644 (file)
@@ -31,6 +31,7 @@
 #define MLX5_REGEX_WQE_METADATA_OFFSET 16
 #define MLX5_REGEX_WQE_GATHER_OFFSET 32
 #define MLX5_REGEX_WQE_SCATTER_OFFSET 48
+#define MLX5_REGEX_METADATA_OFF 32
 
 static inline uint32_t
 sq_size_get(struct mlx5_regex_sq *sq)
@@ -38,6 +39,12 @@ sq_size_get(struct mlx5_regex_sq *sq)
        return (1U << sq->log_nb_desc);
 }
 
+static inline uint32_t
+cq_size_get(struct mlx5_regex_cq *cq)
+{
+       return (1U << cq->log_nb_desc);
+}
+
 struct mlx5_regex_job {
        uint64_t user_id;
        uint8_t *input;
@@ -146,7 +153,7 @@ can_send(struct mlx5_regex_sq *sq) {
 
/*
 * Map a (queue id, descriptor index) pair to a slot in the flat jobs
 * array.  sq_size must be a power of two, so masking with sq_size - 1
 * is equivalent to index % sq_size.
 */
static inline uint32_t
job_id_get(uint32_t qid, size_t sq_size, size_t index) {
	size_t slot = index & (sq_size - 1);

	return qid * sq_size + slot;
}
 
 uint16_t
@@ -179,6 +186,111 @@ out:
        return i;
 }
 
/* Size in bytes of one match tuple in the job output buffer. */
#define MLX5_REGEX_RESP_SZ 8

/*
 * Copy the results of a completed job into the caller's op: the user id,
 * the match counters from the HW-written metadata, and one
 * (rule_id, start_offset, len) tuple per reported match from the job
 * output buffer.
 * NOTE(review): assumes op->matches has room for nb_matches entries as
 * the regex API requires — confirm against the rte_regexdev contract.
 */
static inline void
extract_result(struct rte_regex_ops *op, struct mlx5_regex_job *job)
{
	size_t j, offset;
	op->user_id = job->user_id;
	/* Metadata is written by hardware: read via volatile accessors. */
	op->nb_matches = MLX5_GET_VOLATILE(regexp_metadata, job->metadata +
					   MLX5_REGEX_METADATA_OFF,
					   match_count);
	op->nb_actual_matches = MLX5_GET_VOLATILE(regexp_metadata,
						  job->metadata +
						  MLX5_REGEX_METADATA_OFF,
						  detected_match_count);
	for (j = 0; j < op->nb_matches; j++) {
		/* Match tuples are packed back to back in the output buffer. */
		offset = MLX5_REGEX_RESP_SZ * j;
		op->matches[j].rule_id =
			MLX5_GET_VOLATILE(regexp_match_tuple,
					  (job->output + offset), rule_id);
		op->matches[j].start_offset =
			MLX5_GET_VOLATILE(regexp_match_tuple,
					  (job->output +  offset), start_ptr);
		op->matches[j].len =
			MLX5_GET_VOLATILE(regexp_match_tuple,
					  (job->output +  offset), length);
	}
}
+
/*
 * Return the next CQE owned by software, or NULL when the CQ is empty
 * or the entry reports an error.  Does not advance cq->ci: the caller
 * consumes the completion and updates the index and doorbell itself.
 */
static inline volatile struct mlx5_cqe *
poll_one(struct mlx5_regex_cq *cq)
{
	volatile struct mlx5_cqe *cqe;
	size_t next_cqe_offset;

	/* CQ size is a power of two, so masking wraps the consumer index. */
	next_cqe_offset =  (cq->ci & (cq_size_get(cq) - 1));
	cqe = (volatile struct mlx5_cqe *)(cq->cqe + next_cqe_offset);
	/* NOTE(review): a write barrier on a read path looks odd — confirm
	 * a read barrier (or check_cqe's internal ordering) isn't intended. */
	rte_cio_wmb();

	int ret = check_cqe(cqe, cq_size_get(cq), cq->ci);

	if (unlikely(ret == MLX5_CQE_STATUS_ERR)) {
		DRV_LOG(ERR, "Completion with error on qp 0x%x",  0);
		return NULL;
	}

	/* HW still owns the entry: nothing to dequeue yet. */
	if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN))
		return NULL;

	return cqe;
}
+
+
/**
 * DPDK callback for dequeue.
 *
 * @param dev
 *   Pointer to the regex dev structure.
 * @param qp_id
 *   The queue to dequeue completed jobs from.
 * @param ops
 *   List of regex ops to fill with completed results.
 * @param nb_ops
 *   Number of ops in ops parameter.
 *
 * @return
 *   Number of operations successfully dequeued (<= nb_ops).
 */
uint16_t
mlx5_regexdev_dequeue(struct rte_regexdev *dev, uint16_t qp_id,
		      struct rte_regex_ops **ops, uint16_t nb_ops)
{
	struct mlx5_regex_priv *priv = dev->data->dev_private;
	struct mlx5_regex_qp *queue = &priv->qps[qp_id];
	struct mlx5_regex_cq *cq = &queue->cq;
	volatile struct mlx5_cqe *cqe;
	size_t i = 0;

	while ((cqe = poll_one(cq))) {
		/* One CQE may complete several WQEs on the same SQ:
		 * drain every job up to the reported wqe_counter. */
		uint16_t wq_counter
			= (rte_be_to_cpu_16(cqe->wqe_counter) + 1) &
			  MLX5_REGEX_MAX_WQE_INDEX;
		/* NOTE(review): SQ id taken from a reserved CQE byte
		 * (rsvd3[2]) — device-specific; confirm against the PRM. */
		size_t sqid = cqe->rsvd3[2];
		struct mlx5_regex_sq *sq = &queue->sqs[sqid];
		while (sq->ci != wq_counter) {
			if (unlikely(i == nb_ops)) {
				/* Return without updating cq->ci */
				goto out;
			}
			uint32_t job_id = job_id_get(sqid, sq_size_get(sq),
						     sq->ci);
			extract_result(ops[i], &queue->jobs[job_id]);
			sq->ci = (sq->ci + 1) & MLX5_REGEX_MAX_WQE_INDEX;
			i++;
		}
		/* The CQ consumer index is 24 bits wide. */
		cq->ci = (cq->ci + 1) & 0xffffff;
		/* Make the new ci visible before ringing the doorbell. */
		rte_wmb();
		cq->dbr[0] = rte_cpu_to_be_32(cq->ci);
		/* SQ fully drained: mark it available for enqueue again. */
		queue->free_sqs |= (1 << sqid);
	}

out:
	queue->ci += i;
	return i;
}
+
 static void
 setup_sqs(struct mlx5_regex_qp *queue)
 {