RegEx Drivers
-------------
-Marvell OCTEON TX2 regex
+Marvell OCTEON CN9K regex
M: Liron Himi <lironh@marvell.com>
-F: drivers/regex/octeontx2/
-F: doc/guides/regexdevs/octeontx2.rst
-F: doc/guides/regexdevs/features/octeontx2.ini
+F: drivers/regex/cn9k/
+F: doc/guides/regexdevs/cn9k.rst
+F: doc/guides/regexdevs/features/cn9k.ini
Mellanox mlx5
M: Ori Kam <orika@nvidia.com>
echo "Skipped removed driver $name."
continue
fi
+ if grep -qE "\<librte_regex_octeontx2" $dump; then
+ echo "Skipped removed driver $name."
+ continue
+ fi
dump2=$(find $newdir -name $name)
if [ -z "$dump2" ] || [ ! -e "$dump2" ]; then
echo "Error: cannot find $name in $newdir" >&2
#. **Dmadev Driver**
See :doc:`../dmadevs/cnxk` for DPI Dmadev driver information.
+#. **Regex Device Driver**
+ See :doc:`../regexdevs/cn9k` for REE Regex device driver information.
+
Procedure to Setup Platform
---------------------------
#. **Crypto Device Driver**
See :doc:`../cryptodevs/octeontx2` for CPT crypto device driver information.
-#. **Regex Device Driver**
- See :doc:`../regexdevs/octeontx2` for REE regex device driver information.
-
Procedure to Setup Platform
---------------------------
--- /dev/null
+.. SPDX-License-Identifier: BSD-3-Clause
+ Copyright(c) 2020 Marvell International Ltd.
+
+CN9K REE Regexdev Driver
+========================
+
+The CN9K REE PMD (**librte_regex_cn9k**) provides poll mode
+regexdev driver support for the inbuilt regex device found in the **Marvell CN9K**
+SoC family.
+
+More information about CN9K SoC can be found at `Marvell Official Website
+<https://www.marvell.com/embedded-processors/infrastructure-processors/>`_.
+
+Features
+--------
+
+Features of the CN9K REE PMD are:
+
+- 36 queues
+- Up to 254 matches for each regex operation
+
+Prerequisites and Compilation procedure
+---------------------------------------
+
+ See :doc:`../platform/cnxk` for setup information.
+
+Device Setup
+------------
+
+The CN9K REE devices will need to be bound to a user-space IO driver
+for use. The ``dpdk-devbind.py`` script included with DPDK can be
+used to view the state of the devices and to bind them to a suitable
+DPDK-supported kernel driver. When querying the status of the devices,
+they will appear under the category of "REGEX devices", i.e. the command
+``dpdk-devbind.py --status-dev regex`` can be used to see the state of
+those devices alone.
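+
+For example, to list the regex devices and then bind one to ``vfio-pci``
+(the PCI address below is purely illustrative)::
+
+   usertools/dpdk-devbind.py --status-dev regex
+   usertools/dpdk-devbind.py -b vfio-pci 0002:20:00.0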
+
+Debugging Options
+-----------------
+
+.. _table_cn9k_regex_debug_options:
+
+.. table:: CN9K regex device debug options
+
+ +---+------------+-------------------------------------------------------+
+ | # | Component | EAL log command |
+ +===+============+=======================================================+
+ | 1 | REE | --log-level='pmd\.regex\.cn9k,8' |
+ +---+------------+-------------------------------------------------------+
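+
+For example, the REE log level can be raised when launching a DPDK
+application (the application name here is only a placeholder)::
+
+   ./dpdk-application --log-level='pmd.regex.cn9k,8'
+
+Usage Sketch
+------------
+
+The snippet below is a minimal, hypothetical sketch of driving this PMD
+through the generic regexdev API. It assumes ``rule_db`` holds a database
+previously produced with ``rte_regexdev_rule_db_export()`` and that ``ops``
+holds pre-populated scan jobs; error handling, mbuf setup and the queue
+depth shown are illustrative only:
+
+.. code-block:: c
+
+   #include <rte_regexdev.h>
+
+   static int
+   scan_burst(uint8_t dev_id, const char *rule_db, uint32_t rule_db_len,
+              struct rte_regex_ops **ops, uint16_t nb_ops)
+   {
+       struct rte_regexdev_qp_conf qp_conf = { .nb_desc = 1024 };
+       struct rte_regexdev_config cfg = { 0 };
+       struct rte_regexdev_info info;
+       uint16_t done = 0;
+
+       if (rte_regexdev_info_get(dev_id, &info) != 0)
+           return -1;
+
+       /* This PMD requires nb_max_matches == info.max_matches and
+        * dev_cfg_flags == 0 (see cn9k_ree_dev_config()).
+        */
+       cfg.nb_queue_pairs = 1;
+       cfg.nb_max_matches = info.max_matches;
+       cfg.rule_db = rule_db;
+       cfg.rule_db_len = rule_db_len;
+
+       if (rte_regexdev_configure(dev_id, &cfg) != 0 ||
+           rte_regexdev_queue_pair_setup(dev_id, 0, &qp_conf) != 0 ||
+           rte_regexdev_start(dev_id) != 0)
+           return -1;
+
+       /* Submit the jobs, then poll until all of them complete. */
+       nb_ops = rte_regexdev_enqueue_burst(dev_id, 0, ops, nb_ops);
+       while (done < nb_ops)
+           done += rte_regexdev_dequeue_burst(dev_id, 0, &ops[done],
+                                              nb_ops - done);
+       return 0;
+   }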
--- /dev/null
+;
+; Supported features of the 'cn9k' regex driver.
+;
+; Refer to default.ini for the full list of available driver features.
+;
+[Features]
+PCRE back reference = Y
+PCRE word boundary = Y
+Run time compilation = Y
+Armv8 = Y
+++ /dev/null
-;
-; Supported features of the 'octeontx2' regex driver.
-;
-; Refer to default.ini for the full list of available driver features.
-;
-[Features]
-PCRE back reference = Y
-PCRE word boundary = Y
-Run time compilation = Y
-Armv8 = Y
:numbered:
features_overview
+ cn9k
mlx5
- octeontx2
+++ /dev/null
-.. SPDX-License-Identifier: BSD-3-Clause
- Copyright(c) 2020 Marvell International Ltd.
-
-OCTEON TX2 REE Regexdev Driver
-==============================
-
-The OCTEON TX2 REE PMD (**librte_regex_octeontx2**) provides poll mode
-regexdev driver support for the inbuilt regex device found in the **Marvell OCTEON TX2**
-SoC family.
-
-More information about OCTEON TX2 SoC can be found at `Marvell Official Website
-<https://www.marvell.com/embedded-processors/infrastructure-processors/>`_.
-
-Features
---------
-
-Features of the OCTEON TX2 REE PMD are:
-
-- 36 queues
-- Up to 254 matches for each regex operation
-
-Prerequisites and Compilation procedure
----------------------------------------
-
- See :doc:`../platform/octeontx2` for setup information.
-
-Device Setup
-------------
-
-The OCTEON TX2 REE devices will need to be bound to a user-space IO driver
-for use. The script ``dpdk-devbind.py`` script included with DPDK can be
-used to view the state of the devices and to bind them to a suitable
-DPDK-supported kernel driver. When querying the status of the devices,
-they will appear under the category of "REGEX devices", i.e. the command
-``dpdk-devbind.py --status-dev regex`` can be used to see the state of
-those devices alone.
-
-Debugging Options
------------------
-
-.. _table_octeontx2_regex_debug_options:
-
-.. table:: OCTEON TX2 regex device debug options
-
- +---+------------+-------------------------------------------------------+
- | # | Component | EAL log command |
- +===+============+=======================================================+
- | 1 | REE | --log-level='pmd\.regex\.octeontx2,8' |
- +---+------------+-------------------------------------------------------+
Added a new PMD for the hardware regex offload block for OCTEON TX2 SoC.
- See the :doc:`../regexdevs/octeontx2` for more details.
+ See ``regexdevs/octeontx2`` for more details.
* **Updated Software Eventdev driver.**
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_regexdev.h>
+#include <rte_regexdev_core.h>
+#include <rte_regexdev_driver.h>
+
+
+/* REE common headers */
+#include "cn9k_regexdev.h"
+#include "cn9k_regexdev_compiler.h"
+
+
+/* HW matches are written at offset 0x80 from RES_PTR_ADDR,
+ * while in the op structure matches start at W5 (0x28).
+ * The tail matches therefore have to be copied down from 0x80 to 0x28,
+ * a span of 88 B. Each match holds 8 B, so up to 11 matches can be copied.
+ */
+#define REE_NUM_MATCHES_ALIGN 11
+/* The REE co-processor will write up to 254 job match structures
+ * (REE_MATCH_S) starting at address [RES_PTR_ADDR] + 0x80.
+ */
+#define REE_MATCH_OFFSET 0x80
+
+#define REE_MAX_RULES_PER_GROUP 0xFFFF
+#define REE_MAX_GROUPS 0xFFFF
+
+
+#define REE_RULE_DB_VERSION 2
+#define REE_RULE_DB_REVISION 0
+
+struct ree_rule_db_entry {
+ uint8_t type;
+ uint32_t addr;
+ uint64_t value;
+};
+
+struct ree_rule_db {
+ uint32_t version;
+ uint32_t revision;
+ uint32_t number_of_entries;
+ struct ree_rule_db_entry entries[];
+} __rte_packed;
+
+static void
+qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
+{
+ snprintf(name, size, "cn9k_ree_lf_mem_%u:%u", dev_id, qp_id);
+}
+
+static struct roc_ree_qp *
+ree_qp_create(const struct rte_regexdev *dev, uint16_t qp_id)
+{
+ struct cn9k_ree_data *data = dev->data->dev_private;
+ uint64_t pg_sz = sysconf(_SC_PAGESIZE);
+ struct roc_ree_vf *vf = &data->vf;
+ const struct rte_memzone *lf_mem;
+ uint32_t len, iq_len, size_div2;
+ char name[RTE_MEMZONE_NAMESIZE];
+ uint64_t used_len, iova;
+ struct roc_ree_qp *qp;
+ uint8_t *va;
+ int ret;
+
+ /* Allocate queue pair */
+ qp = rte_zmalloc("CN9K Regex PMD Queue Pair", sizeof(*qp),
+ ROC_ALIGN);
+ if (qp == NULL) {
+ cn9k_err("Could not allocate queue pair");
+ return NULL;
+ }
+
+ iq_len = REE_IQ_LEN;
+
+	/*
+	 * Queue size must be in units of 128 B (2 * REE_INST_S, where
+	 * REE_INST_S is 64 B) and a power of 2.
+	 * The effective queue size seen by software is (size - 1) * 128 B.
+	 */
+ size_div2 = iq_len >> 1;
+
+ /* For pending queue */
+ len = iq_len * RTE_ALIGN(sizeof(struct roc_ree_rid), 8);
+
+ /* So that instruction queues start as pg size aligned */
+ len = RTE_ALIGN(len, pg_sz);
+
+ /* For instruction queues */
+ len += REE_IQ_LEN * sizeof(union roc_ree_inst);
+
+ /* Waste after instruction queues */
+ len = RTE_ALIGN(len, pg_sz);
+
+ qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
+ qp_id);
+
+ lf_mem = rte_memzone_reserve_aligned(name, len, rte_socket_id(),
+ RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_256MB,
+ RTE_CACHE_LINE_SIZE);
+ if (lf_mem == NULL) {
+ cn9k_err("Could not allocate reserved memzone");
+ goto qp_free;
+ }
+
+ va = lf_mem->addr;
+ iova = lf_mem->iova;
+
+ memset(va, 0, len);
+
+ /* Initialize pending queue */
+ qp->pend_q.rid_queue = (struct roc_ree_rid *)va;
+ qp->pend_q.enq_tail = 0;
+ qp->pend_q.deq_head = 0;
+ qp->pend_q.pending_count = 0;
+
+ used_len = iq_len * RTE_ALIGN(sizeof(struct roc_ree_rid), 8);
+ used_len = RTE_ALIGN(used_len, pg_sz);
+ iova += used_len;
+
+ qp->iq_dma_addr = iova;
+ qp->id = qp_id;
+ qp->base = roc_ree_qp_get_base(vf, qp_id);
+ qp->roc_regexdev_jobid = 0;
+ qp->write_offset = 0;
+
+ ret = roc_ree_iq_enable(vf, qp, REE_QUEUE_HI_PRIO, size_div2);
+ if (ret) {
+ cn9k_err("Could not enable instruction queue");
+ goto qp_free;
+ }
+
+ return qp;
+
+qp_free:
+ rte_free(qp);
+ return NULL;
+}
+
+static int
+ree_qp_destroy(const struct rte_regexdev *dev, struct roc_ree_qp *qp)
+{
+ const struct rte_memzone *lf_mem;
+ char name[RTE_MEMZONE_NAMESIZE];
+ int ret;
+
+ roc_ree_iq_disable(qp);
+
+ qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
+ qp->id);
+
+ lf_mem = rte_memzone_lookup(name);
+
+ ret = rte_memzone_free(lf_mem);
+ if (ret)
+ return ret;
+
+ rte_free(qp);
+
+ return 0;
+}
+
+static int
+ree_queue_pair_release(struct rte_regexdev *dev, uint16_t qp_id)
+{
+ struct cn9k_ree_data *data = dev->data->dev_private;
+ struct roc_ree_qp *qp = data->queue_pairs[qp_id];
+ int ret;
+
+ ree_func_trace("Queue=%d", qp_id);
+
+ if (qp == NULL)
+ return -EINVAL;
+
+ ret = ree_qp_destroy(dev, qp);
+ if (ret) {
+ cn9k_err("Could not destroy queue pair %d", qp_id);
+ return ret;
+ }
+
+ data->queue_pairs[qp_id] = NULL;
+
+ return 0;
+}
+
+static struct rte_regexdev *
+ree_dev_register(const char *name)
+{
+ struct rte_regexdev *dev;
+
+	cn9k_ree_dbg("Creating regexdev %s", name);
+
+ /* allocate device structure */
+ dev = rte_regexdev_register(name);
+ if (dev == NULL) {
+ cn9k_err("Failed to allocate regex device for %s", name);
+ return NULL;
+ }
+
+ /* allocate private device structure */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ dev->data->dev_private =
+ rte_zmalloc_socket("regexdev device private",
+ sizeof(struct cn9k_ree_data),
+ RTE_CACHE_LINE_SIZE,
+ rte_socket_id());
+
+ if (dev->data->dev_private == NULL) {
+ cn9k_err("Cannot allocate memory for dev %s private data",
+ name);
+
+ rte_regexdev_unregister(dev);
+ return NULL;
+ }
+ }
+
+ return dev;
+}
+
+static int
+ree_dev_unregister(struct rte_regexdev *dev)
+{
+ cn9k_ree_dbg("Closing regex device %s", dev->device->name);
+
+ /* free regex device */
+ rte_regexdev_unregister(dev);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(dev->data->dev_private);
+
+ return 0;
+}
+
+static int
+ree_dev_fini(struct rte_regexdev *dev)
+{
+ struct cn9k_ree_data *data = dev->data->dev_private;
+ struct roc_ree_vf *vf = &data->vf;
+ int i, ret;
+
+ ree_func_trace();
+
+ for (i = 0; i < data->nb_queue_pairs; i++) {
+ ret = ree_queue_pair_release(dev, i);
+ if (ret)
+ return ret;
+ }
+
+ ret = roc_ree_queues_detach(vf);
+ if (ret)
+ cn9k_err("Could not detach queues");
+
+ /* TEMP : should be in lib */
+ if (data->queue_pairs)
+ rte_free(data->queue_pairs);
+ if (data->rules)
+ rte_free(data->rules);
+
+ roc_ree_dev_fini(vf);
+
+ ret = ree_dev_unregister(dev);
+ if (ret)
+ cn9k_err("Could not destroy PMD");
+
+ return ret;
+}
+
+static inline int
+ree_enqueue(struct roc_ree_qp *qp, struct rte_regex_ops *op,
+ struct roc_ree_pending_queue *pend_q)
+{
+ union roc_ree_inst inst;
+ union ree_res *res;
+ uint32_t offset;
+
+ if (unlikely(pend_q->pending_count >= REE_DEFAULT_CMD_QLEN)) {
+ cn9k_err("Pending count %" PRIu64 " is greater than Q size %d",
+ pend_q->pending_count, REE_DEFAULT_CMD_QLEN);
+ return -EAGAIN;
+ }
+	if (unlikely(op->mbuf->data_len > REE_MAX_PAYLOAD_SIZE ||
+			op->mbuf->data_len == 0)) {
+		cn9k_err("Invalid packet length %d, maximum payload %d",
+			op->mbuf->data_len, REE_MAX_PAYLOAD_SIZE);
+		return -EAGAIN;
+	}
+ }
+
+ /* W 0 */
+ inst.cn98xx.ooj = 1;
+ inst.cn98xx.dg = 0;
+ inst.cn98xx.doneint = 0;
+ /* W 1 */
+ inst.cn98xx.inp_ptr_addr = rte_pktmbuf_mtod(op->mbuf, uint64_t);
+ /* W 2 */
+ inst.cn98xx.inp_ptr_ctl = op->mbuf->data_len & 0x7FFF;
+ inst.cn98xx.inp_ptr_ctl = inst.cn98xx.inp_ptr_ctl << 32;
+
+ /* W 3 */
+ inst.cn98xx.res_ptr_addr = (uint64_t)op;
+ /* W 4 */
+ inst.cn98xx.wq_ptr = 0;
+ /* W 5 */
+ inst.cn98xx.ggrp = 0;
+ inst.cn98xx.tt = 0;
+ inst.cn98xx.tag = 0;
+ /* W 6 */
+ inst.cn98xx.ree_job_length = op->mbuf->data_len & 0x7FFF;
+ if (op->req_flags & RTE_REGEX_OPS_REQ_STOP_ON_MATCH_F)
+ inst.cn98xx.ree_job_ctrl = (0x2 << 8);
+ else if (op->req_flags & RTE_REGEX_OPS_REQ_MATCH_HIGH_PRIORITY_F)
+ inst.cn98xx.ree_job_ctrl = (0x1 << 8);
+ else
+ inst.cn98xx.ree_job_ctrl = 0;
+ inst.cn98xx.ree_job_id = qp->roc_regexdev_jobid;
+ /* W 7 */
+ inst.cn98xx.ree_job_subset_id_0 = op->group_id0;
+ if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID1_VALID_F)
+ inst.cn98xx.ree_job_subset_id_1 = op->group_id1;
+ else
+ inst.cn98xx.ree_job_subset_id_1 = op->group_id0;
+ if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID2_VALID_F)
+ inst.cn98xx.ree_job_subset_id_2 = op->group_id2;
+ else
+ inst.cn98xx.ree_job_subset_id_2 = op->group_id0;
+ if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID3_VALID_F)
+ inst.cn98xx.ree_job_subset_id_3 = op->group_id3;
+ else
+ inst.cn98xx.ree_job_subset_id_3 = op->group_id0;
+
+ /* Copy REE command to Q */
+ offset = qp->write_offset * sizeof(inst);
+ memcpy((void *)(qp->iq_dma_addr + offset), &inst, sizeof(inst));
+
+ pend_q->rid_queue[pend_q->enq_tail].rid = (uintptr_t)op;
+ pend_q->rid_queue[pend_q->enq_tail].user_id = op->user_id;
+
+ /* Mark result as not done */
+ res = (union ree_res *)(op);
+ res->s.done = 0;
+ res->s.ree_err = 0;
+
+ /* We will use soft queue length here to limit requests */
+ REE_MOD_INC(pend_q->enq_tail, REE_DEFAULT_CMD_QLEN);
+ pend_q->pending_count += 1;
+ REE_MOD_INC(qp->roc_regexdev_jobid, 0xFFFFFF);
+ REE_MOD_INC(qp->write_offset, REE_IQ_LEN);
+
+ return 0;
+}
+
+static uint16_t
+cn9k_ree_enqueue_burst(struct rte_regexdev *dev, uint16_t qp_id,
+ struct rte_regex_ops **ops, uint16_t nb_ops)
+{
+ struct cn9k_ree_data *data = dev->data->dev_private;
+ struct roc_ree_qp *qp = data->queue_pairs[qp_id];
+ struct roc_ree_pending_queue *pend_q;
+ uint16_t nb_allowed, count = 0;
+ struct rte_regex_ops *op;
+ int ret;
+
+ pend_q = &qp->pend_q;
+
+ nb_allowed = REE_DEFAULT_CMD_QLEN - pend_q->pending_count;
+ if (nb_ops > nb_allowed)
+ nb_ops = nb_allowed;
+
+ for (count = 0; count < nb_ops; count++) {
+ op = ops[count];
+ ret = ree_enqueue(qp, op, pend_q);
+
+ if (unlikely(ret))
+ break;
+ }
+
+ /*
+ * Make sure all instructions are written before DOORBELL is activated
+ */
+ rte_io_wmb();
+
+ /* Update Doorbell */
+ plt_write64(count, qp->base + REE_LF_DOORBELL);
+
+ return count;
+}
+
+static inline void
+ree_dequeue_post_process(struct rte_regex_ops *ops)
+{
+ uint8_t ree_res_mcnt, ree_res_dmcnt;
+ int off = REE_MATCH_OFFSET;
+ struct ree_res_s_98 *res;
+ uint16_t ree_res_status;
+ uint64_t match;
+
+ res = (struct ree_res_s_98 *)ops;
+ /* store res values on stack since ops and res
+ * are using the same memory
+ */
+ ree_res_status = res->ree_res_status;
+ ree_res_mcnt = res->ree_res_mcnt;
+ ree_res_dmcnt = res->ree_res_dmcnt;
+ ops->rsp_flags = 0;
+ ops->nb_actual_matches = ree_res_dmcnt;
+ ops->nb_matches = ree_res_mcnt;
+ if (unlikely(res->ree_err)) {
+ ops->nb_actual_matches = 0;
+ ops->nb_matches = 0;
+ }
+
+ if (unlikely(ree_res_status != REE_TYPE_RESULT_DESC)) {
+ if (ree_res_status & REE_STATUS_PMI_SOJ_BIT)
+ ops->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_SOJ_F;
+ if (ree_res_status & REE_STATUS_PMI_EOJ_BIT)
+ ops->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_EOJ_F;
+ if (ree_res_status & REE_STATUS_ML_CNT_DET_BIT)
+ ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_SCAN_TIMEOUT_F;
+ if (ree_res_status & REE_STATUS_MM_CNT_DET_BIT)
+ ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_MATCH_F;
+ if (ree_res_status & REE_STATUS_MP_CNT_DET_BIT)
+ ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_PREFIX_F;
+ }
+ if (ops->nb_matches > 0) {
+ /* Move the matches to the correct offset */
+ off = ((ops->nb_matches < REE_NUM_MATCHES_ALIGN) ?
+ ops->nb_matches : REE_NUM_MATCHES_ALIGN);
+ match = (uint64_t)ops + REE_MATCH_OFFSET;
+ match += (ops->nb_matches - off) *
+ sizeof(union ree_match);
+ memcpy((void *)ops->matches, (void *)match,
+ off * sizeof(union ree_match));
+ }
+}
+
+static uint16_t
+cn9k_ree_dequeue_burst(struct rte_regexdev *dev, uint16_t qp_id,
+ struct rte_regex_ops **ops, uint16_t nb_ops)
+{
+ struct cn9k_ree_data *data = dev->data->dev_private;
+ struct roc_ree_qp *qp = data->queue_pairs[qp_id];
+ struct roc_ree_pending_queue *pend_q;
+ int i, nb_pending, nb_completed = 0;
+ volatile struct ree_res_s_98 *res;
+ struct roc_ree_rid *rid;
+
+ pend_q = &qp->pend_q;
+
+ nb_pending = pend_q->pending_count;
+
+ if (nb_ops > nb_pending)
+ nb_ops = nb_pending;
+
+ for (i = 0; i < nb_ops; i++) {
+ rid = &pend_q->rid_queue[pend_q->deq_head];
+ res = (volatile struct ree_res_s_98 *)(rid->rid);
+
+ /* Check response header done bit if completed */
+ if (unlikely(!res->done))
+ break;
+
+ ops[i] = (struct rte_regex_ops *)(rid->rid);
+ ops[i]->user_id = rid->user_id;
+
+ REE_MOD_INC(pend_q->deq_head, REE_DEFAULT_CMD_QLEN);
+ pend_q->pending_count -= 1;
+ }
+
+ nb_completed = i;
+
+ for (i = 0; i < nb_completed; i++)
+ ree_dequeue_post_process(ops[i]);
+
+ return nb_completed;
+}
+
+static int
+cn9k_ree_dev_info_get(struct rte_regexdev *dev, struct rte_regexdev_info *info)
+{
+ struct cn9k_ree_data *data = dev->data->dev_private;
+ struct roc_ree_vf *vf = &data->vf;
+
+ ree_func_trace();
+
+ if (info == NULL)
+ return -EINVAL;
+
+ info->driver_name = dev->device->driver->name;
+ info->dev = dev->device;
+
+ info->max_queue_pairs = vf->max_queues;
+ info->max_matches = vf->max_matches;
+ info->max_payload_size = REE_MAX_PAYLOAD_SIZE;
+ info->max_rules_per_group = data->max_rules_per_group;
+ info->max_groups = data->max_groups;
+ info->regexdev_capa = data->regexdev_capa;
+ info->rule_flags = data->rule_flags;
+
+ return 0;
+}
+
+static int
+cn9k_ree_dev_config(struct rte_regexdev *dev,
+ const struct rte_regexdev_config *cfg)
+{
+ struct cn9k_ree_data *data = dev->data->dev_private;
+ struct roc_ree_vf *vf = &data->vf;
+ const struct ree_rule_db *rule_db;
+ uint32_t rule_db_len;
+ int ret;
+
+ ree_func_trace();
+
+ if (cfg->nb_queue_pairs > vf->max_queues) {
+ cn9k_err("Invalid number of queue pairs requested");
+ return -EINVAL;
+ }
+
+ if (cfg->nb_max_matches != vf->max_matches) {
+ cn9k_err("Invalid number of max matches requested");
+ return -EINVAL;
+ }
+
+ if (cfg->dev_cfg_flags != 0) {
+ cn9k_err("Invalid device configuration flags requested");
+ return -EINVAL;
+ }
+
+ /* Unregister error interrupts */
+ if (vf->err_intr_registered)
+ roc_ree_err_intr_unregister(vf);
+
+ /* Detach queues */
+ if (vf->nb_queues) {
+ ret = roc_ree_queues_detach(vf);
+ if (ret) {
+ cn9k_err("Could not detach REE queues");
+ return ret;
+ }
+ }
+
+ /* TEMP : should be in lib */
+ if (data->queue_pairs == NULL) { /* first time configuration */
+ data->queue_pairs = rte_zmalloc("regexdev->queue_pairs",
+ sizeof(data->queue_pairs[0]) *
+ cfg->nb_queue_pairs, RTE_CACHE_LINE_SIZE);
+
+ if (data->queue_pairs == NULL) {
+ data->nb_queue_pairs = 0;
+ cn9k_err("Failed to get memory for qp meta data, nb_queues %u",
+ cfg->nb_queue_pairs);
+ return -ENOMEM;
+ }
+ } else { /* re-configure */
+ uint16_t old_nb_queues = data->nb_queue_pairs;
+ void **qp;
+ unsigned int i;
+
+ qp = data->queue_pairs;
+
+ for (i = cfg->nb_queue_pairs; i < old_nb_queues; i++) {
+ ret = ree_queue_pair_release(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ qp = rte_realloc(qp, sizeof(qp[0]) * cfg->nb_queue_pairs,
+ RTE_CACHE_LINE_SIZE);
+ if (qp == NULL) {
+ cn9k_err("Failed to realloc qp meta data, nb_queues %u",
+ cfg->nb_queue_pairs);
+ return -ENOMEM;
+ }
+
+ if (cfg->nb_queue_pairs > old_nb_queues) {
+ uint16_t new_qs = cfg->nb_queue_pairs - old_nb_queues;
+ memset(qp + old_nb_queues, 0, sizeof(qp[0]) * new_qs);
+ }
+
+ data->queue_pairs = qp;
+ }
+ data->nb_queue_pairs = cfg->nb_queue_pairs;
+
+ /* Attach queues */
+ cn9k_ree_dbg("Attach %d queues", cfg->nb_queue_pairs);
+ ret = roc_ree_queues_attach(vf, cfg->nb_queue_pairs);
+ if (ret) {
+ cn9k_err("Could not attach queues");
+ return -ENODEV;
+ }
+
+ ret = roc_ree_msix_offsets_get(vf);
+ if (ret) {
+ cn9k_err("Could not get MSI-X offsets");
+ goto queues_detach;
+ }
+
+ if (cfg->rule_db && cfg->rule_db_len) {
+ cn9k_ree_dbg("rule_db length %d", cfg->rule_db_len);
+ rule_db = (const struct ree_rule_db *)cfg->rule_db;
+ rule_db_len = rule_db->number_of_entries *
+ sizeof(struct ree_rule_db_entry);
+ cn9k_ree_dbg("rule_db number of entries %d",
+ rule_db->number_of_entries);
+ if (rule_db_len > cfg->rule_db_len) {
+ cn9k_err("Could not program rule db");
+ ret = -EINVAL;
+ goto queues_detach;
+ }
+ ret = roc_ree_rule_db_prog(vf, (const char *)rule_db->entries,
+ rule_db_len, NULL, REE_NON_INC_PROG);
+ if (ret) {
+ cn9k_err("Could not program rule db");
+ goto queues_detach;
+ }
+ }
+
+ dev->enqueue = cn9k_ree_enqueue_burst;
+ dev->dequeue = cn9k_ree_dequeue_burst;
+
+ rte_mb();
+ return 0;
+
+queues_detach:
+ roc_ree_queues_detach(vf);
+ return ret;
+}
+
+static int
+cn9k_ree_stop(struct rte_regexdev *dev)
+{
+ RTE_SET_USED(dev);
+
+ ree_func_trace();
+ return 0;
+}
+
+static int
+cn9k_ree_start(struct rte_regexdev *dev)
+{
+ struct cn9k_ree_data *data = dev->data->dev_private;
+ struct roc_ree_vf *vf = &data->vf;
+ uint32_t rule_db_len = 0;
+ int ret;
+
+ ree_func_trace();
+
+ ret = roc_ree_rule_db_len_get(vf, &rule_db_len, NULL);
+ if (ret)
+ return ret;
+ if (rule_db_len == 0) {
+ cn9k_err("Rule db not programmed");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int
+cn9k_ree_close(struct rte_regexdev *dev)
+{
+ return ree_dev_fini(dev);
+}
+
+static int
+cn9k_ree_queue_pair_setup(struct rte_regexdev *dev, uint16_t qp_id,
+ const struct rte_regexdev_qp_conf *qp_conf)
+{
+ struct cn9k_ree_data *data = dev->data->dev_private;
+ struct roc_ree_qp *qp;
+
+ ree_func_trace("Queue=%d", qp_id);
+
+ if (data->queue_pairs[qp_id] != NULL)
+ ree_queue_pair_release(dev, qp_id);
+
+ if (qp_conf->nb_desc > REE_DEFAULT_CMD_QLEN) {
+ cn9k_err("Could not setup queue pair for %u descriptors",
+ qp_conf->nb_desc);
+ return -EINVAL;
+ }
+ if (qp_conf->qp_conf_flags != 0) {
+ cn9k_err("Could not setup queue pair with configuration flags 0x%x",
+ qp_conf->qp_conf_flags);
+ return -EINVAL;
+ }
+
+ qp = ree_qp_create(dev, qp_id);
+ if (qp == NULL) {
+ cn9k_err("Could not create queue pair %d", qp_id);
+ return -ENOMEM;
+ }
+ data->queue_pairs[qp_id] = qp;
+
+ return 0;
+}
+
+static int
+cn9k_ree_rule_db_compile_activate(struct rte_regexdev *dev)
+{
+ return cn9k_ree_rule_db_compile_prog(dev);
+}
+
+static int
+cn9k_ree_rule_db_update(struct rte_regexdev *dev,
+ const struct rte_regexdev_rule *rules, uint16_t nb_rules)
+{
+ struct cn9k_ree_data *data = dev->data->dev_private;
+ struct rte_regexdev_rule *old_ptr;
+ uint32_t i, sum_nb_rules;
+
+ ree_func_trace("nb_rules=%d", nb_rules);
+
+ for (i = 0; i < nb_rules; i++) {
+ if (rules[i].op == RTE_REGEX_RULE_OP_REMOVE)
+ break;
+ if (rules[i].group_id >= data->max_groups)
+ break;
+ if (rules[i].rule_id >= data->max_rules_per_group)
+ break;
+ /* logical implication
+ * p q p -> q
+ * 0 0 1
+ * 0 1 1
+ * 1 0 0
+ * 1 1 1
+ */
+ if ((~(rules[i].rule_flags) | data->rule_flags) == 0)
+ break;
+ }
+ nb_rules = i;
+
+ if (data->nb_rules == 0) {
+
+ data->rules = rte_malloc("rte_regexdev_rules",
+ nb_rules*sizeof(struct rte_regexdev_rule), 0);
+ if (data->rules == NULL)
+ return -ENOMEM;
+
+ memcpy(data->rules, rules,
+ nb_rules*sizeof(struct rte_regexdev_rule));
+ data->nb_rules = nb_rules;
+ } else {
+
+ old_ptr = data->rules;
+ sum_nb_rules = data->nb_rules + nb_rules;
+ data->rules = rte_realloc(data->rules,
+ sum_nb_rules * sizeof(struct rte_regexdev_rule),
+ 0);
+ if (data->rules == NULL) {
+ data->rules = old_ptr;
+ return -ENOMEM;
+ }
+ memcpy(&data->rules[data->nb_rules], rules,
+ nb_rules*sizeof(struct rte_regexdev_rule));
+ data->nb_rules = sum_nb_rules;
+ }
+ return nb_rules;
+}
+
+static int
+cn9k_ree_rule_db_import(struct rte_regexdev *dev, const char *rule_db,
+ uint32_t rule_db_len)
+{
+ struct cn9k_ree_data *data = dev->data->dev_private;
+ struct roc_ree_vf *vf = &data->vf;
+ const struct ree_rule_db *ree_rule_db;
+ uint32_t ree_rule_db_len;
+ int ret;
+
+ ree_func_trace("rule_db_len=%d", rule_db_len);
+
+ ree_rule_db = (const struct ree_rule_db *)rule_db;
+ ree_rule_db_len = ree_rule_db->number_of_entries *
+ sizeof(struct ree_rule_db_entry);
+ if (ree_rule_db_len > rule_db_len) {
+ cn9k_err("Could not program rule db");
+ return -EINVAL;
+ }
+ ret = roc_ree_rule_db_prog(vf, (const char *)ree_rule_db->entries,
+ ree_rule_db_len, NULL, REE_NON_INC_PROG);
+ if (ret) {
+ cn9k_err("Could not program rule db");
+ return -ENOSPC;
+ }
+ return 0;
+}
+
+static int
+cn9k_ree_rule_db_export(struct rte_regexdev *dev, char *rule_db)
+{
+ struct cn9k_ree_data *data = dev->data->dev_private;
+ struct roc_ree_vf *vf = &data->vf;
+ struct ree_rule_db *ree_rule_db;
+ uint32_t rule_dbi_len;
+ uint32_t rule_db_len;
+ int ret;
+
+ ree_func_trace();
+
+ ret = roc_ree_rule_db_len_get(vf, &rule_db_len, &rule_dbi_len);
+ if (ret)
+ return ret;
+
+ if (rule_db == NULL) {
+ rule_db_len += sizeof(struct ree_rule_db);
+ return rule_db_len;
+ }
+
+ ree_rule_db = (struct ree_rule_db *)rule_db;
+ ret = roc_ree_rule_db_get(vf, (char *)ree_rule_db->entries,
+ rule_db_len, NULL, 0);
+ if (ret) {
+ cn9k_err("Could not export rule db");
+ return -EFAULT;
+ }
+ ree_rule_db->number_of_entries =
+ rule_db_len/sizeof(struct ree_rule_db_entry);
+ ree_rule_db->revision = REE_RULE_DB_REVISION;
+ ree_rule_db->version = REE_RULE_DB_VERSION;
+
+ return 0;
+}
+
+static struct rte_regexdev_ops cn9k_ree_ops = {
+ .dev_info_get = cn9k_ree_dev_info_get,
+ .dev_configure = cn9k_ree_dev_config,
+ .dev_qp_setup = cn9k_ree_queue_pair_setup,
+ .dev_start = cn9k_ree_start,
+ .dev_stop = cn9k_ree_stop,
+ .dev_close = cn9k_ree_close,
+ .dev_attr_get = NULL,
+ .dev_attr_set = NULL,
+ .dev_rule_db_update = cn9k_ree_rule_db_update,
+ .dev_rule_db_compile_activate =
+ cn9k_ree_rule_db_compile_activate,
+ .dev_db_import = cn9k_ree_rule_db_import,
+ .dev_db_export = cn9k_ree_rule_db_export,
+ .dev_xstats_names_get = NULL,
+ .dev_xstats_get = NULL,
+ .dev_xstats_by_name_get = NULL,
+ .dev_xstats_reset = NULL,
+ .dev_selftest = NULL,
+ .dev_dump = NULL,
+};
+
+static int
+cn9k_ree_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ char name[RTE_REGEXDEV_NAME_MAX_LEN];
+ struct cn9k_ree_data *data;
+ struct rte_regexdev *dev;
+ struct roc_ree_vf *vf;
+ int ret;
+
+ ret = roc_plt_init();
+ if (ret < 0) {
+ plt_err("Failed to initialize platform model");
+ return ret;
+ }
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ dev = ree_dev_register(name);
+ if (dev == NULL) {
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ dev->dev_ops = &cn9k_ree_ops;
+ dev->device = &pci_dev->device;
+
+ /* Get private data space allocated */
+ data = dev->data->dev_private;
+ vf = &data->vf;
+ vf->pci_dev = pci_dev;
+ ret = roc_ree_dev_init(vf);
+ if (ret) {
+		plt_err("Failed to initialize roc ree rc=%d", ret);
+ goto dev_unregister;
+ }
+
+ data->rule_flags = RTE_REGEX_PCRE_RULE_ALLOW_EMPTY_F |
+ RTE_REGEX_PCRE_RULE_ANCHORED_F;
+ data->regexdev_capa = 0;
+ data->max_groups = REE_MAX_GROUPS;
+ data->max_rules_per_group = REE_MAX_RULES_PER_GROUP;
+ data->nb_rules = 0;
+
+ dev->state = RTE_REGEXDEV_READY;
+ return 0;
+
+dev_unregister:
+ ree_dev_unregister(dev);
+exit:
+ cn9k_err("Could not create device (vendor_id: 0x%x device_id: 0x%x)",
+ pci_dev->id.vendor_id, pci_dev->id.device_id);
+ return ret;
+}
+
+static int
+cn9k_ree_pci_remove(struct rte_pci_device *pci_dev)
+{
+ char name[RTE_REGEXDEV_NAME_MAX_LEN];
+ struct rte_regexdev *dev = NULL;
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ dev = rte_regexdev_get_device_by_name(name);
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ return ree_dev_fini(dev);
+}
+
+static struct rte_pci_id pci_id_ree_table[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_CNXK_RVU_REE_PF)
+ },
+ {
+ .vendor_id = 0,
+ }
+};
+
+static struct rte_pci_driver cn9k_regexdev_pmd = {
+ .id_table = pci_id_ree_table,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = cn9k_ree_pci_probe,
+ .remove = cn9k_ree_pci_remove,
+};
+
+
+RTE_PMD_REGISTER_PCI(REGEXDEV_NAME_CN9K_PMD, cn9k_regexdev_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(REGEXDEV_NAME_CN9K_PMD, pci_id_ree_table);
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef _CN9K_REGEXDEV_H_
+#define _CN9K_REGEXDEV_H_
+
+#include <rte_common.h>
+#include <rte_regexdev.h>
+
+#include "roc_api.h"
+
+#define cn9k_ree_dbg plt_ree_dbg
+#define cn9k_err plt_err
+
+#define ree_func_trace cn9k_ree_dbg
+
+/* Marvell CN9K Regex PMD device name */
+#define REGEXDEV_NAME_CN9K_PMD regex_cn9k
+
+/**
+ * Device private data
+ */
+struct cn9k_ree_data {
+ uint32_t regexdev_capa;
+ uint64_t rule_flags;
+	/**< Feature flags exposing HW/SW features for the given device */
+	uint16_t max_rules_per_group;
+	/**< Maximum rules supported per subset by this device */
+	uint16_t max_groups;
+	/**< Maximum subsets supported by this device */
+ void **queue_pairs;
+ /**< Array of pointers to queue pairs. */
+ uint16_t nb_queue_pairs;
+ /**< Number of device queue pairs. */
+ struct roc_ree_vf vf;
+ /**< vf data */
+ struct rte_regexdev_rule *rules;
+ /**< rules to be compiled */
+ uint16_t nb_rules;
+ /**< number of rules */
+} __rte_cache_aligned;
+
+#endif /* _CN9K_REGEXDEV_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <rte_malloc.h>
+#include <rte_regexdev.h>
+
+#include "cn9k_regexdev.h"
+#include "cn9k_regexdev_compiler.h"
+
+#ifdef REE_COMPILER_SDK
+#include <rxp-compiler.h>
+
+static int
+ree_rule_db_compile(const struct rte_regexdev_rule *rules,
+ uint16_t nb_rules, struct rxp_rof **rof, struct rxp_rof **rofi,
+ struct rxp_rof *rof_for_incremental_compile,
+ struct rxp_rof *rofi_for_incremental_compile)
+{
+ /*INPUT*/
+ struct rxp_prefix_selection_control_list *prefix_selection_control_list
+ = NULL;
+ struct rxp_blacklist_data_sample *blacklist_sample_data = NULL;
+ struct rxp_rule_ids_to_remove *rule_ids_to_remove = NULL;
+ struct rxp_roff *roff_for_incremental_compile = NULL;
+
+ /*OPTIONS - setting default values*/
+ enum rxp_virtual_prefix_mode virtual_prefix_mode =
+ RXP_VIRTUAL_PREFIX_MODE_0;
+ enum rxp_prefix_capacity prefix_capacity = RXP_PREFIX_CAPACITY_32K;
+ /**< rxp_global_regex_options_flags*/
+ enum rxp_compiler_objective objective = RXP_COMPILER_OBJECTIVE_5;
+ enum rxp_tpe_data_width tpe_data_width = RXP_TPE_DATA_WIDTH_4;
+ uint32_t compiler_options = RXP_COMPILER_OPTIONS_FORCE;
+ /**< rxp_compiler_options_flags*/
+ enum rxp_verbose_level verbose = RXP_VERBOSE_LEVEL_3;
+ enum rxp_version set_rxp_version = RXP_VERSION_V5_8;
+ uint32_t compiler_output_flags = 0;
+ /**< rxp_compiler_output_flags*/
+ uint32_t global_regex_options = 0;
+ /**< rxp_global_regex_options_flags*/
+ float set_auto_blacklist = 0;
+ uint32_t max_rep_max = 65535;
+ uint32_t divide_ruleset = 1;
+ struct rxp_ruleset ruleset;
+ float ptpb_threshold = 0;
+ uint32_t set_max = 0;
+ uint32_t threads = 1;
+
+ /*OUTPUT*/
+ struct rxp_rule_direction_analysis *rule_direction_analysis = NULL;
+ struct rxp_compilation_statistics *compilation_statistics = NULL;
+ struct rxp_prefix_selection_control_list *generated_pscl = NULL;
+ struct rxp_uncompiled_rules_log *uncompiled_rules_log = NULL;
+ struct rxp_critical_rules_rank *critical_rules_rank = NULL;
+ struct rxp_compiled_rules_log *compiled_rules_log = NULL;
+ struct rxp_roff *roff = NULL;
+
+ uint16_t i;
+ int ret;
+
+ ruleset.number_of_entries = nb_rules;
+ ruleset.rules = rte_malloc("rxp_rule_entry",
+ nb_rules*sizeof(struct rxp_rule_entry), 0);
+
+ if (ruleset.rules == NULL) {
+		cn9k_err("Could not allocate memory for rule compilation");
+ return -EFAULT;
+ }
+ if (rof_for_incremental_compile)
+ compiler_options |= RXP_COMPILER_OPTIONS_INCREMENTAL;
+ if (rofi_for_incremental_compile)
+ compiler_options |= RXP_COMPILER_OPTIONS_CHECKSUM;
+
+ for (i = 0; i < nb_rules; i++) {
+ ruleset.rules[i].number_of_prefix_entries = 0;
+ ruleset.rules[i].prefix = NULL;
+ ruleset.rules[i].rule = rules[i].pcre_rule;
+ ruleset.rules[i].rule_id = rules[i].rule_id;
+ ruleset.rules[i].subset_id = rules[i].group_id;
+ ruleset.rules[i].rule_direction_type =
+ RXP_RULE_DIRECTION_TYPE_NONE;
+ }
+
+ ret = rxp_compile_advanced(
+ /*INPUT*/
+ &ruleset,
+ prefix_selection_control_list,
+ rof_for_incremental_compile,
+ roff_for_incremental_compile,
+ rofi_for_incremental_compile,
+ rule_ids_to_remove,
+ blacklist_sample_data,
+
+ /*OPTIONS*/
+ compiler_options,
+ prefix_capacity,
+ global_regex_options,
+ set_auto_blacklist,
+ set_max,
+ objective,
+ ptpb_threshold,
+ max_rep_max,
+ threads,
+ set_rxp_version,
+ verbose,
+ tpe_data_width,
+ virtual_prefix_mode,
+ compiler_output_flags,
+ divide_ruleset,
+
+ /*OUTPUT*/
+ &compilation_statistics,
+ &compiled_rules_log,
+ &critical_rules_rank,
+ &rule_direction_analysis,
+ &uncompiled_rules_log,
+ rof,
+ &roff,
+ rofi,
+ &generated_pscl);
+ rte_free(ruleset.rules);
+
+ return ret;
+}
+
+int
+cn9k_ree_rule_db_compile_prog(struct rte_regexdev *dev)
+{
+ struct cn9k_ree_data *data = dev->data->dev_private;
+ struct roc_ree_vf *vf = &data->vf;
+ char compiler_version[] = "20.5.2.eda0fa2";
+ char timestamp[] = "19700101_000001";
+ uint32_t rule_db_len, rule_dbi_len;
+ struct rxp_rof *rofi_inc_p = NULL;
+ struct rxp_rof_entry rule_dbi[6];
+ char *rofi_rof_entries = NULL;
+ struct rxp_rof *rofi = NULL;
+ struct rxp_rof *rof = NULL;
+ struct rxp_rof rofi_inc;
+ struct rxp_rof rof_inc;
+ char *rule_db = NULL;
+ int ret;
+
+ ree_func_trace();
+
+ ret = roc_ree_rule_db_len_get(vf, &rule_db_len, &rule_dbi_len);
+ if (ret != 0) {
+ cn9k_err("Could not get rule db length");
+ return ret;
+ }
+
+ if (rule_db_len > 0) {
+ cn9k_ree_dbg("Incremental compile, rule db len %d rule dbi len %d",
+ rule_db_len, rule_dbi_len);
+ rule_db = rte_malloc("ree_rule_db", rule_db_len, 0);
+ if (!rule_db) {
+ cn9k_err("Could not allocate memory for rule db");
+ return -EFAULT;
+ }
+
+ ret = roc_ree_rule_db_get(vf, rule_db, rule_db_len,
+ (char *)rule_dbi, rule_dbi_len);
+ if (ret) {
+ cn9k_err("Could not read rule db");
+ rte_free(rule_db);
+ return -EFAULT;
+ }
+ rof_inc.rof_revision = 0;
+ rof_inc.rof_version = 2;
+ rof_inc.rof_entries = (struct rxp_rof_entry *)rule_db;
+ rof_inc.rxp_compiler_version = compiler_version;
+ rof_inc.timestamp = timestamp;
+ rof_inc.number_of_entries =
+ (rule_db_len/sizeof(struct rxp_rof_entry));
+
+ if (rule_dbi_len > 0) {
+ /* incremental compilation not the first time */
+ rofi_inc.rof_revision = 0;
+ rofi_inc.rof_version = 2;
+ rofi_inc.rof_entries = rule_dbi;
+ rofi_inc.rxp_compiler_version = compiler_version;
+ rofi_inc.timestamp = timestamp;
+ rofi_inc.number_of_entries =
+ (rule_dbi_len/sizeof(struct rxp_rof_entry));
+ rofi_inc_p = &rofi_inc;
+ }
+		ret = ree_rule_db_compile(data->rules, data->nb_rules, &rof,
+				&rofi, &rof_inc, rofi_inc_p);
+		if (ret != 0) {
+			/* Do not dereference rofi if compilation failed */
+			cn9k_err("Could not compile rule db");
+			goto free_structs;
+		}
+		if (rofi->number_of_entries == 0) {
+			cn9k_ree_dbg("No change to rule db");
+			ret = 0;
+			goto free_structs;
+		}
+ rule_dbi_len = rofi->number_of_entries *
+ sizeof(struct rxp_rof_entry);
+ rofi_rof_entries = (char *)rofi->rof_entries;
+ } else {
+ /* full compilation */
+ ret = ree_rule_db_compile(data->rules, data->nb_rules, &rof,
+ &rofi, NULL, NULL);
+ }
+ if (ret != 0) {
+ cn9k_err("Could not compile rule db");
+ goto free_structs;
+ }
+ rule_db_len = rof->number_of_entries * sizeof(struct rxp_rof_entry);
+ ret = roc_ree_rule_db_prog(vf, (char *)rof->rof_entries, rule_db_len,
+ rofi_rof_entries, rule_dbi_len);
+ if (ret)
+ cn9k_err("Could not program rule db");
+
+free_structs:
+ rxp_free_structs(NULL, NULL, NULL, NULL, NULL, &rof, NULL, &rofi, NULL,
+ 1);
+
+ if (rule_db)
+ rte_free(rule_db);
+
+ return ret;
+}
+#else
+int
+cn9k_ree_rule_db_compile_prog(struct rte_regexdev *dev)
+{
+ RTE_SET_USED(dev);
+ return -ENOTSUP;
+}
+#endif
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef _CN9K_REGEXDEV_COMPILER_H_
+#define _CN9K_REGEXDEV_COMPILER_H_
+
+int
+cn9k_ree_rule_db_compile_prog(struct rte_regexdev *dev);
+
+#endif /* _CN9K_REGEXDEV_COMPILER_H_ */
--- /dev/null
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(C) 2020 Marvell International Ltd.
+#
+
+if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
+ build = false
+ reason = 'only supported on 64-bit Linux'
+ subdir_done()
+endif
+
+lib = cc.find_library('librxp_compiler', required: false)
+if lib.found()
+ ext_deps += lib
+ ext_deps += cc.find_library('libstdc++', required: true)
+ cflags += ['-DREE_COMPILER_SDK']
+endif
+
+sources = files(
+ 'cn9k_regexdev.c',
+ 'cn9k_regexdev_compiler.c',
+)
+
+deps += ['bus_pci', 'regexdev']
+deps += ['common_cnxk', 'mempool_cnxk']
--- /dev/null
+DPDK_22 {
+ local: *;
+};
drivers = [
'mlx5',
- 'octeontx2',
+ 'cn9k',
]
std_deps = ['ethdev', 'kvargs'] # 'ethdev' also pulls in mbuf, net, eal etc
+++ /dev/null
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(C) 2020 Marvell International Ltd.
-#
-
-if not is_linux or not dpdk_conf.get('RTE_ARCH_64')
- build = false
- reason = 'only supported on 64-bit Linux'
- subdir_done()
-endif
-
-lib = cc.find_library('librxp_compiler', required: false)
-if lib.found()
- ext_deps += lib
- ext_deps += cc.find_library('libstdc++', required: true)
- cflags += ['-DREE_COMPILER_SDK']
-endif
-
-sources = files(
- 'otx2_regexdev.c',
- 'otx2_regexdev_compiler.c',
- 'otx2_regexdev_hw_access.c',
- 'otx2_regexdev_mbox.c',
-)
-
-deps += ['bus_pci', 'common_octeontx2', 'regexdev']
-
-includes += include_directories('../../common/octeontx2')
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (C) 2020 Marvell International Ltd.
- */
-
-#include <stdio.h>
-#include <unistd.h>
-
-#include <rte_malloc.h>
-#include <rte_memzone.h>
-#include <rte_regexdev.h>
-#include <rte_regexdev_core.h>
-#include <rte_regexdev_driver.h>
-
-
-/* REE common headers */
-#include "otx2_common.h"
-#include "otx2_dev.h"
-#include "otx2_regexdev.h"
-#include "otx2_regexdev_compiler.h"
-#include "otx2_regexdev_hw_access.h"
-#include "otx2_regexdev_mbox.h"
-
-
-/* HW matches are at offset 0x80 from RES_PTR_ADDR
- * In op structure matches starts at W5 (0x28)
- * There is a need to copy to 0x28 to 0x80 The matches that are at the tail
- * Which are 88 B. Each match holds 8 B, so up to 11 matches can be copied
- */
-#define REE_NUM_MATCHES_ALIGN 11
-/* The REE co-processor will write up to 254 job match structures
- * (REE_MATCH_S) starting at address [RES_PTR_ADDR] + 0x80.
- */
-#define REE_MATCH_OFFSET 0x80
-
-#define REE_MAX_RULES_PER_GROUP 0xFFFF
-#define REE_MAX_GROUPS 0xFFFF
-
-/* This is temporarily here */
-#define REE0_PF 19
-#define REE1_PF 20
-
-#define REE_RULE_DB_VERSION 2
-#define REE_RULE_DB_REVISION 0
-
-struct ree_rule_db_entry {
- uint8_t type;
- uint32_t addr;
- uint64_t value;
-};
-
-struct ree_rule_db {
- uint32_t version;
- uint32_t revision;
- uint32_t number_of_entries;
- struct ree_rule_db_entry entries[];
-} __rte_packed;
-
-static void
-qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
-{
- snprintf(name, size, "otx2_ree_lf_mem_%u:%u", dev_id, qp_id);
-}
-
-static struct otx2_ree_qp *
-ree_qp_create(const struct rte_regexdev *dev, uint16_t qp_id)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- uint64_t pg_sz = sysconf(_SC_PAGESIZE);
- struct otx2_ree_vf *vf = &data->vf;
- const struct rte_memzone *lf_mem;
- uint32_t len, iq_len, size_div2;
- char name[RTE_MEMZONE_NAMESIZE];
- uint64_t used_len, iova;
- struct otx2_ree_qp *qp;
- uint8_t *va;
- int ret;
-
- /* Allocate queue pair */
- qp = rte_zmalloc("OCTEON TX2 Regex PMD Queue Pair", sizeof(*qp),
- OTX2_ALIGN);
- if (qp == NULL) {
- otx2_err("Could not allocate queue pair");
- return NULL;
- }
-
- iq_len = OTX2_REE_IQ_LEN;
-
- /*
- * Queue size must be in units of 128B 2 * REE_INST_S (which is 64B),
- * and a power of 2.
- * effective queue size to software is (size - 1) * 128
- */
- size_div2 = iq_len >> 1;
-
- /* For pending queue */
- len = iq_len * RTE_ALIGN(sizeof(struct otx2_ree_rid), 8);
-
- /* So that instruction queues start as pg size aligned */
- len = RTE_ALIGN(len, pg_sz);
-
- /* For instruction queues */
- len += OTX2_REE_IQ_LEN * sizeof(union otx2_ree_inst);
-
- /* Waste after instruction queues */
- len = RTE_ALIGN(len, pg_sz);
-
- qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
- qp_id);
-
- lf_mem = rte_memzone_reserve_aligned(name, len, vf->otx2_dev.node,
- RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_256MB,
- RTE_CACHE_LINE_SIZE);
- if (lf_mem == NULL) {
- otx2_err("Could not allocate reserved memzone");
- goto qp_free;
- }
-
- va = lf_mem->addr;
- iova = lf_mem->iova;
-
- memset(va, 0, len);
-
- /* Initialize pending queue */
- qp->pend_q.rid_queue = (struct otx2_ree_rid *)va;
- qp->pend_q.enq_tail = 0;
- qp->pend_q.deq_head = 0;
- qp->pend_q.pending_count = 0;
-
- used_len = iq_len * RTE_ALIGN(sizeof(struct otx2_ree_rid), 8);
- used_len = RTE_ALIGN(used_len, pg_sz);
- iova += used_len;
-
- qp->iq_dma_addr = iova;
- qp->id = qp_id;
- qp->base = OTX2_REE_LF_BAR2(vf, qp_id);
- qp->otx2_regexdev_jobid = 0;
- qp->write_offset = 0;
-
- ret = otx2_ree_iq_enable(dev, qp, OTX2_REE_QUEUE_HI_PRIO, size_div2);
- if (ret) {
- otx2_err("Could not enable instruction queue");
- goto qp_free;
- }
-
- return qp;
-
-qp_free:
- rte_free(qp);
- return NULL;
-}
-
-static int
-ree_qp_destroy(const struct rte_regexdev *dev, struct otx2_ree_qp *qp)
-{
- const struct rte_memzone *lf_mem;
- char name[RTE_MEMZONE_NAMESIZE];
- int ret;
-
- otx2_ree_iq_disable(qp);
-
- qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
- qp->id);
-
- lf_mem = rte_memzone_lookup(name);
-
- ret = rte_memzone_free(lf_mem);
- if (ret)
- return ret;
-
- rte_free(qp);
-
- return 0;
-}
-
-static int
-ree_queue_pair_release(struct rte_regexdev *dev, uint16_t qp_id)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- struct otx2_ree_qp *qp = data->queue_pairs[qp_id];
- int ret;
-
- ree_func_trace("Queue=%d", qp_id);
-
- if (qp == NULL)
- return -EINVAL;
-
- ret = ree_qp_destroy(dev, qp);
- if (ret) {
- otx2_err("Could not destroy queue pair %d", qp_id);
- return ret;
- }
-
- data->queue_pairs[qp_id] = NULL;
-
- return 0;
-}
-
-static struct rte_regexdev *
-ree_dev_register(const char *name)
-{
- struct rte_regexdev *dev;
-
- otx2_ree_dbg("Creating regexdev %s\n", name);
-
- /* allocate device structure */
- dev = rte_regexdev_register(name);
- if (dev == NULL) {
- otx2_err("Failed to allocate regex device for %s", name);
- return NULL;
- }
-
- /* allocate private device structure */
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- dev->data->dev_private =
- rte_zmalloc_socket("regexdev device private",
- sizeof(struct otx2_ree_data),
- RTE_CACHE_LINE_SIZE,
- rte_socket_id());
-
- if (dev->data->dev_private == NULL) {
- otx2_err("Cannot allocate memory for dev %s private data",
- name);
-
- rte_regexdev_unregister(dev);
- return NULL;
- }
- }
-
- return dev;
-}
-
-static int
-ree_dev_unregister(struct rte_regexdev *dev)
-{
- otx2_ree_dbg("Closing regex device %s", dev->device->name);
-
- /* free regex device */
- rte_regexdev_unregister(dev);
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(dev->data->dev_private);
-
- return 0;
-}
-
-static int
-ree_dev_fini(struct rte_regexdev *dev)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- struct rte_pci_device *pci_dev;
- int i, ret;
-
- ree_func_trace();
-
- for (i = 0; i < data->nb_queue_pairs; i++) {
- ret = ree_queue_pair_release(dev, i);
- if (ret)
- return ret;
- }
-
- ret = otx2_ree_queues_detach(dev);
- if (ret)
- otx2_err("Could not detach queues");
-
- /* TEMP : should be in lib */
- if (data->queue_pairs)
- rte_free(data->queue_pairs);
- if (data->rules)
- rte_free(data->rules);
-
- pci_dev = container_of(dev->device, struct rte_pci_device, device);
- otx2_dev_fini(pci_dev, &(data->vf.otx2_dev));
-
- ret = ree_dev_unregister(dev);
- if (ret)
- otx2_err("Could not destroy PMD");
-
- return ret;
-}
-
-static inline int
-ree_enqueue(struct otx2_ree_qp *qp, struct rte_regex_ops *op,
- struct otx2_ree_pending_queue *pend_q)
-{
- union otx2_ree_inst inst;
- union otx2_ree_res *res;
- uint32_t offset;
-
- if (unlikely(pend_q->pending_count >= OTX2_REE_DEFAULT_CMD_QLEN)) {
- otx2_err("Pending count %" PRIu64 " is greater than Q size %d",
- pend_q->pending_count, OTX2_REE_DEFAULT_CMD_QLEN);
- return -EAGAIN;
- }
- if (unlikely(op->mbuf->data_len > OTX2_REE_MAX_PAYLOAD_SIZE ||
- op->mbuf->data_len == 0)) {
- otx2_err("Packet length %d is greater than MAX payload %d",
- op->mbuf->data_len, OTX2_REE_MAX_PAYLOAD_SIZE);
- return -EAGAIN;
- }
-
- /* W 0 */
- inst.cn98xx.ooj = 1;
- inst.cn98xx.dg = 0;
- inst.cn98xx.doneint = 0;
- /* W 1 */
- inst.cn98xx.inp_ptr_addr = rte_pktmbuf_mtod(op->mbuf, uint64_t);
- /* W 2 */
- inst.cn98xx.inp_ptr_ctl = op->mbuf->data_len & 0x7FFF;
- inst.cn98xx.inp_ptr_ctl = inst.cn98xx.inp_ptr_ctl << 32;
-
- /* W 3 */
- inst.cn98xx.res_ptr_addr = (uint64_t)op;
- /* W 4 */
- inst.cn98xx.wq_ptr = 0;
- /* W 5 */
- inst.cn98xx.ggrp = 0;
- inst.cn98xx.tt = 0;
- inst.cn98xx.tag = 0;
- /* W 6 */
- inst.cn98xx.ree_job_length = op->mbuf->data_len & 0x7FFF;
- if (op->req_flags & RTE_REGEX_OPS_REQ_STOP_ON_MATCH_F)
- inst.cn98xx.ree_job_ctrl = (0x2 << 8);
- else if (op->req_flags & RTE_REGEX_OPS_REQ_MATCH_HIGH_PRIORITY_F)
- inst.cn98xx.ree_job_ctrl = (0x1 << 8);
- else
- inst.cn98xx.ree_job_ctrl = 0;
- inst.cn98xx.ree_job_id = qp->otx2_regexdev_jobid;
- /* W 7 */
- inst.cn98xx.ree_job_subset_id_0 = op->group_id0;
- if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID1_VALID_F)
- inst.cn98xx.ree_job_subset_id_1 = op->group_id1;
- else
- inst.cn98xx.ree_job_subset_id_1 = op->group_id0;
- if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID2_VALID_F)
- inst.cn98xx.ree_job_subset_id_2 = op->group_id2;
- else
- inst.cn98xx.ree_job_subset_id_2 = op->group_id0;
- if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID3_VALID_F)
- inst.cn98xx.ree_job_subset_id_3 = op->group_id3;
- else
- inst.cn98xx.ree_job_subset_id_3 = op->group_id0;
-
- /* Copy REE command to Q */
- offset = qp->write_offset * sizeof(inst);
- memcpy((void *)(qp->iq_dma_addr + offset), &inst, sizeof(inst));
-
- pend_q->rid_queue[pend_q->enq_tail].rid = (uintptr_t)op;
- pend_q->rid_queue[pend_q->enq_tail].user_id = op->user_id;
-
- /* Mark result as not done */
- res = (union otx2_ree_res *)(op);
- res->s.done = 0;
- res->s.ree_err = 0;
-
- /* We will use soft queue length here to limit requests */
- REE_MOD_INC(pend_q->enq_tail, OTX2_REE_DEFAULT_CMD_QLEN);
- pend_q->pending_count += 1;
- REE_MOD_INC(qp->otx2_regexdev_jobid, 0xFFFFFF);
- REE_MOD_INC(qp->write_offset, OTX2_REE_IQ_LEN);
-
- return 0;
-}
-
-static uint16_t
-otx2_ree_enqueue_burst(struct rte_regexdev *dev, uint16_t qp_id,
- struct rte_regex_ops **ops, uint16_t nb_ops)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- struct otx2_ree_qp *qp = data->queue_pairs[qp_id];
- struct otx2_ree_pending_queue *pend_q;
- uint16_t nb_allowed, count = 0;
- struct rte_regex_ops *op;
- int ret;
-
- pend_q = &qp->pend_q;
-
- nb_allowed = OTX2_REE_DEFAULT_CMD_QLEN - pend_q->pending_count;
- if (nb_ops > nb_allowed)
- nb_ops = nb_allowed;
-
- for (count = 0; count < nb_ops; count++) {
- op = ops[count];
- ret = ree_enqueue(qp, op, pend_q);
-
- if (unlikely(ret))
- break;
- }
-
- /*
- * Make sure all instructions are written before DOORBELL is activated
- */
- rte_io_wmb();
-
- /* Update Doorbell */
- otx2_write64(count, qp->base + OTX2_REE_LF_DOORBELL);
-
- return count;
-}
-
-static inline void
-ree_dequeue_post_process(struct rte_regex_ops *ops)
-{
- uint8_t ree_res_mcnt, ree_res_dmcnt;
- int off = REE_MATCH_OFFSET;
- struct ree_res_s_98 *res;
- uint16_t ree_res_status;
- uint64_t match;
-
- res = (struct ree_res_s_98 *)ops;
- /* store res values on stack since ops and res
- * are using the same memory
- */
- ree_res_status = res->ree_res_status;
- ree_res_mcnt = res->ree_res_mcnt;
- ree_res_dmcnt = res->ree_res_dmcnt;
- ops->rsp_flags = 0;
- ops->nb_actual_matches = ree_res_dmcnt;
- ops->nb_matches = ree_res_mcnt;
- if (unlikely(res->ree_err)) {
- ops->nb_actual_matches = 0;
- ops->nb_matches = 0;
- }
-
- if (unlikely(ree_res_status != REE_TYPE_RESULT_DESC)) {
- if (ree_res_status & OTX2_REE_STATUS_PMI_SOJ_BIT)
- ops->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_SOJ_F;
- if (ree_res_status & OTX2_REE_STATUS_PMI_EOJ_BIT)
- ops->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_EOJ_F;
- if (ree_res_status & OTX2_REE_STATUS_ML_CNT_DET_BIT)
- ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_SCAN_TIMEOUT_F;
- if (ree_res_status & OTX2_REE_STATUS_MM_CNT_DET_BIT)
- ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_MATCH_F;
- if (ree_res_status & OTX2_REE_STATUS_MP_CNT_DET_BIT)
- ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_PREFIX_F;
- }
- if (ops->nb_matches > 0) {
- /* Move the matches to the correct offset */
- off = ((ops->nb_matches < REE_NUM_MATCHES_ALIGN) ?
- ops->nb_matches : REE_NUM_MATCHES_ALIGN);
- match = (uint64_t)ops + REE_MATCH_OFFSET;
- match += (ops->nb_matches - off) *
- sizeof(union otx2_ree_match);
- memcpy((void *)ops->matches, (void *)match,
- off * sizeof(union otx2_ree_match));
- }
-}
-
-static uint16_t
-otx2_ree_dequeue_burst(struct rte_regexdev *dev, uint16_t qp_id,
- struct rte_regex_ops **ops, uint16_t nb_ops)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- struct otx2_ree_qp *qp = data->queue_pairs[qp_id];
- struct otx2_ree_pending_queue *pend_q;
- int i, nb_pending, nb_completed = 0;
- volatile struct ree_res_s_98 *res;
- struct otx2_ree_rid *rid;
-
- pend_q = &qp->pend_q;
-
- nb_pending = pend_q->pending_count;
-
- if (nb_ops > nb_pending)
- nb_ops = nb_pending;
-
- for (i = 0; i < nb_ops; i++) {
- rid = &pend_q->rid_queue[pend_q->deq_head];
- res = (volatile struct ree_res_s_98 *)(rid->rid);
-
- /* Check response header done bit if completed */
- if (unlikely(!res->done))
- break;
-
- ops[i] = (struct rte_regex_ops *)(rid->rid);
- ops[i]->user_id = rid->user_id;
-
- REE_MOD_INC(pend_q->deq_head, OTX2_REE_DEFAULT_CMD_QLEN);
- pend_q->pending_count -= 1;
- }
-
- nb_completed = i;
-
- for (i = 0; i < nb_completed; i++)
- ree_dequeue_post_process(ops[i]);
-
- return nb_completed;
-}
-
-static int
-otx2_ree_dev_info_get(struct rte_regexdev *dev, struct rte_regexdev_info *info)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- struct otx2_ree_vf *vf = &data->vf;
-
- ree_func_trace();
-
- if (info == NULL)
- return -EINVAL;
-
- info->driver_name = dev->device->driver->name;
- info->dev = dev->device;
-
- info->max_queue_pairs = vf->max_queues;
- info->max_matches = vf->max_matches;
- info->max_payload_size = OTX2_REE_MAX_PAYLOAD_SIZE;
- info->max_rules_per_group = data->max_rules_per_group;
- info->max_groups = data->max_groups;
- info->regexdev_capa = data->regexdev_capa;
- info->rule_flags = data->rule_flags;
-
- return 0;
-}
-
-static int
-otx2_ree_dev_config(struct rte_regexdev *dev,
- const struct rte_regexdev_config *cfg)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- struct otx2_ree_vf *vf = &data->vf;
- const struct ree_rule_db *rule_db;
- uint32_t rule_db_len;
- int ret;
-
- ree_func_trace();
-
- if (cfg->nb_queue_pairs > vf->max_queues) {
- otx2_err("Invalid number of queue pairs requested");
- return -EINVAL;
- }
-
- if (cfg->nb_max_matches != vf->max_matches) {
- otx2_err("Invalid number of max matches requested");
- return -EINVAL;
- }
-
- if (cfg->dev_cfg_flags != 0) {
- otx2_err("Invalid device configuration flags requested");
- return -EINVAL;
- }
-
- /* Unregister error interrupts */
- if (vf->err_intr_registered)
- otx2_ree_err_intr_unregister(dev);
-
- /* Detach queues */
- if (vf->nb_queues) {
- ret = otx2_ree_queues_detach(dev);
- if (ret) {
- otx2_err("Could not detach REE queues");
- return ret;
- }
- }
-
- /* TEMP : should be in lib */
- if (data->queue_pairs == NULL) { /* first time configuration */
- data->queue_pairs = rte_zmalloc("regexdev->queue_pairs",
- sizeof(data->queue_pairs[0]) *
- cfg->nb_queue_pairs, RTE_CACHE_LINE_SIZE);
-
- if (data->queue_pairs == NULL) {
- data->nb_queue_pairs = 0;
- otx2_err("Failed to get memory for qp meta data, nb_queues %u",
- cfg->nb_queue_pairs);
- return -ENOMEM;
- }
- } else { /* re-configure */
- uint16_t old_nb_queues = data->nb_queue_pairs;
- void **qp;
- unsigned int i;
-
- qp = data->queue_pairs;
-
- for (i = cfg->nb_queue_pairs; i < old_nb_queues; i++) {
- ret = ree_queue_pair_release(dev, i);
- if (ret < 0)
- return ret;
- }
-
- qp = rte_realloc(qp, sizeof(qp[0]) * cfg->nb_queue_pairs,
- RTE_CACHE_LINE_SIZE);
- if (qp == NULL) {
- otx2_err("Failed to realloc qp meta data, nb_queues %u",
- cfg->nb_queue_pairs);
- return -ENOMEM;
- }
-
- if (cfg->nb_queue_pairs > old_nb_queues) {
- uint16_t new_qs = cfg->nb_queue_pairs - old_nb_queues;
- memset(qp + old_nb_queues, 0, sizeof(qp[0]) * new_qs);
- }
-
- data->queue_pairs = qp;
- }
- data->nb_queue_pairs = cfg->nb_queue_pairs;
-
- /* Attach queues */
- otx2_ree_dbg("Attach %d queues", cfg->nb_queue_pairs);
- ret = otx2_ree_queues_attach(dev, cfg->nb_queue_pairs);
- if (ret) {
- otx2_err("Could not attach queues");
- return -ENODEV;
- }
-
- ret = otx2_ree_msix_offsets_get(dev);
- if (ret) {
- otx2_err("Could not get MSI-X offsets");
- goto queues_detach;
- }
-
- if (cfg->rule_db && cfg->rule_db_len) {
- otx2_ree_dbg("rule_db length %d", cfg->rule_db_len);
- rule_db = (const struct ree_rule_db *)cfg->rule_db;
- rule_db_len = rule_db->number_of_entries *
- sizeof(struct ree_rule_db_entry);
- otx2_ree_dbg("rule_db number of entries %d",
- rule_db->number_of_entries);
- if (rule_db_len > cfg->rule_db_len) {
- otx2_err("Could not program rule db");
- ret = -EINVAL;
- goto queues_detach;
- }
- ret = otx2_ree_rule_db_prog(dev, (const char *)rule_db->entries,
- rule_db_len, NULL, OTX2_REE_NON_INC_PROG);
- if (ret) {
- otx2_err("Could not program rule db");
- goto queues_detach;
- }
- }
-
- dev->enqueue = otx2_ree_enqueue_burst;
- dev->dequeue = otx2_ree_dequeue_burst;
-
- rte_mb();
- return 0;
-
-queues_detach:
- otx2_ree_queues_detach(dev);
- return ret;
-}
-
-static int
-otx2_ree_stop(struct rte_regexdev *dev)
-{
- RTE_SET_USED(dev);
-
- ree_func_trace();
- return 0;
-}
-
-static int
-otx2_ree_start(struct rte_regexdev *dev)
-{
- uint32_t rule_db_len = 0;
- int ret;
-
- ree_func_trace();
-
- ret = otx2_ree_rule_db_len_get(dev, &rule_db_len, NULL);
- if (ret)
- return ret;
- if (rule_db_len == 0) {
- otx2_err("Rule db not programmed");
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int
-otx2_ree_close(struct rte_regexdev *dev)
-{
- return ree_dev_fini(dev);
-}
-
-static int
-otx2_ree_queue_pair_setup(struct rte_regexdev *dev, uint16_t qp_id,
- const struct rte_regexdev_qp_conf *qp_conf)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- struct otx2_ree_qp *qp;
-
- ree_func_trace("Queue=%d", qp_id);
-
- if (data->queue_pairs[qp_id] != NULL)
- ree_queue_pair_release(dev, qp_id);
-
- if (qp_conf->nb_desc > OTX2_REE_DEFAULT_CMD_QLEN) {
- otx2_err("Could not setup queue pair for %u descriptors",
- qp_conf->nb_desc);
- return -EINVAL;
- }
- if (qp_conf->qp_conf_flags != 0) {
- otx2_err("Could not setup queue pair with configuration flags 0x%x",
- qp_conf->qp_conf_flags);
- return -EINVAL;
- }
-
- qp = ree_qp_create(dev, qp_id);
- if (qp == NULL) {
- otx2_err("Could not create queue pair %d", qp_id);
- return -ENOMEM;
- }
- qp->cb = qp_conf->cb;
- data->queue_pairs[qp_id] = qp;
-
- return 0;
-}
-
-static int
-otx2_ree_rule_db_compile_activate(struct rte_regexdev *dev)
-{
- return otx2_ree_rule_db_compile_prog(dev);
-}
-
-static int
-otx2_ree_rule_db_update(struct rte_regexdev *dev,
- const struct rte_regexdev_rule *rules, uint16_t nb_rules)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- struct rte_regexdev_rule *old_ptr;
- uint32_t i, sum_nb_rules;
-
- ree_func_trace("nb_rules=%d", nb_rules);
-
- for (i = 0; i < nb_rules; i++) {
- if (rules[i].op == RTE_REGEX_RULE_OP_REMOVE)
- break;
- if (rules[i].group_id >= data->max_groups)
- break;
- if (rules[i].rule_id >= data->max_rules_per_group)
- break;
-		/* Bitwise logical implication: every requested rule flag (p)
-		 * must be supported by the device (q), i.e. p -> q per bit:
-		 * p q p -> q
-		 * 0 0 1
-		 * 0 1 1
-		 * 1 0 0
-		 * 1 1 1
-		 * Reject the rule if the implication fails for any bit.
-		 */
-		if ((~(rules[i].rule_flags) | data->rule_flags) != ~0ULL)
-			break;
- }
- nb_rules = i;
-
- if (data->nb_rules == 0) {
-
- data->rules = rte_malloc("rte_regexdev_rules",
- nb_rules*sizeof(struct rte_regexdev_rule), 0);
- if (data->rules == NULL)
- return -ENOMEM;
-
- memcpy(data->rules, rules,
- nb_rules*sizeof(struct rte_regexdev_rule));
- data->nb_rules = nb_rules;
- } else {
-
- old_ptr = data->rules;
- sum_nb_rules = data->nb_rules + nb_rules;
- data->rules = rte_realloc(data->rules,
- sum_nb_rules * sizeof(struct rte_regexdev_rule),
- 0);
- if (data->rules == NULL) {
- data->rules = old_ptr;
- return -ENOMEM;
- }
- memcpy(&data->rules[data->nb_rules], rules,
- nb_rules*sizeof(struct rte_regexdev_rule));
- data->nb_rules = sum_nb_rules;
- }
- return nb_rules;
-}
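
Rules reach this handler through rte_regexdev_rule_db_update(), which only stages them; nothing is compiled until the activate call. A hedged sketch with an illustrative pattern, assuming each rule passes the group, rule-id and rule-flags checks above:

    #include <rte_regexdev.h>

    static int
    ree_app_add_rule(uint8_t dev_id)
    {
        struct rte_regexdev_rule rule = {
            .op = RTE_REGEX_RULE_OP_ADD,
            .group_id = 0,              /* < REE_MAX_GROUPS */
            .rule_id = 1,               /* < REE_MAX_RULES_PER_GROUP */
            .pcre_rule = "^abc[0-9]+",  /* illustrative pattern */
            .pcre_rule_len = sizeof("^abc[0-9]+") - 1,
            .rule_flags = 0,            /* must be supported by the device */
        };

        if (rte_regexdev_rule_db_update(dev_id, &rule, 1) < 1)
            return -1; /* rule rejected by the checks above */
        return rte_regexdev_rule_db_compile_activate(dev_id);
    }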
-
-static int
-otx2_ree_rule_db_import(struct rte_regexdev *dev, const char *rule_db,
- uint32_t rule_db_len)
-{
-
- const struct ree_rule_db *ree_rule_db;
- uint32_t ree_rule_db_len;
- int ret;
-
- ree_func_trace("rule_db_len=%d", rule_db_len);
-
- ree_rule_db = (const struct ree_rule_db *)rule_db;
- ree_rule_db_len = ree_rule_db->number_of_entries *
- sizeof(struct ree_rule_db_entry);
- if (ree_rule_db_len > rule_db_len) {
- otx2_err("Could not program rule db");
- return -EINVAL;
- }
- ret = otx2_ree_rule_db_prog(dev, (const char *)ree_rule_db->entries,
- ree_rule_db_len, NULL, OTX2_REE_NON_INC_PROG);
- if (ret) {
- otx2_err("Could not program rule db");
- return -ENOSPC;
- }
- return 0;
-}
-
-static int
-otx2_ree_rule_db_export(struct rte_regexdev *dev, char *rule_db)
-{
- struct ree_rule_db *ree_rule_db;
- uint32_t rule_dbi_len;
- uint32_t rule_db_len;
- int ret;
-
- ree_func_trace();
-
- ret = otx2_ree_rule_db_len_get(dev, &rule_db_len, &rule_dbi_len);
- if (ret)
- return ret;
-
- if (rule_db == NULL) {
- rule_db_len += sizeof(struct ree_rule_db);
- return rule_db_len;
- }
-
- ree_rule_db = (struct ree_rule_db *)rule_db;
- ret = otx2_ree_rule_db_get(dev, (char *)ree_rule_db->entries,
- rule_db_len, NULL, 0);
- if (ret) {
- otx2_err("Could not export rule db");
- return -EFAULT;
- }
- ree_rule_db->number_of_entries =
- rule_db_len/sizeof(struct ree_rule_db_entry);
- ree_rule_db->revision = REE_RULE_DB_REVISION;
- ree_rule_db->version = REE_RULE_DB_VERSION;
-
- return 0;
-}
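
The export above follows the usual two-call convention: a NULL buffer queries the required size (including the ree_rule_db header), a second call fills the buffer. A hedged round-trip sketch:

    #include <errno.h>
    #include <stdlib.h>
    #include <rte_regexdev.h>

    static int
    ree_app_save_restore_db(uint8_t dev_id)
    {
        char *db;
        int len, ret;

        len = rte_regexdev_rule_db_export(dev_id, NULL); /* size query */
        if (len <= 0)
            return len;
        db = malloc(len);
        if (db == NULL)
            return -ENOMEM;
        ret = rte_regexdev_rule_db_export(dev_id, db);   /* fill buffer */
        if (ret == 0)
            ret = rte_regexdev_rule_db_import(dev_id, db, len);
        free(db);
        return ret;
    }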
-
-static int
-ree_get_blkaddr(struct otx2_dev *dev)
-{
- int pf;
-
- pf = otx2_get_pf(dev->pf_func);
- if (pf == REE0_PF)
- return RVU_BLOCK_ADDR_REE0;
- else if (pf == REE1_PF)
- return RVU_BLOCK_ADDR_REE1;
- else
- return 0;
-}
-
-static struct rte_regexdev_ops otx2_ree_ops = {
- .dev_info_get = otx2_ree_dev_info_get,
- .dev_configure = otx2_ree_dev_config,
- .dev_qp_setup = otx2_ree_queue_pair_setup,
- .dev_start = otx2_ree_start,
- .dev_stop = otx2_ree_stop,
- .dev_close = otx2_ree_close,
- .dev_attr_get = NULL,
- .dev_attr_set = NULL,
- .dev_rule_db_update = otx2_ree_rule_db_update,
- .dev_rule_db_compile_activate =
- otx2_ree_rule_db_compile_activate,
- .dev_db_import = otx2_ree_rule_db_import,
- .dev_db_export = otx2_ree_rule_db_export,
- .dev_xstats_names_get = NULL,
- .dev_xstats_get = NULL,
- .dev_xstats_by_name_get = NULL,
- .dev_xstats_reset = NULL,
- .dev_selftest = NULL,
- .dev_dump = NULL,
-};
-
-static int
-otx2_ree_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
- struct rte_pci_device *pci_dev)
-{
- char name[RTE_REGEXDEV_NAME_MAX_LEN];
- struct otx2_ree_data *data;
- struct otx2_dev *otx2_dev;
- struct rte_regexdev *dev;
- uint8_t max_matches = 0;
- struct otx2_ree_vf *vf;
- uint16_t nb_queues = 0;
- int ret;
-
- rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
-
- dev = ree_dev_register(name);
- if (dev == NULL) {
- ret = -ENODEV;
- goto exit;
- }
-
- dev->dev_ops = &otx2_ree_ops;
- dev->device = &pci_dev->device;
-
- /* Get private data space allocated */
- data = dev->data->dev_private;
- vf = &data->vf;
-
- otx2_dev = &vf->otx2_dev;
-
- /* Initialize the base otx2_dev object */
- ret = otx2_dev_init(pci_dev, otx2_dev);
- if (ret) {
- otx2_err("Could not initialize otx2_dev");
- goto dev_unregister;
- }
- /* Get REE block address */
- vf->block_address = ree_get_blkaddr(otx2_dev);
-	if (!vf->block_address) {
-		otx2_err("Could not determine REE block address");
-		ret = -ENODEV;
-		goto otx2_dev_fini;
-	}
-
- /* Get number of queues available on the device */
- ret = otx2_ree_available_queues_get(dev, &nb_queues);
- if (ret) {
- otx2_err("Could not determine the number of queues available");
- goto otx2_dev_fini;
- }
-
- /* Don't exceed the limits set per VF */
- nb_queues = RTE_MIN(nb_queues, OTX2_REE_MAX_QUEUES_PER_VF);
-
-	if (nb_queues == 0) {
-		otx2_err("No free queues available on the device");
-		ret = -ENODEV;
-		goto otx2_dev_fini;
-	}
-
- vf->max_queues = nb_queues;
-
- otx2_ree_dbg("Max queues supported by device: %d", vf->max_queues);
-
- /* Get number of maximum matches supported on the device */
- ret = otx2_ree_max_matches_get(dev, &max_matches);
- if (ret) {
- otx2_err("Could not determine the maximum matches supported");
- goto otx2_dev_fini;
- }
- /* Don't exceed the limits set per VF */
- max_matches = RTE_MIN(max_matches, OTX2_REE_MAX_MATCHES_PER_VF);
-	if (max_matches == 0) {
-		otx2_err("No matches available on the device");
-		ret = -ENODEV;
-		goto otx2_dev_fini;
-	}
-
- vf->max_matches = max_matches;
-
- otx2_ree_dbg("Max matches supported by device: %d", vf->max_matches);
- data->rule_flags = RTE_REGEX_PCRE_RULE_ALLOW_EMPTY_F |
- RTE_REGEX_PCRE_RULE_ANCHORED_F;
- data->regexdev_capa = 0;
- data->max_groups = REE_MAX_GROUPS;
- data->max_rules_per_group = REE_MAX_RULES_PER_GROUP;
- data->nb_rules = 0;
-
- dev->state = RTE_REGEXDEV_READY;
- return 0;
-
-otx2_dev_fini:
- otx2_dev_fini(pci_dev, otx2_dev);
-dev_unregister:
- ree_dev_unregister(dev);
-exit:
- otx2_err("Could not create device (vendor_id: 0x%x device_id: 0x%x)",
- pci_dev->id.vendor_id, pci_dev->id.device_id);
- return ret;
-}
-
-static int
-otx2_ree_pci_remove(struct rte_pci_device *pci_dev)
-{
- char name[RTE_REGEXDEV_NAME_MAX_LEN];
- struct rte_regexdev *dev = NULL;
-
- if (pci_dev == NULL)
- return -EINVAL;
-
- rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
-
- dev = rte_regexdev_get_device_by_name(name);
-
- if (dev == NULL)
- return -ENODEV;
-
- return ree_dev_fini(dev);
-}
-
-static struct rte_pci_id pci_id_ree_table[] = {
- {
- RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
- PCI_DEVID_OCTEONTX2_RVU_REE_PF)
- },
- {
- .vendor_id = 0,
- }
-};
-
-static struct rte_pci_driver otx2_regexdev_pmd = {
- .id_table = pci_id_ree_table,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
- .probe = otx2_ree_pci_probe,
- .remove = otx2_ree_pci_remove,
-};
-
-RTE_PMD_REGISTER_PCI(REGEXDEV_NAME_OCTEONTX2_PMD, otx2_regexdev_pmd);
-RTE_PMD_REGISTER_PCI_TABLE(REGEXDEV_NAME_OCTEONTX2_PMD, pci_id_ree_table);
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (C) 2020 Marvell International Ltd.
- */
-
-#ifndef _OTX2_REGEXDEV_H_
-#define _OTX2_REGEXDEV_H_
-
-#include <rte_common.h>
-#include <rte_regexdev.h>
-
-#include "otx2_dev.h"
-
-#define ree_func_trace otx2_ree_dbg
-
-/* Marvell OCTEON TX2 Regex PMD device name */
-#define REGEXDEV_NAME_OCTEONTX2_PMD regex_octeontx2
-
-#define OTX2_REE_MAX_LFS 36
-#define OTX2_REE_MAX_QUEUES_PER_VF 36
-#define OTX2_REE_MAX_MATCHES_PER_VF 254
-
-#define OTX2_REE_MAX_PAYLOAD_SIZE (1 << 14)
-
-#define OTX2_REE_NON_INC_PROG 0
-#define OTX2_REE_INC_PROG 1
-
-#define REE_MOD_INC(i, l) ((i) == ((l) - 1) ? (i) = 0 : (i)++)
-
-/**
- * Device vf data
- */
-struct otx2_ree_vf {
- struct otx2_dev otx2_dev;
- /**< Base class */
- uint16_t max_queues;
- /**< Max queues supported */
- uint8_t nb_queues;
- /**< Number of regex queues attached */
- uint16_t max_matches;
-	/**< Max matches supported */
- uint16_t lf_msixoff[OTX2_REE_MAX_LFS];
- /**< MSI-X offsets */
- uint8_t block_address;
- /**< REE Block Address */
- uint8_t err_intr_registered:1;
- /**< Are error interrupts registered? */
-};
-
-/**
- * Device private data
- */
-struct otx2_ree_data {
- uint32_t regexdev_capa;
- uint64_t rule_flags;
- /**< Feature flags exposes HW/SW features for the given device */
- uint16_t max_rules_per_group;
- /**< Maximum rules supported per subset by this device */
- uint16_t max_groups;
-	/**< Maximum subsets (groups) supported by this device */
- void **queue_pairs;
- /**< Array of pointers to queue pairs. */
- uint16_t nb_queue_pairs;
- /**< Number of device queue pairs. */
- struct otx2_ree_vf vf;
- /**< vf data */
- struct rte_regexdev_rule *rules;
- /**< rules to be compiled */
- uint16_t nb_rules;
- /**< number of rules */
-} __rte_cache_aligned;
-
-struct otx2_ree_rid {
- uintptr_t rid;
- /** Request id of a ree operation */
- uint64_t user_id;
-	/**< Client data supplied with the enqueued operation */
-};
-
-struct otx2_ree_pending_queue {
- uint64_t pending_count;
- /** Pending requests count */
- struct otx2_ree_rid *rid_queue;
- /** Array of pending requests */
- uint16_t enq_tail;
- /** Tail of queue to be used for enqueue */
- uint16_t deq_head;
- /** Head of queue to be used for dequeue */
-};
-
-struct otx2_ree_qp {
- uint32_t id;
- /**< Queue pair id */
- uintptr_t base;
- /**< Base address where BAR is mapped */
- struct otx2_ree_pending_queue pend_q;
- /**< Pending queue */
- rte_iova_t iq_dma_addr;
- /**< Instruction queue address */
- uint32_t otx2_regexdev_jobid;
- /**< Job ID */
- uint32_t write_offset;
- /**< write offset */
- regexdev_stop_flush_t cb;
-	/**< Callback function called during rte_regexdev_stop() */
-};
-
-#endif /* _OTX2_REGEXDEV_H_ */
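
REE_MOD_INC is the wrap-around increment used on the enq_tail/deq_head indices of the pending queue above. A minimal sketch of the intended ring discipline, assuming the definitions from this header (no capacity check shown):

    /* Assumes REE_MOD_INC and struct otx2_ree_pending_queue from above. */
    static inline void
    ree_pend_q_push(struct otx2_ree_pending_queue *q, uintptr_t rid,
            uint16_t qlen)
    {
        q->rid_queue[q->enq_tail].rid = rid;
        q->pending_count++;
        REE_MOD_INC(q->enq_tail, qlen); /* wraps to 0 at qlen - 1 */
    }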
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (C) 2020 Marvell International Ltd.
- */
-
-#include <rte_malloc.h>
-#include <rte_regexdev.h>
-
-#include "otx2_regexdev.h"
-#include "otx2_regexdev_compiler.h"
-#include "otx2_regexdev_mbox.h"
-
-#ifdef REE_COMPILER_SDK
-#include <rxp-compiler.h>
-
-static int
-ree_rule_db_compile(const struct rte_regexdev_rule *rules,
- uint16_t nb_rules, struct rxp_rof **rof, struct rxp_rof **rofi,
- struct rxp_rof *rof_for_incremental_compile,
- struct rxp_rof *rofi_for_incremental_compile)
-{
- /*INPUT*/
- struct rxp_prefix_selection_control_list *prefix_selection_control_list
- = NULL;
- struct rxp_blacklist_data_sample *blacklist_sample_data = NULL;
- struct rxp_rule_ids_to_remove *rule_ids_to_remove = NULL;
- struct rxp_roff *roff_for_incremental_compile = NULL;
-
- /*OPTIONS - setting default values*/
- enum rxp_virtual_prefix_mode virtual_prefix_mode =
- RXP_VIRTUAL_PREFIX_MODE_0;
- enum rxp_prefix_capacity prefix_capacity = RXP_PREFIX_CAPACITY_32K;
-	/**< rxp_prefix_capacity */
- enum rxp_compiler_objective objective = RXP_COMPILER_OBJECTIVE_5;
- enum rxp_tpe_data_width tpe_data_width = RXP_TPE_DATA_WIDTH_4;
- uint32_t compiler_options = RXP_COMPILER_OPTIONS_FORCE;
- /**< rxp_compiler_options_flags*/
- enum rxp_verbose_level verbose = RXP_VERBOSE_LEVEL_3;
- enum rxp_version set_rxp_version = RXP_VERSION_V5_8;
- uint32_t compiler_output_flags = 0;
- /**< rxp_compiler_output_flags*/
- uint32_t global_regex_options = 0;
- /**< rxp_global_regex_options_flags*/
- float set_auto_blacklist = 0;
- uint32_t max_rep_max = 65535;
- uint32_t divide_ruleset = 1;
- struct rxp_ruleset ruleset;
- float ptpb_threshold = 0;
- uint32_t set_max = 0;
- uint32_t threads = 1;
-
- /*OUTPUT*/
- struct rxp_rule_direction_analysis *rule_direction_analysis = NULL;
- struct rxp_compilation_statistics *compilation_statistics = NULL;
- struct rxp_prefix_selection_control_list *generated_pscl = NULL;
- struct rxp_uncompiled_rules_log *uncompiled_rules_log = NULL;
- struct rxp_critical_rules_rank *critical_rules_rank = NULL;
- struct rxp_compiled_rules_log *compiled_rules_log = NULL;
- struct rxp_roff *roff = NULL;
-
- uint16_t i;
- int ret;
-
- ruleset.number_of_entries = nb_rules;
- ruleset.rules = rte_malloc("rxp_rule_entry",
- nb_rules*sizeof(struct rxp_rule_entry), 0);
-
- if (ruleset.rules == NULL) {
- otx2_err("Could not allocate memory for rule compilation\n");
- return -EFAULT;
- }
- if (rof_for_incremental_compile)
- compiler_options |= RXP_COMPILER_OPTIONS_INCREMENTAL;
- if (rofi_for_incremental_compile)
- compiler_options |= RXP_COMPILER_OPTIONS_CHECKSUM;
-
- for (i = 0; i < nb_rules; i++) {
- ruleset.rules[i].number_of_prefix_entries = 0;
- ruleset.rules[i].prefix = NULL;
- ruleset.rules[i].rule = rules[i].pcre_rule;
- ruleset.rules[i].rule_id = rules[i].rule_id;
- ruleset.rules[i].subset_id = rules[i].group_id;
- ruleset.rules[i].rule_direction_type =
- RXP_RULE_DIRECTION_TYPE_NONE;
- }
-
- ret = rxp_compile_advanced(
- /*INPUT*/
- &ruleset,
- prefix_selection_control_list,
- rof_for_incremental_compile,
- roff_for_incremental_compile,
- rofi_for_incremental_compile,
- rule_ids_to_remove,
- blacklist_sample_data,
-
- /*OPTIONS*/
- compiler_options,
- prefix_capacity,
- global_regex_options,
- set_auto_blacklist,
- set_max,
- objective,
- ptpb_threshold,
- max_rep_max,
- threads,
- set_rxp_version,
- verbose,
- tpe_data_width,
- virtual_prefix_mode,
- compiler_output_flags,
- divide_ruleset,
-
- /*OUTPUT*/
- &compilation_statistics,
- &compiled_rules_log,
- &critical_rules_rank,
- &rule_direction_analysis,
- &uncompiled_rules_log,
- rof,
- &roff,
- rofi,
- &generated_pscl);
- rte_free(ruleset.rules);
-
- return ret;
-}
-
-int
-otx2_ree_rule_db_compile_prog(struct rte_regexdev *dev)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- char compiler_version[] = "20.5.2.eda0fa2";
- char timestamp[] = "19700101_000001";
- uint32_t rule_db_len, rule_dbi_len;
- struct rxp_rof *rofi_inc_p = NULL;
- struct rxp_rof_entry rule_dbi[6];
- char *rofi_rof_entries = NULL;
- struct rxp_rof *rofi = NULL;
- struct rxp_rof *rof = NULL;
- struct rxp_rof rofi_inc;
- struct rxp_rof rof_inc;
- char *rule_db = NULL;
- int ret;
-
- ree_func_trace();
-
- ret = otx2_ree_rule_db_len_get(dev, &rule_db_len, &rule_dbi_len);
- if (ret != 0) {
- otx2_err("Could not get rule db length");
- return ret;
- }
-
- if (rule_db_len > 0) {
- otx2_ree_dbg("Incremental compile, rule db len %d rule dbi len %d",
- rule_db_len, rule_dbi_len);
- rule_db = rte_malloc("ree_rule_db", rule_db_len, 0);
- if (!rule_db) {
- otx2_err("Could not allocate memory for rule db");
- return -EFAULT;
- }
-
- ret = otx2_ree_rule_db_get(dev, rule_db, rule_db_len,
- (char *)rule_dbi, rule_dbi_len);
- if (ret) {
- otx2_err("Could not read rule db");
- rte_free(rule_db);
- return -EFAULT;
- }
- rof_inc.rof_revision = 0;
- rof_inc.rof_version = 2;
- rof_inc.rof_entries = (struct rxp_rof_entry *)rule_db;
- rof_inc.rxp_compiler_version = compiler_version;
- rof_inc.timestamp = timestamp;
- rof_inc.number_of_entries =
- (rule_db_len/sizeof(struct rxp_rof_entry));
-
- if (rule_dbi_len > 0) {
- /* incremental compilation not the first time */
- rofi_inc.rof_revision = 0;
- rofi_inc.rof_version = 2;
- rofi_inc.rof_entries = rule_dbi;
- rofi_inc.rxp_compiler_version = compiler_version;
- rofi_inc.timestamp = timestamp;
- rofi_inc.number_of_entries =
- (rule_dbi_len/sizeof(struct rxp_rof_entry));
- rofi_inc_p = &rofi_inc;
- }
-		ret = ree_rule_db_compile(data->rules, data->nb_rules, &rof,
-				&rofi, &rof_inc, rofi_inc_p);
-		if (ret != 0) {
-			otx2_err("Could not compile rule db");
-			goto free_structs;
-		}
-		/* Only dereference rofi once compilation succeeded */
-		if (rofi->number_of_entries == 0) {
- otx2_ree_dbg("No change to rule db");
- ret = 0;
- goto free_structs;
- }
- rule_dbi_len = rofi->number_of_entries *
- sizeof(struct rxp_rof_entry);
- rofi_rof_entries = (char *)rofi->rof_entries;
- } else {
- /* full compilation */
- ret = ree_rule_db_compile(data->rules, data->nb_rules, &rof,
- &rofi, NULL, NULL);
- }
- if (ret != 0) {
- otx2_err("Could not compile rule db");
- goto free_structs;
- }
- rule_db_len = rof->number_of_entries * sizeof(struct rxp_rof_entry);
- ret = otx2_ree_rule_db_prog(dev, (char *)rof->rof_entries, rule_db_len,
- rofi_rof_entries, rule_dbi_len);
- if (ret)
- otx2_err("Could not program rule db");
-
-free_structs:
- rxp_free_structs(NULL, NULL, NULL, NULL, NULL, &rof, NULL, &rofi, NULL,
- 1);
-
- if (rule_db)
- rte_free(rule_db);
-
- return ret;
-}
-#else
-int
-otx2_ree_rule_db_compile_prog(struct rte_regexdev *dev)
-{
- RTE_SET_USED(dev);
- return -ENOTSUP;
-}
-#endif
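
Without the proprietary REE compiler SDK, the stub above makes compile-and-activate return -ENOTSUP. A hedged sketch of how a caller might fall back to a pre-built database in that case (function name and fallback policy are illustrative):

    #include <errno.h>
    #include <stdint.h>
    #include <rte_regexdev.h>

    static int
    ree_app_activate_or_import(uint8_t dev_id, const char *prebuilt_db,
            uint32_t prebuilt_len)
    {
        int ret = rte_regexdev_rule_db_compile_activate(dev_id);

        if (ret == -ENOTSUP && prebuilt_db != NULL)
            /* Built without REE_COMPILER_SDK: import instead. */
            ret = rte_regexdev_rule_db_import(dev_id, prebuilt_db,
                    prebuilt_len);
        return ret;
    }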
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (C) 2020 Marvell International Ltd.
- */
-
-#ifndef _OTX2_REGEXDEV_COMPILER_H_
-#define _OTX2_REGEXDEV_COMPILER_H_
-
-int
-otx2_ree_rule_db_compile_prog(struct rte_regexdev *dev);
-
-#endif /* _OTX2_REGEXDEV_COMPILER_H_ */
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (C) 2020 Marvell International Ltd.
- */
-
-#include "otx2_common.h"
-#include "otx2_dev.h"
-#include "otx2_regexdev_hw_access.h"
-#include "otx2_regexdev_mbox.h"
-
-static void
-ree_lf_err_intr_handler(void *param)
-{
- uintptr_t base = (uintptr_t)param;
- uint8_t lf_id;
- uint64_t intr;
-
- lf_id = (base >> 12) & 0xFF;
-
- intr = otx2_read64(base + OTX2_REE_LF_MISC_INT);
- if (intr == 0)
- return;
-
- otx2_ree_dbg("LF %d MISC_INT: 0x%" PRIx64 "", lf_id, intr);
-
- /* Clear interrupt */
- otx2_write64(intr, base + OTX2_REE_LF_MISC_INT);
-}
-
-static void
-ree_lf_err_intr_unregister(const struct rte_regexdev *dev, uint16_t msix_off,
- uintptr_t base)
-{
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
- struct rte_intr_handle *handle = pci_dev->intr_handle;
-
- /* Disable error interrupts */
- otx2_write64(~0ull, base + OTX2_REE_LF_MISC_INT_ENA_W1C);
-
- otx2_unregister_irq(handle, ree_lf_err_intr_handler, (void *)base,
- msix_off);
-}
-
-void
-otx2_ree_err_intr_unregister(const struct rte_regexdev *dev)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- struct otx2_ree_vf *vf = &data->vf;
- uintptr_t base;
- uint32_t i;
-
- for (i = 0; i < vf->nb_queues; i++) {
- base = OTX2_REE_LF_BAR2(vf, i);
- ree_lf_err_intr_unregister(dev, vf->lf_msixoff[i], base);
- }
-
- vf->err_intr_registered = 0;
-}
-
-static int
-ree_lf_err_intr_register(const struct rte_regexdev *dev, uint16_t msix_off,
- uintptr_t base)
-{
- struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
- struct rte_intr_handle *handle = pci_dev->intr_handle;
- int ret;
-
- /* Disable error interrupts */
- otx2_write64(~0ull, base + OTX2_REE_LF_MISC_INT_ENA_W1C);
-
- /* Register error interrupt handler */
- ret = otx2_register_irq(handle, ree_lf_err_intr_handler, (void *)base,
- msix_off);
- if (ret)
- return ret;
-
- /* Enable error interrupts */
- otx2_write64(~0ull, base + OTX2_REE_LF_MISC_INT_ENA_W1S);
-
- return 0;
-}
-
-int
-otx2_ree_err_intr_register(const struct rte_regexdev *dev)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- struct otx2_ree_vf *vf = &data->vf;
-	uint32_t i, j;
-	int ret;
- uintptr_t base;
-
- for (i = 0; i < vf->nb_queues; i++) {
- if (vf->lf_msixoff[i] == MSIX_VECTOR_INVALID) {
- otx2_err("Invalid REE LF MSI-X offset: 0x%x",
- vf->lf_msixoff[i]);
- return -EINVAL;
- }
- }
-
- for (i = 0; i < vf->nb_queues; i++) {
- base = OTX2_REE_LF_BAR2(vf, i);
- ret = ree_lf_err_intr_register(dev, vf->lf_msixoff[i], base);
- if (ret)
- goto intr_unregister;
- }
-
- vf->err_intr_registered = 1;
- return 0;
-
-intr_unregister:
- /* Unregister the ones already registered */
- for (j = 0; j < i; j++) {
- base = OTX2_REE_LF_BAR2(vf, j);
- ree_lf_err_intr_unregister(dev, vf->lf_msixoff[j], base);
- }
- return ret;
-}
-
-int
-otx2_ree_iq_enable(const struct rte_regexdev *dev, const struct otx2_ree_qp *qp,
- uint8_t pri, uint32_t size_div2)
-{
- union otx2_ree_lf_sbuf_addr base;
- union otx2_ree_lf_ena lf_ena;
-
- /* Set instruction queue size and priority */
- otx2_ree_config_lf(dev, qp->id, pri, size_div2);
-
- /* Set instruction queue base address */
- /* Should be written after SBUF_CTL and before LF_ENA */
-
- base.u = otx2_read64(qp->base + OTX2_REE_LF_SBUF_ADDR);
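-	/* ptr field holds a 128-byte-aligned address: low 7 bits dropped */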
- base.s.ptr = qp->iq_dma_addr >> 7;
- otx2_write64(base.u, qp->base + OTX2_REE_LF_SBUF_ADDR);
-
- /* Enable instruction queue */
-
- lf_ena.u = otx2_read64(qp->base + OTX2_REE_LF_ENA);
- lf_ena.s.ena = 1;
- otx2_write64(lf_ena.u, qp->base + OTX2_REE_LF_ENA);
-
- return 0;
-}
-
-void
-otx2_ree_iq_disable(struct otx2_ree_qp *qp)
-{
- union otx2_ree_lf_ena lf_ena;
-
- /* Stop instruction execution */
- lf_ena.u = otx2_read64(qp->base + OTX2_REE_LF_ENA);
- lf_ena.s.ena = 0x0;
- otx2_write64(lf_ena.u, qp->base + OTX2_REE_LF_ENA);
-}
-
-int
-otx2_ree_max_matches_get(const struct rte_regexdev *dev, uint8_t *max_matches)
-{
- union otx2_ree_af_reexm_max_match reexm_max_match;
- int ret;
-
- ret = otx2_ree_af_reg_read(dev, REE_AF_REEXM_MAX_MATCH,
- &reexm_max_match.u);
- if (ret)
- return ret;
-
- *max_matches = reexm_max_match.s.max;
- return 0;
-}
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (C) 2020 Marvell International Ltd.
- */
-
-#ifndef _OTX2_REGEXDEV_HW_ACCESS_H_
-#define _OTX2_REGEXDEV_HW_ACCESS_H_
-
-#include <stdint.h>
-
-#include "otx2_regexdev.h"
-
-/* REE instruction queue length */
-#define OTX2_REE_IQ_LEN (1 << 13)
-
-#define OTX2_REE_DEFAULT_CMD_QLEN OTX2_REE_IQ_LEN
-
-/* Status register bits */
-#define OTX2_REE_STATUS_PMI_EOJ_BIT (1 << 14)
-#define OTX2_REE_STATUS_PMI_SOJ_BIT (1 << 13)
-#define OTX2_REE_STATUS_MP_CNT_DET_BIT (1 << 7)
-#define OTX2_REE_STATUS_MM_CNT_DET_BIT (1 << 6)
-#define OTX2_REE_STATUS_ML_CNT_DET_BIT (1 << 5)
-#define OTX2_REE_STATUS_MST_CNT_DET_BIT (1 << 4)
-#define OTX2_REE_STATUS_MPT_CNT_DET_BIT (1 << 3)
-
-/* Register offsets */
-/* REE LF registers */
-#define OTX2_REE_LF_DONE_INT 0x120ull
-#define OTX2_REE_LF_DONE_INT_W1S 0x130ull
-#define OTX2_REE_LF_DONE_INT_ENA_W1S 0x138ull
-#define OTX2_REE_LF_DONE_INT_ENA_W1C 0x140ull
-#define OTX2_REE_LF_MISC_INT 0x300ull
-#define OTX2_REE_LF_MISC_INT_W1S 0x310ull
-#define OTX2_REE_LF_MISC_INT_ENA_W1S 0x320ull
-#define OTX2_REE_LF_MISC_INT_ENA_W1C 0x330ull
-#define OTX2_REE_LF_ENA 0x10ull
-#define OTX2_REE_LF_SBUF_ADDR 0x20ull
-#define OTX2_REE_LF_DONE 0x100ull
-#define OTX2_REE_LF_DONE_ACK 0x110ull
-#define OTX2_REE_LF_DONE_WAIT 0x148ull
-#define OTX2_REE_LF_DOORBELL 0x400ull
-#define OTX2_REE_LF_OUTSTAND_JOB 0x410ull
-
-/* BAR 0 */
-#define OTX2_REE_AF_QUE_SBUF_CTL(a) (0x1200ull | (uint64_t)(a) << 3)
-#define OTX2_REE_PRIV_LF_CFG(a) (0x41000ull | (uint64_t)(a) << 3)
-
-#define OTX2_REE_LF_BAR2(vf, q_id) \
- ((vf)->otx2_dev.bar2 + \
- (((vf)->block_address << 20) | ((q_id) << 12)))
-
-#define OTX2_REE_QUEUE_HI_PRIO 0x1
-
-enum ree_desc_type_e {
- REE_TYPE_JOB_DESC = 0x0,
- REE_TYPE_RESULT_DESC = 0x1,
- REE_TYPE_ENUM_LAST = 0x2
-};
-
-union otx2_ree_priv_lf_cfg {
- uint64_t u;
- struct {
- uint64_t slot : 8;
- uint64_t pf_func : 16;
- uint64_t reserved_24_62 : 39;
- uint64_t ena : 1;
- } s;
-};
-
-union otx2_ree_lf_sbuf_addr {
- uint64_t u;
- struct {
- uint64_t off : 7;
- uint64_t ptr : 46;
- uint64_t reserved_53_63 : 11;
- } s;
-};
-
-union otx2_ree_lf_ena {
- uint64_t u;
- struct {
- uint64_t ena : 1;
- uint64_t reserved_1_63 : 63;
- } s;
-};
-
-union otx2_ree_af_reexm_max_match {
- uint64_t u;
- struct {
- uint64_t max : 8;
- uint64_t reserved_8_63 : 56;
- } s;
-};
-
-union otx2_ree_lf_done {
- uint64_t u;
- struct {
- uint64_t done : 20;
- uint64_t reserved_20_63 : 44;
- } s;
-};
-
-union otx2_ree_inst {
- uint64_t u[8];
- struct {
- uint64_t doneint : 1;
- uint64_t reserved_1_3 : 3;
- uint64_t dg : 1;
- uint64_t reserved_5_7 : 3;
- uint64_t ooj : 1;
- uint64_t reserved_9_15 : 7;
- uint64_t reserved_16_63 : 48;
- uint64_t inp_ptr_addr : 64;
- uint64_t inp_ptr_ctl : 64;
- uint64_t res_ptr_addr : 64;
- uint64_t wq_ptr : 64;
- uint64_t tag : 32;
- uint64_t tt : 2;
- uint64_t ggrp : 10;
- uint64_t reserved_364_383 : 20;
- uint64_t reserved_384_391 : 8;
- uint64_t ree_job_id : 24;
- uint64_t ree_job_ctrl : 16;
- uint64_t ree_job_length : 15;
- uint64_t reserved_447_447 : 1;
- uint64_t ree_job_subset_id_0 : 16;
- uint64_t ree_job_subset_id_1 : 16;
- uint64_t ree_job_subset_id_2 : 16;
- uint64_t ree_job_subset_id_3 : 16;
- } cn98xx;
-};
-
-union otx2_ree_res_status {
- uint64_t u;
- struct {
- uint64_t job_type : 3;
- uint64_t mpt_cnt_det : 1;
- uint64_t mst_cnt_det : 1;
- uint64_t ml_cnt_det : 1;
- uint64_t mm_cnt_det : 1;
- uint64_t mp_cnt_det : 1;
- uint64_t mode : 2;
- uint64_t reserved_10_11 : 2;
- uint64_t reserved_12_12 : 1;
- uint64_t pmi_soj : 1;
- uint64_t pmi_eoj : 1;
- uint64_t reserved_15_15 : 1;
- uint64_t reserved_16_63 : 48;
- } s;
-};
-
-union otx2_ree_res {
- uint64_t u[8];
- struct ree_res_s_98 {
- uint64_t done : 1;
- uint64_t hwjid : 7;
- uint64_t ree_res_job_id : 24;
- uint64_t ree_res_status : 16;
- uint64_t ree_res_dmcnt : 8;
- uint64_t ree_res_mcnt : 8;
- uint64_t ree_meta_ptcnt : 16;
- uint64_t ree_meta_icnt : 16;
- uint64_t ree_meta_lcnt : 16;
- uint64_t ree_pmi_min_byte_ptr : 16;
- uint64_t ree_err : 1;
- uint64_t reserved_129_190 : 62;
- uint64_t doneint : 1;
- uint64_t reserved_192_255 : 64;
- uint64_t reserved_256_319 : 64;
- uint64_t reserved_320_383 : 64;
- uint64_t reserved_384_447 : 64;
- uint64_t reserved_448_511 : 64;
- } s;
-};
-
-union otx2_ree_match {
- uint64_t u;
- struct {
- uint64_t ree_rule_id : 32;
- uint64_t start_ptr : 14;
- uint64_t reserved_46_47 : 2;
- uint64_t match_length : 15;
- uint64_t reserved_63_63 : 1;
- } s;
-};
-
-void otx2_ree_err_intr_unregister(const struct rte_regexdev *dev);
-
-int otx2_ree_err_intr_register(const struct rte_regexdev *dev);
-
-int otx2_ree_iq_enable(const struct rte_regexdev *dev,
- const struct otx2_ree_qp *qp,
-		       uint8_t pri, uint32_t size_div2);
-
-void otx2_ree_iq_disable(struct otx2_ree_qp *qp);
-
-int otx2_ree_max_matches_get(const struct rte_regexdev *dev,
- uint8_t *max_matches);
-
-#endif /* _OTX2_REGEXDEV_HW_ACCESS_H_ */
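
These unions overlay bitfield views on raw 64-bit CSR values. A minimal decode sketch, assuming the definitions above are in scope:

    #include <stdint.h>

    /* Assumes union otx2_ree_lf_done from this header. */
    static inline uint32_t
    ree_done_count(uint64_t raw)
    {
        union otx2_ree_lf_done done;

        done.u = raw;       /* value read from OTX2_REE_LF_DONE */
        return done.s.done; /* 20-bit count of completed jobs */
    }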
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (C) 2020 Marvell International Ltd.
- */
-
-#include "otx2_common.h"
-#include "otx2_dev.h"
-#include "otx2_regexdev_mbox.h"
-#include "otx2_regexdev.h"
-
-int
-otx2_ree_available_queues_get(const struct rte_regexdev *dev,
- uint16_t *nb_queues)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- struct otx2_ree_vf *vf = &data->vf;
- struct free_rsrcs_rsp *rsp;
- struct otx2_dev *otx2_dev;
- int ret;
-
- otx2_dev = &vf->otx2_dev;
- otx2_mbox_alloc_msg_free_rsrc_cnt(otx2_dev->mbox);
-
- ret = otx2_mbox_process_msg(otx2_dev->mbox, (void *)&rsp);
- if (ret)
- return -EIO;
-
- if (vf->block_address == RVU_BLOCK_ADDR_REE0)
- *nb_queues = rsp->ree0;
- else
- *nb_queues = rsp->ree1;
- return 0;
-}
-
-int
-otx2_ree_queues_attach(const struct rte_regexdev *dev, uint8_t nb_queues)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- struct otx2_ree_vf *vf = &data->vf;
- struct rsrc_attach_req *req;
- struct otx2_mbox *mbox;
-
- /* Ask AF to attach required LFs */
- mbox = vf->otx2_dev.mbox;
- req = otx2_mbox_alloc_msg_attach_resources(mbox);
-
- /* 1 LF = 1 queue */
- req->reelfs = nb_queues;
- req->ree_blkaddr = vf->block_address;
-
- if (otx2_mbox_process(mbox) < 0)
- return -EIO;
-
- /* Update number of attached queues */
- vf->nb_queues = nb_queues;
-
- return 0;
-}
-
-int
-otx2_ree_queues_detach(const struct rte_regexdev *dev)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- struct otx2_ree_vf *vf = &data->vf;
- struct rsrc_detach_req *req;
- struct otx2_mbox *mbox;
-
- mbox = vf->otx2_dev.mbox;
- req = otx2_mbox_alloc_msg_detach_resources(mbox);
- req->reelfs = true;
- req->partial = true;
- if (otx2_mbox_process(mbox) < 0)
- return -EIO;
-
- /* Queues have been detached */
- vf->nb_queues = 0;
-
- return 0;
-}
-
-int
-otx2_ree_msix_offsets_get(const struct rte_regexdev *dev)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- struct otx2_ree_vf *vf = &data->vf;
- struct msix_offset_rsp *rsp;
- struct otx2_mbox *mbox;
-	uint32_t i;
-	int ret;
-
- /* Get REE MSI-X vector offsets */
- mbox = vf->otx2_dev.mbox;
- otx2_mbox_alloc_msg_msix_offset(mbox);
-
- ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
- if (ret)
- return ret;
-
- for (i = 0; i < vf->nb_queues; i++) {
- if (vf->block_address == RVU_BLOCK_ADDR_REE0)
- vf->lf_msixoff[i] = rsp->ree0_lf_msixoff[i];
- else
- vf->lf_msixoff[i] = rsp->ree1_lf_msixoff[i];
- otx2_ree_dbg("lf_msixoff[%d] 0x%x", i, vf->lf_msixoff[i]);
- }
-
- return 0;
-}
-
-static int
-ree_send_mbox_msg(struct otx2_ree_vf *vf)
-{
- struct otx2_mbox *mbox = vf->otx2_dev.mbox;
- int ret;
-
- otx2_mbox_msg_send(mbox, 0);
-
- ret = otx2_mbox_wait_for_rsp(mbox, 0);
- if (ret < 0) {
- otx2_err("Could not get mailbox response");
- return ret;
- }
-
- return 0;
-}
-
-int
-otx2_ree_config_lf(const struct rte_regexdev *dev, uint8_t lf, uint8_t pri,
- uint32_t size)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- struct otx2_ree_vf *vf = &data->vf;
- struct ree_lf_req_msg *req;
- struct otx2_mbox *mbox;
- int ret;
-
- mbox = vf->otx2_dev.mbox;
- req = otx2_mbox_alloc_msg_ree_config_lf(mbox);
-
- req->lf = lf;
- req->pri = pri ? 1 : 0;
- req->size = size;
- req->blkaddr = vf->block_address;
-
- ret = otx2_mbox_process(mbox);
- if (ret < 0) {
- otx2_err("Could not get mailbox response");
- return ret;
- }
- return 0;
-}
-
-int
-otx2_ree_af_reg_read(const struct rte_regexdev *dev, uint64_t reg,
- uint64_t *val)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- struct otx2_ree_vf *vf = &data->vf;
- struct ree_rd_wr_reg_msg *msg;
- struct otx2_mbox_dev *mdev;
- struct otx2_mbox *mbox;
- int ret, off;
-
- mbox = vf->otx2_dev.mbox;
- mdev = &mbox->dev[0];
- msg = (struct ree_rd_wr_reg_msg *)otx2_mbox_alloc_msg_rsp(mbox, 0,
- sizeof(*msg), sizeof(*msg));
- if (msg == NULL) {
- otx2_err("Could not allocate mailbox message");
- return -EFAULT;
- }
-
- msg->hdr.id = MBOX_MSG_REE_RD_WR_REGISTER;
- msg->hdr.sig = OTX2_MBOX_REQ_SIG;
- msg->hdr.pcifunc = vf->otx2_dev.pf_func;
- msg->is_write = 0;
- msg->reg_offset = reg;
- msg->ret_val = val;
- msg->blkaddr = vf->block_address;
-
- ret = ree_send_mbox_msg(vf);
- if (ret < 0)
- return ret;
-
- off = mbox->rx_start +
- RTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
- msg = (struct ree_rd_wr_reg_msg *) ((uintptr_t)mdev->mbase + off);
-
- *val = msg->val;
-
- return 0;
-}
-
-int
-otx2_ree_af_reg_write(const struct rte_regexdev *dev, uint64_t reg,
- uint64_t val)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- struct otx2_ree_vf *vf = &data->vf;
- struct ree_rd_wr_reg_msg *msg;
- struct otx2_mbox *mbox;
-
- mbox = vf->otx2_dev.mbox;
- msg = (struct ree_rd_wr_reg_msg *)otx2_mbox_alloc_msg_rsp(mbox, 0,
- sizeof(*msg), sizeof(*msg));
- if (msg == NULL) {
- otx2_err("Could not allocate mailbox message");
- return -EFAULT;
- }
-
- msg->hdr.id = MBOX_MSG_REE_RD_WR_REGISTER;
- msg->hdr.sig = OTX2_MBOX_REQ_SIG;
- msg->hdr.pcifunc = vf->otx2_dev.pf_func;
- msg->is_write = 1;
- msg->reg_offset = reg;
- msg->val = val;
- msg->blkaddr = vf->block_address;
-
- return ree_send_mbox_msg(vf);
-}
-
-int
-otx2_ree_rule_db_get(const struct rte_regexdev *dev, char *rule_db,
- uint32_t rule_db_len, char *rule_dbi, uint32_t rule_dbi_len)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- struct ree_rule_db_get_req_msg *req;
- struct ree_rule_db_get_rsp_msg *rsp;
- char *rule_db_ptr = (char *)rule_db;
- struct otx2_ree_vf *vf = &data->vf;
- struct otx2_mbox *mbox;
- int ret, last = 0;
- uint32_t len = 0;
-
- mbox = vf->otx2_dev.mbox;
- if (!rule_db) {
- otx2_err("Couldn't return rule db due to NULL pointer");
- return -EFAULT;
- }
-
- while (!last) {
- req = (struct ree_rule_db_get_req_msg *)
- otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
- sizeof(*rsp));
- if (!req) {
- otx2_err("Could not allocate mailbox message");
- return -EFAULT;
- }
-
- req->hdr.id = MBOX_MSG_REE_RULE_DB_GET;
- req->hdr.sig = OTX2_MBOX_REQ_SIG;
- req->hdr.pcifunc = vf->otx2_dev.pf_func;
- req->blkaddr = vf->block_address;
- req->is_dbi = 0;
- req->offset = len;
- ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
- if (ret)
- return ret;
- if (rule_db_len < len + rsp->len) {
- otx2_err("Rule db size is too small");
- return -EFAULT;
- }
- otx2_mbox_memcpy(rule_db_ptr, rsp->rule_db, rsp->len);
- len += rsp->len;
- rule_db_ptr = rule_db_ptr + rsp->len;
- last = rsp->is_last;
- }
-
- if (rule_dbi) {
- req = (struct ree_rule_db_get_req_msg *)
- otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
- sizeof(*rsp));
- if (!req) {
- otx2_err("Could not allocate mailbox message");
- return -EFAULT;
- }
-
- req->hdr.id = MBOX_MSG_REE_RULE_DB_GET;
- req->hdr.sig = OTX2_MBOX_REQ_SIG;
- req->hdr.pcifunc = vf->otx2_dev.pf_func;
- req->blkaddr = vf->block_address;
- req->is_dbi = 1;
- req->offset = 0;
-
- ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
- if (ret)
- return ret;
- if (rule_dbi_len < rsp->len) {
- otx2_err("Rule dbi size is too small");
- return -EFAULT;
- }
- otx2_mbox_memcpy(rule_dbi, rsp->rule_db, rsp->len);
- }
- return 0;
-}
-
-int
-otx2_ree_rule_db_len_get(const struct rte_regexdev *dev,
- uint32_t *rule_db_len,
- uint32_t *rule_dbi_len)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- struct ree_rule_db_len_rsp_msg *rsp;
- struct otx2_ree_vf *vf = &data->vf;
- struct ree_req_msg *req;
- struct otx2_mbox *mbox;
- int ret;
-
- mbox = vf->otx2_dev.mbox;
- req = (struct ree_req_msg *)
- otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req), sizeof(*rsp));
- if (!req) {
- otx2_err("Could not allocate mailbox message");
- return -EFAULT;
- }
-
- req->hdr.id = MBOX_MSG_REE_RULE_DB_LEN_GET;
- req->hdr.sig = OTX2_MBOX_REQ_SIG;
- req->hdr.pcifunc = vf->otx2_dev.pf_func;
- req->blkaddr = vf->block_address;
- ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
- if (ret)
- return ret;
- if (rule_db_len != NULL)
- *rule_db_len = rsp->len;
- if (rule_dbi_len != NULL)
- *rule_dbi_len = rsp->inc_len;
-
- return 0;
-}
-
-static int
-ree_db_msg(const struct rte_regexdev *dev, const char *db, uint32_t db_len,
- int inc, int dbi)
-{
- struct otx2_ree_data *data = dev->data->dev_private;
- uint32_t len_left = db_len, offset = 0;
- struct ree_rule_db_prog_req_msg *req;
- struct otx2_ree_vf *vf = &data->vf;
- const char *rule_db_ptr = db;
- struct otx2_mbox *mbox;
- struct msg_rsp *rsp;
- int ret;
-
- mbox = vf->otx2_dev.mbox;
- while (len_left) {
- req = (struct ree_rule_db_prog_req_msg *)
- otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
- sizeof(*rsp));
- if (!req) {
- otx2_err("Could not allocate mailbox message");
- return -EFAULT;
- }
- req->hdr.id = MBOX_MSG_REE_RULE_DB_PROG;
- req->hdr.sig = OTX2_MBOX_REQ_SIG;
- req->hdr.pcifunc = vf->otx2_dev.pf_func;
- req->offset = offset;
- req->total_len = db_len;
- req->len = REE_RULE_DB_REQ_BLOCK_SIZE;
- req->is_incremental = inc;
- req->is_dbi = dbi;
- req->blkaddr = vf->block_address;
-
-		if (len_left <= REE_RULE_DB_REQ_BLOCK_SIZE) {
- req->is_last = true;
- req->len = len_left;
- }
- otx2_mbox_memcpy(req->rule_db, rule_db_ptr, req->len);
- ret = otx2_mbox_process_msg(mbox, (void *)&rsp);
- if (ret) {
- otx2_err("Programming mailbox processing failed");
- return ret;
- }
- len_left -= req->len;
- offset += req->len;
- rule_db_ptr = rule_db_ptr + req->len;
- }
- return 0;
-}
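
The loop above streams the database to the AF driver in fixed-size mailbox blocks, marking the final (possibly short) block. A self-contained sketch of the same chunking technique (chunk size and send callback are illustrative stand-ins for the mailbox machinery):

    #include <stdint.h>

    typedef int (*send_block_t)(const char *buf, uint32_t len,
            uint32_t offset, int is_last);

    static int
    send_chunked(const char *db, uint32_t db_len, uint32_t chunk,
            send_block_t send)
    {
        uint32_t offset = 0;

        while (offset < db_len) {
            uint32_t len = db_len - offset;
            int last = 0;

            if (len > chunk)
                len = chunk;
            else
                last = 1; /* final, possibly short, block */
            if (send(db + offset, len, offset, last))
                return -1;
            offset += len;
        }
        return 0;
    }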
-
-int
-otx2_ree_rule_db_prog(const struct rte_regexdev *dev, const char *rule_db,
- uint32_t rule_db_len, const char *rule_dbi,
- uint32_t rule_dbi_len)
-{
- int inc, ret;
-
- if (rule_db_len == 0) {
- otx2_err("Couldn't program empty rule db");
- return -EFAULT;
- }
- inc = (rule_dbi_len != 0);
- if ((rule_db == NULL) || (inc && (rule_dbi == NULL))) {
- otx2_err("Couldn't program NULL rule db");
- return -EFAULT;
- }
- if (inc) {
- ret = ree_db_msg(dev, rule_dbi, rule_dbi_len, inc, 1);
- if (ret)
- return ret;
- }
- return ree_db_msg(dev, rule_db, rule_db_len, inc, 0);
-}
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (C) 2020 Marvell International Ltd.
- */
-
-#ifndef _OTX2_REGEXDEV_MBOX_H_
-#define _OTX2_REGEXDEV_MBOX_H_
-
-#include <rte_regexdev.h>
-
-int otx2_ree_available_queues_get(const struct rte_regexdev *dev,
- uint16_t *nb_queues);
-
-int otx2_ree_queues_attach(const struct rte_regexdev *dev, uint8_t nb_queues);
-
-int otx2_ree_queues_detach(const struct rte_regexdev *dev);
-
-int otx2_ree_msix_offsets_get(const struct rte_regexdev *dev);
-
-int otx2_ree_config_lf(const struct rte_regexdev *dev, uint8_t lf, uint8_t pri,
- uint32_t size);
-
-int otx2_ree_af_reg_read(const struct rte_regexdev *dev, uint64_t reg,
- uint64_t *val);
-
-int otx2_ree_af_reg_write(const struct rte_regexdev *dev, uint64_t reg,
- uint64_t val);
-
-int otx2_ree_rule_db_get(const struct rte_regexdev *dev, char *rule_db,
- uint32_t rule_db_len, char *rule_dbi, uint32_t rule_dbi_len);
-
-int otx2_ree_rule_db_len_get(const struct rte_regexdev *dev,
- uint32_t *rule_db_len, uint32_t *rule_dbi_len);
-
-int otx2_ree_rule_db_prog(const struct rte_regexdev *dev, const char *rule_db,
- uint32_t rule_db_len, const char *rule_dbi,
- uint32_t rule_dbi_len);
-
-#endif /* _OTX2_REGEXDEV_MBOX_H_ */
+++ /dev/null
-DPDK_22 {
- local: *;
-};