/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2020 Marvell International Ltd.
 */

#include <stdio.h>
#include <unistd.h>

#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_regexdev.h>
#include <rte_regexdev_core.h>
#include <rte_regexdev_driver.h>

/* REE common headers */
#include "otx2_common.h"
#include "otx2_dev.h"
#include "otx2_regexdev.h"
#include "otx2_regexdev_compiler.h"
#include "otx2_regexdev_hw_access.h"
#include "otx2_regexdev_mbox.h"

/* HW matches are at offset 0x80 from RES_PTR_ADDR.
 * In the op structure, matches start at W5 (0x28).
 * The tail matches written by HW at 0x80 must therefore be copied back
 * to 0x28. That region is 88 B and each match takes 8 B, so up to 11
 * matches can be copied.
 */
#define REE_NUM_MATCHES_ALIGN	11
/* The REE co-processor will write up to 254 job match structures
 * (REE_MATCH_S) starting at address [RES_PTR_ADDR] + 0x80.
 */
#define REE_MATCH_OFFSET	0x80

#define REE_MAX_RULES_PER_GROUP 0xFFFF
#define REE_MAX_GROUPS 0xFFFF

/* This is temporarily here */
#define REE0_PF	19
#define REE1_PF	20

#define REE_RULE_DB_VERSION	2
#define REE_RULE_DB_REVISION	0

struct ree_rule_db_entry {
	uint8_t type;
	uint32_t addr;
	uint64_t value;
};

struct ree_rule_db {
	uint32_t version;
	uint32_t revision;
	uint32_t number_of_entries;
	struct ree_rule_db_entry entries[];
};
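
/*
 * A rule database with N entries thus occupies sizeof(struct ree_rule_db) +
 * N * sizeof(struct ree_rule_db_entry) bytes; the import and configure
 * paths below use this relation to validate caller-supplied buffers.
 */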

static void
qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
{
	snprintf(name, size, "otx2_ree_lf_mem_%u:%u", dev_id, qp_id);
}

static struct otx2_ree_qp *
ree_qp_create(const struct rte_regexdev *dev, uint16_t qp_id)
{
	struct otx2_ree_data *data = dev->data->dev_private;
	uint64_t pg_sz = sysconf(_SC_PAGESIZE);
	struct otx2_ree_vf *vf = &data->vf;
	const struct rte_memzone *lf_mem;
	uint32_t len, iq_len, size_div2;
	char name[RTE_MEMZONE_NAMESIZE];
	uint64_t used_len, iova;
	struct otx2_ree_qp *qp;
	uint8_t *va;
	int ret;

	/* Allocate queue pair */
	qp = rte_zmalloc("OCTEON TX2 Regex PMD Queue Pair", sizeof(*qp),
			 OTX2_ALIGN);
	if (qp == NULL) {
		otx2_err("Could not allocate queue pair");
		return NULL;
	}

	iq_len = OTX2_REE_IQ_LEN;

	/*
	 * Queue size must be in units of 128B, i.e. 2 * REE_INST_S (which
	 * is 64B each). The effective queue size visible to software is
	 * (size - 1) * 128.
	 */
	size_div2 = iq_len >> 1;
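	/*
	 * size_div2 thus expresses the queue size in 128B units (pairs of
	 * 64B instructions); it is passed to otx2_ree_iq_enable() below as
	 * the hardware queue-size field.
	 */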

	/* For pending queue */
	len = iq_len * RTE_ALIGN(sizeof(struct otx2_ree_rid), 8);

	/* So that instruction queues start as pg size aligned */
	len = RTE_ALIGN(len, pg_sz);

	/* For instruction queues */
	len += OTX2_REE_IQ_LEN * sizeof(union otx2_ree_inst);

	/* Wasted space after instruction queues */
	len = RTE_ALIGN(len, pg_sz);

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			    qp_id);

	lf_mem = rte_memzone_reserve_aligned(name, len, vf->otx2_dev.node,
			RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_256MB,
			RTE_CACHE_LINE_SIZE);
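	/*
	 * Resulting memzone layout:
	 *   [pending queue (rid ring) | pad to page size | instruction queue]
	 * The pending queue is software-only state, while the instruction
	 * queue is fetched by hardware, hence the page-aligned split.
	 */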
	if (lf_mem == NULL) {
		otx2_err("Could not allocate reserved memzone");
		goto qp_free;
	}

	va = lf_mem->addr;
	iova = lf_mem->iova;

	memset(va, 0, len);

	/* Initialize pending queue */
	qp->pend_q.rid_queue = (struct otx2_ree_rid *)va;
	qp->pend_q.enq_tail = 0;
	qp->pend_q.deq_head = 0;
	qp->pend_q.pending_count = 0;

	/* Skip past the page-aligned pending queue to get the IQ base */
	used_len = iq_len * RTE_ALIGN(sizeof(struct otx2_ree_rid), 8);
	used_len = RTE_ALIGN(used_len, pg_sz);
	iova += used_len;

	qp->iq_dma_addr = iova;
	qp->id = qp_id;
	qp->base = OTX2_REE_LF_BAR2(vf, qp_id);
	qp->otx2_regexdev_jobid = 0;
	qp->write_offset = 0;

	ret = otx2_ree_iq_enable(dev, qp, OTX2_REE_QUEUE_HI_PRIO, size_div2);
	if (ret) {
		otx2_err("Could not enable instruction queue");
		goto qp_free;
	}

	return qp;

qp_free:
	rte_free(qp);
	return NULL;
}

static int
ree_qp_destroy(const struct rte_regexdev *dev, struct otx2_ree_qp *qp)
{
	const struct rte_memzone *lf_mem;
	char name[RTE_MEMZONE_NAMESIZE];
	int ret;

	otx2_ree_iq_disable(qp);

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			    qp->id);
	lf_mem = rte_memzone_lookup(name);

	ret = rte_memzone_free(lf_mem);
	if (ret)
		return ret;

	rte_free(qp);

	return 0;
}

static int
ree_queue_pair_release(struct rte_regexdev *dev, uint16_t qp_id)
{
	struct otx2_ree_data *data = dev->data->dev_private;
	struct otx2_ree_qp *qp = data->queue_pairs[qp_id];
	int ret;

	ree_func_trace("Queue=%d", qp_id);

	if (qp == NULL)
		return -EINVAL;

	ret = ree_qp_destroy(dev, qp);
	if (ret) {
		otx2_err("Could not destroy queue pair %d", qp_id);
		return ret;
	}

	data->queue_pairs[qp_id] = NULL;

	return 0;
}

static struct rte_regexdev *
ree_dev_register(const char *name)
{
	struct rte_regexdev *dev;

	otx2_ree_dbg("Creating regexdev %s\n", name);

	/* allocate device structure */
	dev = rte_regexdev_register(name);
	if (dev == NULL) {
		otx2_err("Failed to allocate regex device for %s", name);
		return NULL;
	}

	/* allocate private device structure */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		dev->data->dev_private =
				rte_zmalloc_socket("regexdev device private",
						   sizeof(struct otx2_ree_data),
						   RTE_CACHE_LINE_SIZE,
						   rte_socket_id());
		if (dev->data->dev_private == NULL) {
			otx2_err("Cannot allocate memory for dev %s private data",
				 name);
			rte_regexdev_unregister(dev);
			return NULL;
		}
	}

	return dev;
}

static int
ree_dev_unregister(struct rte_regexdev *dev)
{
	otx2_ree_dbg("Closing regex device %s", dev->device->name);

	/* free regex device */
	rte_regexdev_unregister(dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(dev->data->dev_private);

	return 0;
}

static int
ree_dev_fini(struct rte_regexdev *dev)
{
	struct otx2_ree_data *data = dev->data->dev_private;
	struct rte_pci_device *pci_dev;
	int i, ret;

	ree_func_trace();

	for (i = 0; i < data->nb_queue_pairs; i++) {
		ret = ree_queue_pair_release(dev, i);
		if (ret)
			return ret;
	}

	ret = otx2_ree_queues_detach(dev);
	if (ret)
		otx2_err("Could not detach queues");

	/* TEMP : should be in lib */
	if (data->queue_pairs)
		rte_free(data->queue_pairs);
	if (data->rules)
		rte_free(data->rules);

	pci_dev = container_of(dev->device, struct rte_pci_device, device);
	otx2_dev_fini(pci_dev, &(data->vf.otx2_dev));

	ret = ree_dev_unregister(dev);
	if (ret)
		otx2_err("Could not destroy PMD");

	return ret;
}

static __rte_always_inline int
ree_enqueue(struct otx2_ree_qp *qp, struct rte_regex_ops *op,
	    struct otx2_ree_pending_queue *pend_q)
{
	union otx2_ree_inst inst;
	union otx2_ree_res *res;
	uint32_t offset;

	if (unlikely(pend_q->pending_count >= OTX2_REE_DEFAULT_CMD_QLEN)) {
		otx2_err("Pending count %" PRIu64 " is greater than Q size %d",
			 pend_q->pending_count, OTX2_REE_DEFAULT_CMD_QLEN);
		return -EAGAIN;
	}

	if (unlikely(op->mbuf->data_len > OTX2_REE_MAX_PAYLOAD_SIZE ||
		     op->mbuf->data_len == 0)) {
		otx2_err("Packet length %d is zero or greater than MAX payload %d",
			 op->mbuf->data_len, OTX2_REE_MAX_PAYLOAD_SIZE);
		return -EAGAIN;
	}

	/* W 0 */
	inst.cn98xx.ooj = 1;
	inst.cn98xx.dg = 0;
	inst.cn98xx.doneint = 0;
	/* W 1 */
	inst.cn98xx.inp_ptr_addr = rte_pktmbuf_mtod(op->mbuf, uint64_t);
	/* W 2 */
	inst.cn98xx.inp_ptr_ctl = op->mbuf->data_len & 0x7FFF;
	inst.cn98xx.inp_ptr_ctl = inst.cn98xx.inp_ptr_ctl << 32;
	/* W 3 */
	inst.cn98xx.res_ptr_addr = (uint64_t)op;
	/* W 4 */
	inst.cn98xx.wq_ptr = 0;
	/* W 5 */
	inst.cn98xx.ggrp = 0;
	inst.cn98xx.tt = 0;
	inst.cn98xx.tag = 0;
	/* W 6 */
	inst.cn98xx.ree_job_length = op->mbuf->data_len & 0x7FFF;
	if (op->req_flags & RTE_REGEX_OPS_REQ_STOP_ON_MATCH_F)
		inst.cn98xx.ree_job_ctrl = (0x2 << 8);
	else if (op->req_flags & RTE_REGEX_OPS_REQ_MATCH_HIGH_PRIORITY_F)
		inst.cn98xx.ree_job_ctrl = (0x1 << 8);
	else
		inst.cn98xx.ree_job_ctrl = 0;
	inst.cn98xx.ree_job_id = qp->otx2_regexdev_jobid;
	/* W 7 */
	inst.cn98xx.ree_job_subset_id_0 = op->group_id0;
	if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID1_VALID_F)
		inst.cn98xx.ree_job_subset_id_1 = op->group_id1;
	else
		inst.cn98xx.ree_job_subset_id_1 = op->group_id0;
	if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID2_VALID_F)
		inst.cn98xx.ree_job_subset_id_2 = op->group_id2;
	else
		inst.cn98xx.ree_job_subset_id_2 = op->group_id0;
	if (op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID3_VALID_F)
		inst.cn98xx.ree_job_subset_id_3 = op->group_id3;
	else
		inst.cn98xx.ree_job_subset_id_3 = op->group_id0;

	/* Copy REE command to Q */
	offset = qp->write_offset * sizeof(inst);
	memcpy((void *)(qp->iq_dma_addr + offset), &inst, sizeof(inst));

	pend_q->rid_queue[pend_q->enq_tail].rid = (uintptr_t)op;
	pend_q->rid_queue[pend_q->enq_tail].user_id = op->user_id;

	/* Mark result as not done */
	res = (union otx2_ree_res *)(op);
	res->s.done = 0;
	res->s.ree_err = 0;

	/* We will use soft queue length here to limit requests */
	REE_MOD_INC(pend_q->enq_tail, OTX2_REE_DEFAULT_CMD_QLEN);
	pend_q->pending_count += 1;
	REE_MOD_INC(qp->otx2_regexdev_jobid, 0xFFFFFF);
	REE_MOD_INC(qp->write_offset, OTX2_REE_IQ_LEN);

	return 0;
}

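/*
 * REE_MOD_INC() (from otx2_regexdev.h) is a wrapping increment. A minimal
 * sketch of the intended semantics; the macro's actual definition may
 * differ in detail:
 *
 *	#define REE_MOD_INC(i, l) ((i) == ((l) - 1) ? (i) = 0 : (i)++)
 */
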
static uint16_t
otx2_ree_enqueue_burst(struct rte_regexdev *dev, uint16_t qp_id,
		       struct rte_regex_ops **ops, uint16_t nb_ops)
{
	struct otx2_ree_data *data = dev->data->dev_private;
	struct otx2_ree_qp *qp = data->queue_pairs[qp_id];
	struct otx2_ree_pending_queue *pend_q;
	uint16_t nb_allowed, count = 0;
	struct rte_regex_ops *op;
	int ret;

	pend_q = &qp->pend_q;

	nb_allowed = OTX2_REE_DEFAULT_CMD_QLEN - pend_q->pending_count;
	if (nb_ops > nb_allowed)
		nb_ops = nb_allowed;

	for (count = 0; count < nb_ops; count++) {
		op = ops[count];
		ret = ree_enqueue(qp, op, pend_q);
		if (unlikely(ret))
			break;
	}

	/*
	 * Make sure all instructions are written before DOORBELL is activated
	 */
	rte_io_wmb();

	/* Update Doorbell */
	otx2_write64(count, qp->base + OTX2_REE_LF_DOORBELL);

	return count;
}

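/*
 * Usage sketch (illustrative, not part of the PMD): applications drive
 * these burst hooks through the public regexdev API, e.g.
 *
 *	uint16_t n = rte_regexdev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *	uint16_t done = 0;
 *	while (done < n)
 *		done += rte_regexdev_dequeue_burst(dev_id, qp_id,
 *						   &ops[done], n - done);
 *
 * dev_id, qp_id and ops are application-side names, not symbols from this
 * file.
 */
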
static inline void
ree_dequeue_post_process(struct rte_regex_ops *ops)
{
	uint8_t ree_res_mcnt, ree_res_dmcnt;
	int off = REE_MATCH_OFFSET;
	struct ree_res_s_98 *res;
	uint16_t ree_res_status;
	uint64_t match;

	res = (struct ree_res_s_98 *)ops;
	/* Store the res values on the stack since ops and res
	 * share the same memory.
	 */
	ree_res_status = res->ree_res_status;
	ree_res_mcnt = res->ree_res_mcnt;
	ree_res_dmcnt = res->ree_res_dmcnt;
	ops->rsp_flags = 0;
	ops->nb_actual_matches = ree_res_dmcnt;
	ops->nb_matches = ree_res_mcnt;
	if (unlikely(res->ree_err)) {
		ops->nb_actual_matches = 0;
		ops->nb_matches = 0;
	}

	if (unlikely(ree_res_status != REE_TYPE_RESULT_DESC)) {
		if (ree_res_status & OTX2_REE_STATUS_PMI_SOJ_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_SOJ_F;
		if (ree_res_status & OTX2_REE_STATUS_PMI_EOJ_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_EOJ_F;
		if (ree_res_status & OTX2_REE_STATUS_ML_CNT_DET_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_SCAN_TIMEOUT_F;
		if (ree_res_status & OTX2_REE_STATUS_MM_CNT_DET_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_MATCH_F;
		if (ree_res_status & OTX2_REE_STATUS_MP_CNT_DET_BIT)
			ops->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_PREFIX_F;
	}
	if (ops->nb_matches > 0) {
		/* Move the matches to the correct offset */
		off = ((ops->nb_matches < REE_NUM_MATCHES_ALIGN) ?
		       ops->nb_matches : REE_NUM_MATCHES_ALIGN);
		match = (uint64_t)ops + REE_MATCH_OFFSET;
		match += (ops->nb_matches - off) *
			sizeof(union otx2_ree_match);
		memcpy((void *)ops->matches, (void *)match,
		       off * sizeof(union otx2_ree_match));
	}
}

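/*
 * Worked example for the tail copy above: with nb_matches == 20, off is
 * clamped to REE_NUM_MATCHES_ALIGN (11) and the copy source starts
 * (20 - 11) * 8 B into the HW match area, so the last 11 REE_MATCH_S
 * structures land in ops->matches.
 */
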
static uint16_t
otx2_ree_dequeue_burst(struct rte_regexdev *dev, uint16_t qp_id,
		       struct rte_regex_ops **ops, uint16_t nb_ops)
{
	struct otx2_ree_data *data = dev->data->dev_private;
	struct otx2_ree_qp *qp = data->queue_pairs[qp_id];
	struct otx2_ree_pending_queue *pend_q;
	int i, nb_pending, nb_completed = 0;
	volatile struct ree_res_s_98 *res;
	struct otx2_ree_rid *rid;

	pend_q = &qp->pend_q;

	nb_pending = pend_q->pending_count;
	if (nb_ops > nb_pending)
		nb_ops = nb_pending;

	for (i = 0; i < nb_ops; i++) {
		rid = &pend_q->rid_queue[pend_q->deq_head];
		res = (volatile struct ree_res_s_98 *)(rid->rid);

		/* Check response header done bit if completed */
		if (unlikely(!res->done))
			break;

		ops[i] = (struct rte_regex_ops *)(rid->rid);
		ops[i]->user_id = rid->user_id;

		REE_MOD_INC(pend_q->deq_head, OTX2_REE_DEFAULT_CMD_QLEN);
		pend_q->pending_count -= 1;
	}

	nb_completed = i;

	for (i = 0; i < nb_completed; i++)
		ree_dequeue_post_process(ops[i]);

	return nb_completed;
}

static int
otx2_ree_dev_info_get(struct rte_regexdev *dev, struct rte_regexdev_info *info)
{
	struct otx2_ree_data *data = dev->data->dev_private;
	struct otx2_ree_vf *vf = &data->vf;

	ree_func_trace();

	if (info == NULL)
		return -EINVAL;

	info->driver_name = dev->device->driver->name;
	info->dev = dev->device;

	info->max_queue_pairs = vf->max_queues;
	info->max_matches = vf->max_matches;
	info->max_payload_size = OTX2_REE_MAX_PAYLOAD_SIZE;
	info->max_rules_per_group = data->max_rules_per_group;
	info->max_groups = data->max_groups;
	info->regexdev_capa = data->regexdev_capa;
	info->rule_flags = data->rule_flags;

	return 0;
}

static int
otx2_ree_dev_config(struct rte_regexdev *dev,
		    const struct rte_regexdev_config *cfg)
{
	struct otx2_ree_data *data = dev->data->dev_private;
	struct otx2_ree_vf *vf = &data->vf;
	const struct ree_rule_db *rule_db;
	uint32_t rule_db_len;
	int ret;

	ree_func_trace();

	if (cfg->nb_queue_pairs > vf->max_queues) {
		otx2_err("Invalid number of queue pairs requested");
		return -EINVAL;
	}

	if (cfg->nb_max_matches != vf->max_matches) {
		otx2_err("Invalid number of max matches requested");
		return -EINVAL;
	}

	if (cfg->dev_cfg_flags != 0) {
		otx2_err("Invalid device configuration flags requested");
		return -EINVAL;
	}

	/* Unregister error interrupts */
	if (vf->err_intr_registered)
		otx2_ree_err_intr_unregister(dev);

	/* Detach queues */
	if (vf->nb_queues) {
		ret = otx2_ree_queues_detach(dev);
		if (ret) {
			otx2_err("Could not detach REE queues");
			return ret;
		}
	}

	/* TEMP : should be in lib */
	if (data->queue_pairs == NULL) { /* first time configuration */
		data->queue_pairs = rte_zmalloc("regexdev->queue_pairs",
				sizeof(data->queue_pairs[0]) *
				cfg->nb_queue_pairs, RTE_CACHE_LINE_SIZE);
		if (data->queue_pairs == NULL) {
			data->nb_queue_pairs = 0;
			otx2_err("Failed to get memory for qp meta data, nb_queues %u",
				 cfg->nb_queue_pairs);
			return -ENOMEM;
		}
	} else { /* re-configure */
		uint16_t old_nb_queues = data->nb_queue_pairs;
		struct otx2_ree_qp **qp;
		unsigned int i;

		qp = data->queue_pairs;

		for (i = cfg->nb_queue_pairs; i < old_nb_queues; i++) {
			ret = ree_queue_pair_release(dev, i);
			if (ret < 0)
				return ret;
		}

		qp = rte_realloc(qp, sizeof(qp[0]) * cfg->nb_queue_pairs,
				 RTE_CACHE_LINE_SIZE);
		if (qp == NULL) {
			otx2_err("Failed to realloc qp meta data, nb_queues %u",
				 cfg->nb_queue_pairs);
			return -ENOMEM;
		}

		if (cfg->nb_queue_pairs > old_nb_queues) {
			uint16_t new_qs = cfg->nb_queue_pairs - old_nb_queues;
			memset(qp + old_nb_queues, 0, sizeof(qp[0]) * new_qs);
		}

		data->queue_pairs = qp;
	}
	data->nb_queue_pairs = cfg->nb_queue_pairs;

	/* Attach queues */
	otx2_ree_dbg("Attach %d queues", cfg->nb_queue_pairs);
	ret = otx2_ree_queues_attach(dev, cfg->nb_queue_pairs);
	if (ret) {
		otx2_err("Could not attach queues");
		return -ENODEV;
	}

	ret = otx2_ree_msix_offsets_get(dev);
	if (ret) {
		otx2_err("Could not get MSI-X offsets");
		goto queues_detach;
	}

	if (cfg->rule_db && cfg->rule_db_len) {
		otx2_ree_dbg("rule_db length %d", cfg->rule_db_len);
		rule_db = (const struct ree_rule_db *)cfg->rule_db;
		rule_db_len = rule_db->number_of_entries *
				sizeof(struct ree_rule_db_entry);
		otx2_ree_dbg("rule_db number of entries %d",
			     rule_db->number_of_entries);
		if (rule_db_len > cfg->rule_db_len) {
			otx2_err("Invalid rule db length");
			ret = -EINVAL;
			goto queues_detach;
		}
		ret = otx2_ree_rule_db_prog(dev,
				(const char *)rule_db->entries,
				rule_db_len, NULL, OTX2_REE_NON_INC_PROG);
		if (ret) {
			otx2_err("Could not program rule db");
			goto queues_detach;
		}
	}

	dev->enqueue = otx2_ree_enqueue_burst;
	dev->dequeue = otx2_ree_dequeue_burst;

	rte_mb();
	return 0;

queues_detach:
	otx2_ree_queues_detach(dev);
	return ret;
}

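/*
 * Configuration sketch (illustrative): a minimal rte_regexdev_config
 * accepted by the checks above keeps dev_cfg_flags at 0 and mirrors the
 * device's reported limits; info, cfg and dev_id are application-side
 * variables, not symbols from this file.
 *
 *	struct rte_regexdev_config cfg = {
 *		.nb_queue_pairs = 1,
 *		.nb_max_matches = info.max_matches,
 *		.nb_rules_per_group = info.max_rules_per_group,
 *		.nb_groups = 1,
 *		.dev_cfg_flags = 0,
 *	};
 *	ret = rte_regexdev_configure(dev_id, &cfg);
 */
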
static int
otx2_ree_stop(struct rte_regexdev *dev)
{
	RTE_SET_USED(dev);

	ree_func_trace();
	return 0;
}

static int
otx2_ree_start(struct rte_regexdev *dev)
{
	uint32_t rule_db_len = 0;
	int ret;

	ree_func_trace();

	ret = otx2_ree_rule_db_len_get(dev, &rule_db_len, NULL);
	if (ret)
		return ret;
	if (rule_db_len == 0) {
		otx2_err("Rule db not programmed");
		return -EFAULT;
	}

	return 0;
}

static int
otx2_ree_close(struct rte_regexdev *dev)
{
	return ree_dev_fini(dev);
}

static int
otx2_ree_queue_pair_setup(struct rte_regexdev *dev, uint16_t qp_id,
			  const struct rte_regexdev_qp_conf *qp_conf)
{
	struct otx2_ree_data *data = dev->data->dev_private;
	struct otx2_ree_qp *qp;

	ree_func_trace("Queue=%d", qp_id);

	if (data->queue_pairs[qp_id] != NULL)
		ree_queue_pair_release(dev, qp_id);

	if (qp_conf->nb_desc > OTX2_REE_DEFAULT_CMD_QLEN) {
		otx2_err("Could not setup queue pair for %u descriptors",
			 qp_conf->nb_desc);
		return -EINVAL;
	}
	if (qp_conf->qp_conf_flags != 0) {
		otx2_err("Could not setup queue pair with configuration flags 0x%x",
			 qp_conf->qp_conf_flags);
		return -EINVAL;
	}

	qp = ree_qp_create(dev, qp_id);
	if (qp == NULL) {
		otx2_err("Could not create queue pair %d", qp_id);
		return -ENOMEM;
	}
	qp->cb = qp_conf->cb;
	data->queue_pairs[qp_id] = qp;

	return 0;
}

static int
otx2_ree_rule_db_compile_activate(struct rte_regexdev *dev)
{
	return otx2_ree_rule_db_compile_prog(dev);
}

static int
otx2_ree_rule_db_update(struct rte_regexdev *dev,
			const struct rte_regexdev_rule *rules,
			uint16_t nb_rules)
{
	struct otx2_ree_data *data = dev->data->dev_private;
	struct rte_regexdev_rule *old_ptr;
	uint32_t i, sum_nb_rules;

	ree_func_trace("nb_rules=%d", nb_rules);

	for (i = 0; i < nb_rules; i++) {
		if (rules[i].op == RTE_REGEX_RULE_OP_REMOVE)
			break;
		if (rules[i].group_id >= data->max_groups)
			break;
		if (rules[i].rule_id >= data->max_rules_per_group)
			break;
		/* logical implication
		 * p    q    p -> q
		 * 0    0      1
		 * 0    1      1
		 * 1    0      0
		 * 1    1      1
		 */
		if ((~(rules[i].rule_flags) | data->rule_flags) == 0)
			break;
	}
	nb_rules = i;

	if (data->nb_rules == 0) {
		data->rules = rte_malloc("rte_regexdev_rules",
				nb_rules * sizeof(struct rte_regexdev_rule),
				0);
		if (data->rules == NULL)
			return -ENOMEM;

		memcpy(data->rules, rules,
		       nb_rules * sizeof(struct rte_regexdev_rule));
		data->nb_rules = nb_rules;
	} else {
		old_ptr = data->rules;
		sum_nb_rules = data->nb_rules + nb_rules;
		data->rules = rte_realloc(data->rules,
				sum_nb_rules * sizeof(struct rte_regexdev_rule),
				0);
		if (data->rules == NULL) {
			data->rules = old_ptr;
			return -ENOMEM;
		}
		memcpy(&data->rules[data->nb_rules], rules,
		       nb_rules * sizeof(struct rte_regexdev_rule));
		data->nb_rules = sum_nb_rules;
	}
	return nb_rules;
}

static int
otx2_ree_rule_db_import(struct rte_regexdev *dev, const char *rule_db,
			uint32_t rule_db_len)
{
	const struct ree_rule_db *ree_rule_db;
	uint32_t ree_rule_db_len;
	int ret;

	ree_func_trace("rule_db_len=%d", rule_db_len);

	ree_rule_db = (const struct ree_rule_db *)rule_db;
	ree_rule_db_len = ree_rule_db->number_of_entries *
			sizeof(struct ree_rule_db_entry);
	if (ree_rule_db_len > rule_db_len) {
		otx2_err("Invalid rule db length");
		return -EINVAL;
	}
	ret = otx2_ree_rule_db_prog(dev, (const char *)ree_rule_db->entries,
			ree_rule_db_len, NULL, OTX2_REE_NON_INC_PROG);
	if (ret) {
		otx2_err("Could not program rule db");
		return -ENOSPC;
	}
	return 0;
}

static int
otx2_ree_rule_db_export(struct rte_regexdev *dev, char *rule_db)
{
	struct ree_rule_db *ree_rule_db;
	uint32_t rule_dbi_len;
	uint32_t rule_db_len;
	int ret;

	ree_func_trace();

	ret = otx2_ree_rule_db_len_get(dev, &rule_db_len, &rule_dbi_len);
	if (ret)
		return ret;

	if (rule_db == NULL) {
		rule_db_len += sizeof(struct ree_rule_db);
		return rule_db_len;
	}

	ree_rule_db = (struct ree_rule_db *)rule_db;
	ret = otx2_ree_rule_db_get(dev, (char *)ree_rule_db->entries,
				   rule_db_len, NULL, 0);
	if (ret) {
		otx2_err("Could not export rule db");
		return -EFAULT;
	}
	ree_rule_db->number_of_entries =
			rule_db_len / sizeof(struct ree_rule_db_entry);
	ree_rule_db->revision = REE_RULE_DB_REVISION;
	ree_rule_db->version = REE_RULE_DB_VERSION;

	return 0;
}

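/*
 * Callers are expected to invoke the export twice: once with
 * rule_db == NULL to learn the required buffer size (returned as a
 * positive value), then again with an allocated buffer that receives the
 * entries prefixed by the ree_rule_db header.
 */
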
static int
ree_get_blkaddr(struct otx2_dev *dev)
{
	int pf;

	pf = otx2_get_pf(dev->pf_func);
	if (pf == REE0_PF)
		return RVU_BLOCK_ADDR_REE0;
	else if (pf == REE1_PF)
		return RVU_BLOCK_ADDR_REE1;
	else
		return 0;
}

static struct rte_regexdev_ops otx2_ree_ops = {
	.dev_info_get = otx2_ree_dev_info_get,
	.dev_configure = otx2_ree_dev_config,
	.dev_qp_setup = otx2_ree_queue_pair_setup,
	.dev_start = otx2_ree_start,
	.dev_stop = otx2_ree_stop,
	.dev_close = otx2_ree_close,
	.dev_attr_get = NULL,
	.dev_attr_set = NULL,
	.dev_rule_db_update = otx2_ree_rule_db_update,
	.dev_rule_db_compile_activate =
			otx2_ree_rule_db_compile_activate,
	.dev_db_import = otx2_ree_rule_db_import,
	.dev_db_export = otx2_ree_rule_db_export,
	.dev_xstats_names_get = NULL,
	.dev_xstats_get = NULL,
	.dev_xstats_by_name_get = NULL,
	.dev_xstats_reset = NULL,
	.dev_selftest = NULL,
	.dev_dump = NULL,
};

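/*
 * Callbacks left NULL (attributes, xstats, selftest, dump) are simply not
 * implemented by this PMD; the regexdev layer is expected to reject the
 * corresponding API calls for this device.
 */
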
static int
otx2_ree_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		   struct rte_pci_device *pci_dev)
{
	char name[RTE_REGEXDEV_NAME_MAX_LEN];
	struct otx2_ree_data *data;
	struct otx2_dev *otx2_dev;
	struct rte_regexdev *dev;
	uint8_t max_matches = 0;
	struct otx2_ree_vf *vf;
	uint16_t nb_queues = 0;
	int ret;

	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	dev = ree_dev_register(name);
	if (dev == NULL) {
		ret = -ENODEV;
		goto exit;
	}

	dev->dev_ops = &otx2_ree_ops;
	dev->device = &pci_dev->device;

	/* Get private data space allocated */
	data = dev->data->dev_private;
	vf = &data->vf;

	otx2_dev = &vf->otx2_dev;

	/* Initialize the base otx2_dev object */
	ret = otx2_dev_init(pci_dev, otx2_dev);
	if (ret) {
		otx2_err("Could not initialize otx2_dev");
		goto dev_unregister;
	}

	/* Get REE block address */
	vf->block_address = ree_get_blkaddr(otx2_dev);
	if (!vf->block_address) {
		otx2_err("Could not determine block PF number");
		ret = -EINVAL;
		goto otx2_dev_fini;
	}

	/* Get number of queues available on the device */
	ret = otx2_ree_available_queues_get(dev, &nb_queues);
	if (ret) {
		otx2_err("Could not determine the number of queues available");
		goto otx2_dev_fini;
	}

	/* Don't exceed the limits set per VF */
	nb_queues = RTE_MIN(nb_queues, OTX2_REE_MAX_QUEUES_PER_VF);
	if (nb_queues == 0) {
		otx2_err("No free queues available on the device");
		ret = -ENODEV;
		goto otx2_dev_fini;
	}

	vf->max_queues = nb_queues;

	otx2_ree_dbg("Max queues supported by device: %d", vf->max_queues);

	/* Get number of maximum matches supported on the device */
	ret = otx2_ree_max_matches_get(dev, &max_matches);
	if (ret) {
		otx2_err("Could not determine the maximum matches supported");
		goto otx2_dev_fini;
	}

	/* Don't exceed the limits set per VF */
	max_matches = RTE_MIN(max_matches, OTX2_REE_MAX_MATCHES_PER_VF);
	if (max_matches == 0) {
		otx2_err("No matches supported on the device");
		ret = -ENODEV;
		goto otx2_dev_fini;
	}

	vf->max_matches = max_matches;

	otx2_ree_dbg("Max matches supported by device: %d", vf->max_matches);

	data->rule_flags = RTE_REGEX_PCRE_RULE_ALLOW_EMPTY_F |
			RTE_REGEX_PCRE_RULE_ANCHORED_F;
	data->regexdev_capa = 0;
	data->max_groups = REE_MAX_GROUPS;
	data->max_rules_per_group = REE_MAX_RULES_PER_GROUP;
	data->nb_rules = 0;

	dev->state = RTE_REGEXDEV_READY;

	return 0;

otx2_dev_fini:
	otx2_dev_fini(pci_dev, otx2_dev);
dev_unregister:
	ree_dev_unregister(dev);
exit:
	otx2_err("Could not create device (vendor_id: 0x%x device_id: 0x%x)",
		 pci_dev->id.vendor_id, pci_dev->id.device_id);
	return ret;
}

static int
otx2_ree_pci_remove(struct rte_pci_device *pci_dev)
{
	char name[RTE_REGEXDEV_NAME_MAX_LEN];
	struct rte_regexdev *dev = NULL;

	if (pci_dev == NULL)
		return -EINVAL;

	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));

	dev = rte_regexdev_get_device_by_name(name);
	if (dev == NULL)
		return -ENODEV;

	return ree_dev_fini(dev);
}

static struct rte_pci_id pci_id_ree_table[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			       PCI_DEVID_OCTEONTX2_RVU_REE_PF)
	},
	{
		.vendor_id = 0,
	}
};

static struct rte_pci_driver otx2_regexdev_pmd = {
	.id_table = pci_id_ree_table,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = otx2_ree_pci_probe,
	.remove = otx2_ree_pci_remove,
};

RTE_PMD_REGISTER_PCI(REGEXDEV_NAME_OCTEONTX2_PMD, otx2_regexdev_pmd);
RTE_PMD_REGISTER_PCI_TABLE(REGEXDEV_NAME_OCTEONTX2_PMD, pci_id_ree_table);