/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cnxk_cryptodev_ops.h"
#include "cnxk_eventdev.h"
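
/* Create the in-flight request mempool for a crypto queue pair and mark the
 * queue pair as usable by the crypto adapter.
 */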
static int
crypto_adapter_qp_setup(const struct rte_cryptodev *cdev,
			struct cnxk_cpt_qp *qp)
{
	char name[RTE_MEMPOOL_NAMESIZE];
	uint32_t cache_size, nb_req;
	unsigned int req_size;

	snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_ca_req_%u:%u",
		 cdev->data->dev_id, qp->lf.lf_id);
	req_size = sizeof(struct cpt_inflight_req);
	/* Cap the per-lcore cache and size the pool so that the combined
	 * caches of all lcores cannot exhaust it.
	 */
	cache_size = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, qp->lf.nb_desc / 1.5);
	nb_req = RTE_MAX(qp->lf.nb_desc, cache_size * rte_lcore_count());
	qp->ca.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size,
					   0, NULL, NULL, NULL, NULL,
					   rte_socket_id(), 0);
	if (qp->ca.req_mp == NULL)
		return -ENOMEM;

	qp->ca.enabled = true;

	return 0;
}
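
/* Add one or all (-1) queue pairs of a cryptodev to the crypto adapter and
 * grow the SSO XAE count to cover the new in-flight requests.
 */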
int
cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
			   const struct rte_cryptodev *cdev,
			   int32_t queue_pair_id)
{
	struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
	uint32_t adptr_xae_cnt = 0;
	struct cnxk_cpt_qp *qp;
	int ret;

	if (queue_pair_id == -1) {
		uint16_t qp_id;

		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
			qp = cdev->data->queue_pairs[qp_id];
			ret = crypto_adapter_qp_setup(cdev, qp);
			if (ret) {
				cnxk_crypto_adapter_qp_del(cdev, -1);
				return ret;
			}
			adptr_xae_cnt += qp->ca.req_mp->size;
		}
	} else {
		qp = cdev->data->queue_pairs[queue_pair_id];
		ret = crypto_adapter_qp_setup(cdev, qp);
		if (ret)
			return ret;
		adptr_xae_cnt = qp->ca.req_mp->size;
	}

	/* Update crypto adapter XAE count */
	sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
	cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);

	return 0;
}

static int
crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
{
	rte_mempool_free(qp->ca.req_mp);
	qp->ca.enabled = false;

	return 0;
}
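
/* Remove one or all (-1) queue pairs of a cryptodev from the crypto adapter
 * and release their request mempools.
 */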
int
cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
			   int32_t queue_pair_id)
{
	struct cnxk_cpt_qp *qp;

	if (queue_pair_id == -1) {
		uint16_t qp_id;

		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
			qp = cdev->data->queue_pairs[qp_id];
			if (qp->ca.enabled)
				crypto_adapter_qp_free(qp);
		}
	} else {
		qp = cdev->data->queue_pairs[queue_pair_id];
		if (qp->ca.enabled)
			crypto_adapter_qp_free(qp);
	}

	return 0;
}

void
cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
		  struct rte_event_dev_info *dev_info)
{
	dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
	dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
	dev_info->max_event_queues = dev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = dev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = dev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
				  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
				  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
				  RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
				  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
				  RTE_EVENT_DEV_CAP_NONSEQ_MODE |
				  RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
				  RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
}
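
/* Size the XAQ aura: base in-unit entries plus any extra count requested via
 * devargs and the adapters' in-flight requests.
 */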
int
cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev)
{
	uint32_t xae_cnt;
	int rc;

	xae_cnt = dev->sso.iue;
	if (dev->xae_cnt)
		xae_cnt += dev->xae_cnt;
	if (dev->adptr_xae_cnt)
		xae_cnt += (dev->adptr_xae_cnt);

	plt_sso_dbg("Configuring %d xae buffers", xae_cnt);
	rc = roc_sso_hwgrp_init_xaq_aura(&dev->sso, xae_cnt);
	if (rc < 0) {
		plt_err("Failed to configure XAQ aura");
		return rc;
	}
	dev->xaq_lmt = dev->sso.xaq.xaq_lmt;
	dev->fc_iova = (uint64_t)dev->sso.xaq.fc;

	return roc_sso_hwgrp_alloc_xaq(
		&dev->sso,
		roc_npa_aura_handle_to_aura(dev->sso.xaq.aura_handle),
		dev->nb_event_queues);
}
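
/* Re-size the XAQ aura at runtime; the device is briefly stopped if it was
 * running so the aura can be swapped safely.
 */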
int
cnxk_sso_xae_reconfigure(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc = 0;

	if (event_dev->data->dev_started)
		event_dev->dev_ops->dev_stop(event_dev);

	rc = cnxk_sso_xaq_allocate(dev);
	if (rc < 0) {
		plt_err("Failed to alloc XAQ %d", rc);
		return rc;
	}

	rte_mb();
	if (event_dev->data->dev_started)
		event_dev->dev_ops->dev_start(event_dev);

	return 0;
}
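
/* Allocate (or reuse) the HWS memory for every event port, stamp its cookie
 * and bind it to the device; unwinds on the first failure.
 */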
int
cnxk_setup_event_ports(const struct rte_eventdev *event_dev,
		       cnxk_sso_init_hws_mem_t init_hws_fn,
		       cnxk_sso_hws_setup_t setup_hws_fn)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int i;

	for (i = 0; i < dev->nb_event_ports; i++) {
		struct cnxk_sso_hws_cookie *ws_cookie;
		void *ws;

		/* Reuse the HWS memory from a previous configure, else
		 * allocate it afresh.
		 */
		if (event_dev->data->ports[i] != NULL)
			ws = event_dev->data->ports[i];
		else
			ws = init_hws_fn(dev, i);
		if (ws == NULL)
			goto hws_fini;
		ws_cookie = cnxk_sso_hws_get_cookie(ws);
		ws_cookie->event_dev = event_dev;
		ws_cookie->configured = 1;
		event_dev->data->ports[i] = ws;
		cnxk_sso_port_setup((struct rte_eventdev *)(uintptr_t)event_dev,
				    i, setup_hws_fn);
	}

	return 0;
hws_fini:
	/* Unwind: free the cookie before clearing the port pointer */
	for (i = i - 1; i >= 0; i--) {
		rte_free(cnxk_sso_hws_get_cookie(event_dev->data->ports[i]));
		event_dev->data->ports[i] = NULL;
	}
	return -ENOMEM;
}
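
/* Re-establish the port to queue links recorded in the eventdev links_map
 * after a reconfigure; 0xdead marks an unlinked slot.
 */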
void
cnxk_sso_restore_links(const struct rte_eventdev *event_dev,
		       cnxk_sso_link_t link_fn)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t *links_map, hwgrp[CNXK_SSO_MAX_HWGRP];
	int i, j;

	for (i = 0; i < dev->nb_event_ports; i++) {
		uint16_t nb_hwgrp = 0;

		links_map = event_dev->data->links_map;
		/* Point links_map to this port specific area */
		links_map += (i * RTE_EVENT_MAX_QUEUES_PER_DEV);

		for (j = 0; j < dev->nb_event_queues; j++) {
			if (links_map[j] == 0xdead)
				continue;
			hwgrp[nb_hwgrp] = j;
			nb_hwgrp++;
		}

		link_fn(dev, event_dev->data->ports[i], hwgrp, nb_hwgrp);
	}
}
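
/* Validate the requested device configuration against hardware limits and
 * release any previously held SSO resources before reconfiguration.
 */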
int
cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
{
	struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint32_t deq_tmo_ns;

	deq_tmo_ns = conf->dequeue_timeout_ns;

	if (deq_tmo_ns == 0)
		deq_tmo_ns = dev->min_dequeue_timeout_ns;
	if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
	    deq_tmo_ns > dev->max_dequeue_timeout_ns) {
		plt_err("Unsupported dequeue timeout requested");
		return -EINVAL;
	}

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
		dev->is_timeout_deq = 1;

	dev->deq_tmo_ns = deq_tmo_ns;

	if (!conf->nb_event_queues || !conf->nb_event_ports ||
	    conf->nb_event_ports > dev->max_event_ports ||
	    conf->nb_event_queues > dev->max_event_queues) {
		plt_err("Unsupported event queues/ports requested");
		return -EINVAL;
	}

	if (conf->nb_event_port_dequeue_depth > 1) {
		plt_err("Unsupported event port deq depth requested");
		return -EINVAL;
	}

	if (conf->nb_event_port_enqueue_depth > 1) {
		plt_err("Unsupported event port enq depth requested");
		return -EINVAL;
	}

	roc_sso_rsrc_fini(&dev->sso);
	roc_sso_hwgrp_free_xaq_aura(&dev->sso, dev->sso.nb_hwgrp);

	dev->nb_event_queues = conf->nb_event_queues;
	dev->nb_event_ports = conf->nb_event_ports;

	return 0;
}

void
cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
			struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

int
cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
		     const struct rte_event_queue_conf *queue_conf)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	plt_sso_dbg("Queue=%d prio=%d", queue_id, queue_conf->priority);
	/* Normalize <0-255> to <0-7> */
	return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, 0xFF, 0xFF,
					  queue_conf->priority / 32);
}

void
cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
{
	RTE_SET_USED(event_dev);
	RTE_SET_USED(queue_id);
}

void
cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
		       struct rte_event_port_conf *port_conf)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = dev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
}
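
/* Bind an already allocated HWS to its hardware group base address through
 * the platform specific setup callback.
 */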
int
cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
		    cnxk_sso_hws_setup_t hws_setup_fn)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uintptr_t grp_base = 0;

	plt_sso_dbg("Port=%d", port_id);
	if (event_dev->data->ports[port_id] == NULL) {
		plt_err("Invalid port Id %d", port_id);
		return -EINVAL;
	}

	grp_base = roc_sso_hwgrp_base_get(&dev->sso, 0);
	if (grp_base == 0) {
		plt_err("Failed to get grp base addr");
		return -EINVAL;
	}

	hws_setup_fn(dev, event_dev->data->ports[port_id], grp_base);
	plt_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);
	rte_mb();

	return 0;
}

int
cnxk_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
		       uint64_t *tmo_ticks)
{
	RTE_SET_USED(event_dev);
	*tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz());

	return 0;
}

void
cnxk_sso_dump(struct rte_eventdev *event_dev, FILE *f)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	roc_sso_dump(&dev->sso, dev->sso.nb_hws, dev->sso.nb_hwgrp, f);
}
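
/* Flush callback: hand every drained event to the application's
 * dev_stop_flush callback, if one is registered.
 */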
static void
cnxk_handle_event(void *arg, struct rte_event event)
{
	struct rte_eventdev *event_dev = arg;

	if (event_dev->dev_ops->dev_stop_flush != NULL)
		event_dev->dev_ops->dev_stop_flush(
			event_dev->data->dev_id, event,
			event_dev->data->dev_stop_flush_arg);
}
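
/* Reset every HWS, drain all pending events through HWS0 and then enable or
 * disable the hardware group queues.
 */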
static void
cnxk_sso_cleanup(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
		 cnxk_sso_hws_flush_t flush_fn, uint8_t enable)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uintptr_t hwgrp_base;
	uint16_t i;
	void *ws;

	for (i = 0; i < dev->nb_event_ports; i++) {
		ws = event_dev->data->ports[i];
		reset_fn(dev, ws);
	}

	rte_mb();
	ws = event_dev->data->ports[0];

	for (i = 0; i < dev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		hwgrp_base = roc_sso_hwgrp_base_get(&dev->sso, i);
		flush_fn(ws, i, hwgrp_base, cnxk_handle_event, event_dev);
		/* Enable/Disable SSO GGRP */
		plt_write64(enable, hwgrp_base + SSO_LF_GGRP_QCTL);
	}
}
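
/* Apply any devargs driven per-hwgrp QoS configuration, then flush and
 * enable all hardware group queues.
 */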
int
cnxk_sso_start(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
	       cnxk_sso_hws_flush_t flush_fn)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	struct roc_sso_hwgrp_qos qos[dev->qos_queue_cnt];
	int i, rc;

	for (i = 0; i < dev->qos_queue_cnt; i++) {
		qos[i].hwgrp = dev->qos_parse_data[i].queue;
		qos[i].iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
		qos[i].taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
		qos[i].xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
	}
	rc = roc_sso_hwgrp_qos_config(&dev->sso, qos, dev->qos_queue_cnt,
				      dev->xae_cnt);
	if (rc < 0) {
		plt_sso_dbg("failed to configure HWGRP QoS rc = %d", rc);
		return -EINVAL;
	}
	cnxk_sso_cleanup(event_dev, reset_fn, flush_fn, true);
	rte_mb();

	return 0;
}

int
cnxk_sso_stop(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
	      cnxk_sso_hws_flush_t flush_fn)
{
	cnxk_sso_cleanup(event_dev, reset_fn, flush_fn, false);
	rte_mb();

	return 0;
}
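
/* Unlink all queues from every port, free the HWS cookies and reset the
 * device back to its unconfigured defaults.
 */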
int
cnxk_sso_close(struct rte_eventdev *event_dev, cnxk_sso_unlink_t unlink_fn)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t all_queues[CNXK_SSO_MAX_HWGRP];
	uint16_t i;
	void *ws;

	if (!dev->configured)
		return 0;

	for (i = 0; i < dev->nb_event_queues; i++)
		all_queues[i] = i;

	for (i = 0; i < dev->nb_event_ports; i++) {
		ws = event_dev->data->ports[i];
		unlink_fn(dev, ws, all_queues, dev->nb_event_queues);
		rte_free(cnxk_sso_hws_get_cookie(ws));
		event_dev->data->ports[i] = NULL;
	}

	roc_sso_rsrc_fini(&dev->sso);

	dev->configured = false;
	dev->is_timeout_deq = 0;
	dev->nb_event_ports = 0;
	dev->max_num_events = -1;
	dev->nb_event_queues = 0;
	dev->min_dequeue_timeout_ns = USEC2NSEC(1);
	dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);

	return 0;
}
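
/* Parse a single "Qx-XAQ-TAQ-IAQ" token into a cnxk_sso_qos entry and append
 * it to the devargs parse data.
 */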
static void
parse_queue_param(char *value, void *opaque)
{
	struct cnxk_sso_qos queue_qos = {0};
	uint16_t *val = (uint16_t *)&queue_qos;
	struct cnxk_sso_evdev *dev = opaque;
	char *tok = strtok(value, "-");
	struct cnxk_sso_qos *old_ptr;

	if (!strlen(value))
		return;

	while (tok != NULL) {
		*val = atoi(tok);
		tok = strtok(NULL, "-");
		val++;
	}

	if (val != (&queue_qos.iaq_prcnt + 1)) {
		plt_err("Invalid QoS parameter expected [Qx-XAQ-TAQ-IAQ]");
		return;
	}

	dev->qos_queue_cnt++;
	old_ptr = dev->qos_parse_data;
	dev->qos_parse_data = rte_realloc(
		dev->qos_parse_data,
		sizeof(struct cnxk_sso_qos) * dev->qos_queue_cnt, 0);
	if (dev->qos_parse_data == NULL) {
		dev->qos_parse_data = old_ptr;
		dev->qos_queue_cnt--;
		return;
	}
	dev->qos_parse_data[dev->qos_queue_cnt - 1] = queue_qos;
}
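
/* Split a "[Qx-XAQ-TAQ-IAQ][Qy-...]" style list into bracketed groups and
 * parse each group.
 */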
static void
parse_qos_list(const char *value, void *opaque)
{
	char *s = strdup(value);
	char *start = NULL;
	char *end = NULL;
	char *f = s;

	while (*s) {
		if (*s == '[')
			start = s;
		else if (*s == ']')
			end = s;

		if (start && start < end) {
			*end = 0;
			parse_queue_param(start + 1, opaque);
			s = end;
			start = end;
		}
		s++;
	}

	free(f);
}

static int
parse_sso_kvargs_dict(const char *key, const char *value, void *opaque)
{
	RTE_SET_USED(key);

	/* Dict format [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ] uses '-' because ','
	 * isn't allowed. Everything is expressed in percentages, 0 represents
	 * default.
	 */
	parse_qos_list(value, opaque);

	return 0;
}

static void
cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	uint8_t single_ws = 0;

	if (devargs == NULL)
		return;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return;

	rte_kvargs_process(kvlist, CNXK_SSO_XAE_CNT, &parse_kvargs_value,
			   &dev->xae_cnt);
	rte_kvargs_process(kvlist, CNXK_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
			   dev);
	rte_kvargs_process(kvlist, CNXK_SSO_FORCE_BP, &parse_kvargs_flag,
			   &dev->force_ena_bp);
	rte_kvargs_process(kvlist, CN9K_SSO_SINGLE_WS, &parse_kvargs_flag,
			   &single_ws);
	rte_kvargs_process(kvlist, CN10K_SSO_GW_MODE, &parse_kvargs_flag,
			   &dev->gw_mode);
	dev->dual_ws = !single_ws;
	rte_kvargs_free(kvlist);
}
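
/* Probe time init: reserve the shared SSO memzone, parse devargs and bring
 * up the RoC SSO device.
 */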
int
cnxk_sso_init(struct rte_eventdev *event_dev)
{
	const struct rte_memzone *mz = NULL;
	struct rte_pci_device *pci_dev;
	struct cnxk_sso_evdev *dev;
	int rc;

	mz = rte_memzone_reserve(CNXK_SSO_MZ_NAME, sizeof(uint64_t),
				 SOCKET_ID_ANY, 0);
	if (mz == NULL) {
		plt_err("Failed to create eventdev memzone");
		return -ENOMEM;
	}

	dev = cnxk_sso_pmd_priv(event_dev);
	pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
	dev->sso.pci_dev = pci_dev;

	*(uint64_t *)mz->addr = (uint64_t)dev;
	cnxk_sso_parse_devargs(dev, pci_dev->device.devargs);

	/* Initialize the base cnxk_dev object */
	rc = roc_sso_dev_init(&dev->sso);
	if (rc < 0) {
		plt_err("Failed to initialize RoC SSO rc=%d", rc);
		goto error;
	}

	dev->is_timeout_deq = 0;
	dev->min_dequeue_timeout_ns = 0;
	dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
	dev->max_num_events = -1;
	dev->nb_event_queues = 0;
	dev->nb_event_ports = 0;

	cnxk_tim_init(&dev->sso);

	return 0;

error:
	rte_memzone_free(mz);
	return rc;
}

int
cnxk_sso_fini(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

	/* For secondary processes, nothing to be done */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	cnxk_tim_fini();
	roc_sso_rsrc_fini(&dev->sso);
	roc_sso_dev_fini(&dev->sso);

	return 0;
}

int
cnxk_sso_remove(struct rte_pci_device *pci_dev)
{
	return rte_event_pmd_pci_remove(pci_dev, cnxk_sso_fini);
}