diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c
index 2a387ff959..9deab0829a 100644
--- a/drivers/event/cnxk/cnxk_eventdev.c
+++ b/drivers/event/cnxk/cnxk_eventdev.c
@@ -2,8 +2,102 @@
  * Copyright(C) 2021 Marvell.
  */
 
+#include "cnxk_cryptodev_ops.h"
 #include "cnxk_eventdev.h"
 
+static int
+crypto_adapter_qp_setup(const struct rte_cryptodev *cdev,
+			struct cnxk_cpt_qp *qp)
+{
+	char name[RTE_MEMPOOL_NAMESIZE];
+	uint32_t cache_size, nb_req;
+	unsigned int req_size;
+
+	snprintf(name, RTE_MEMPOOL_NAMESIZE, "cnxk_ca_req_%u:%u",
+		 cdev->data->dev_id, qp->lf.lf_id);
+	req_size = sizeof(struct cpt_inflight_req);
+	cache_size = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, qp->lf.nb_desc / 1.5);
+	nb_req = RTE_MAX(qp->lf.nb_desc, cache_size * rte_lcore_count());
+	qp->ca.req_mp = rte_mempool_create(name, nb_req, req_size, cache_size,
+					   0, NULL, NULL, NULL, NULL,
+					   rte_socket_id(), 0);
+	if (qp->ca.req_mp == NULL)
+		return -ENOMEM;
+
+	qp->ca.enabled = true;
+
+	return 0;
+}
+
+int
+cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
+			   const struct rte_cryptodev *cdev,
+			   int32_t queue_pair_id)
+{
+	struct cnxk_sso_evdev *sso_evdev = cnxk_sso_pmd_priv(event_dev);
+	uint32_t adptr_xae_cnt = 0;
+	struct cnxk_cpt_qp *qp;
+	int ret;
+
+	if (queue_pair_id == -1) {
+		uint16_t qp_id;
+
+		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
+			qp = cdev->data->queue_pairs[qp_id];
+			ret = crypto_adapter_qp_setup(cdev, qp);
+			if (ret) {
+				cnxk_crypto_adapter_qp_del(cdev, -1);
+				return ret;
+			}
+			adptr_xae_cnt += qp->ca.req_mp->size;
+		}
+	} else {
+		qp = cdev->data->queue_pairs[queue_pair_id];
+		ret = crypto_adapter_qp_setup(cdev, qp);
+		if (ret)
+			return ret;
+		adptr_xae_cnt = qp->ca.req_mp->size;
+	}
+
+	/* Update crypto adapter XAE count */
+	sso_evdev->adptr_xae_cnt += adptr_xae_cnt;
+	cnxk_sso_xae_reconfigure((struct rte_eventdev *)(uintptr_t)event_dev);
+
+	return 0;
+}
+
+static int
+crypto_adapter_qp_free(struct cnxk_cpt_qp *qp)
+{
+	rte_mempool_free(qp->ca.req_mp);
+	qp->ca.enabled = false;
+
+	return 0;
+}
+
+int
+cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
+			   int32_t queue_pair_id)
+{
+	struct cnxk_cpt_qp *qp;
+
+	if (queue_pair_id == -1) {
+		uint16_t qp_id;
+
+		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
+			qp = cdev->data->queue_pairs[qp_id];
+			if (qp->ca.enabled)
+				crypto_adapter_qp_free(qp);
+		}
+	} else {
+		qp = cdev->data->queue_pairs[queue_pair_id];
+		if (qp->ca.enabled)
+			crypto_adapter_qp_free(qp);
+	}
+
+	return 0;
+}
+
 void
 cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
 		  struct rte_event_dev_info *dev_info)
@@ -77,6 +171,9 @@ cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev)
 	xaq_cnt = dev->nb_event_queues * CNXK_SSO_XAQ_CACHE_CNT;
 	if (dev->xae_cnt)
 		xaq_cnt += dev->xae_cnt / dev->sso.xae_waes;
+	else if (dev->adptr_xae_cnt)
+		xaq_cnt += (dev->adptr_xae_cnt / dev->sso.xae_waes) +
+			   (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
 	else
 		xaq_cnt += (dev->sso.iue / dev->sso.xae_waes) +
 			   (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
@@ -125,6 +222,36 @@ alloc_fail:
 	return rc;
 }
 
+int
+cnxk_sso_xae_reconfigure(struct rte_eventdev *event_dev)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	int rc = 0;
+
+	if (event_dev->data->dev_started)
+		event_dev->dev_ops->dev_stop(event_dev);
+
+	rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
+	if (rc < 0) {
+		plt_err("Failed to release XAQ %d", rc);
+		return rc;
+	}
+
+	rte_mempool_free(dev->xaq_pool);
+	dev->xaq_pool = NULL;
+	rc = cnxk_sso_xaq_allocate(dev);
+	if (rc < 0) {
+		plt_err("Failed to alloc XAQ %d", rc);
+		return rc;
+	}
+
+	rte_mb();
+	if (event_dev->data->dev_started)
+		event_dev->dev_ops->dev_start(event_dev);
+
+	return 0;
+}
+
 int
 cnxk_setup_event_ports(const struct rte_eventdev *event_dev,
 		       cnxk_sso_init_hws_mem_t init_hws_fn,
@@ -326,6 +453,126 @@ cnxk_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
 	return 0;
 }
 
+void
+cnxk_sso_dump(struct rte_eventdev *event_dev, FILE *f)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+
+	roc_sso_dump(&dev->sso, dev->sso.nb_hws, dev->sso.nb_hwgrp, f);
+}
+
+static void
+cnxk_handle_event(void *arg, struct rte_event event)
+{
+	struct rte_eventdev *event_dev = arg;
+
+	if (event_dev->dev_ops->dev_stop_flush != NULL)
+		event_dev->dev_ops->dev_stop_flush(
+			event_dev->data->dev_id, event,
+			event_dev->data->dev_stop_flush_arg);
+}
+
+static void
+cnxk_sso_cleanup(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
+		 cnxk_sso_hws_flush_t flush_fn, uint8_t enable)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	uintptr_t hwgrp_base;
+	uint16_t i;
+	void *ws;
+
+	for (i = 0; i < dev->nb_event_ports; i++) {
+		ws = event_dev->data->ports[i];
+		reset_fn(dev, ws);
+	}
+
+	rte_mb();
+	ws = event_dev->data->ports[0];
+
+	for (i = 0; i < dev->nb_event_queues; i++) {
+		/* Consume all the events through HWS0 */
+		hwgrp_base = roc_sso_hwgrp_base_get(&dev->sso, i);
+		flush_fn(ws, i, hwgrp_base, cnxk_handle_event, event_dev);
+		/* Enable/Disable SSO GGRP */
+		plt_write64(enable, hwgrp_base + SSO_LF_GGRP_QCTL);
+	}
+}
+
+int
+cnxk_sso_start(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
+	       cnxk_sso_hws_flush_t flush_fn)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	struct roc_sso_hwgrp_qos qos[dev->qos_queue_cnt];
+	int i, rc;
+
+	plt_sso_dbg();
+	for (i = 0; i < dev->qos_queue_cnt; i++) {
+		qos[i].hwgrp = dev->qos_parse_data[i].queue;
+		qos[i].iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
+		qos[i].taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
+		qos[i].xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
+	}
+	rc = roc_sso_hwgrp_qos_config(&dev->sso, qos, dev->qos_queue_cnt,
+				      dev->xae_cnt);
+	if (rc < 0) {
+		plt_sso_dbg("failed to configure HWGRP QoS rc = %d", rc);
+		return -EINVAL;
+	}
+	cnxk_sso_cleanup(event_dev, reset_fn, flush_fn, true);
+	rte_mb();
+
+	return 0;
+}
+
+void
+cnxk_sso_stop(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
+	      cnxk_sso_hws_flush_t flush_fn)
+{
+	plt_sso_dbg();
+	cnxk_sso_cleanup(event_dev, reset_fn, flush_fn, false);
+	rte_mb();
+}
+
+int
+cnxk_sso_close(struct rte_eventdev *event_dev, cnxk_sso_unlink_t unlink_fn)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	uint16_t all_queues[CNXK_SSO_MAX_HWGRP];
+	uint16_t i;
+	void *ws;
+
+	if (!dev->configured)
+		return 0;
+
+	for (i = 0; i < dev->nb_event_queues; i++)
+		all_queues[i] = i;
+
+	for (i = 0; i < dev->nb_event_ports; i++) {
+		ws = event_dev->data->ports[i];
+		unlink_fn(dev, ws, all_queues, dev->nb_event_queues);
+		rte_free(cnxk_sso_hws_get_cookie(ws));
+		event_dev->data->ports[i] = NULL;
+	}
+
+	roc_sso_rsrc_fini(&dev->sso);
+	rte_mempool_free(dev->xaq_pool);
+	rte_memzone_free(rte_memzone_lookup(CNXK_SSO_FC_NAME));
+
+	dev->fc_iova = 0;
+	dev->fc_mem = NULL;
+	dev->xaq_pool = NULL;
+	dev->configured = false;
+	dev->is_timeout_deq = 0;
+	dev->nb_event_ports = 0;
+	dev->max_num_events = -1;
+	dev->nb_event_queues = 0;
+	dev->min_dequeue_timeout_ns = USEC2NSEC(1);
+	dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
+
+	return 0;
+}
+
 static void
 parse_queue_param(char *value, void *opaque)
 {
@@ -418,9 +665,11 @@ cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs)
 			   &dev->xae_cnt);
 	rte_kvargs_process(kvlist, CNXK_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
 			   dev);
-	rte_kvargs_process(kvlist, CN9K_SSO_SINGLE_WS, &parse_kvargs_value,
+	rte_kvargs_process(kvlist, CNXK_SSO_FORCE_BP, &parse_kvargs_flag,
+			   &dev->force_ena_bp);
+	rte_kvargs_process(kvlist, CN9K_SSO_SINGLE_WS, &parse_kvargs_flag,
 			   &single_ws);
-	rte_kvargs_process(kvlist, CN10K_SSO_GW_MODE, &parse_kvargs_value,
+	rte_kvargs_process(kvlist, CN10K_SSO_GW_MODE, &parse_kvargs_flag,
 			   &dev->gw_mode);
 	dev->dual_ws = !single_ws;
 	rte_kvargs_free(kvlist);
@@ -462,6 +711,8 @@ cnxk_sso_init(struct rte_eventdev *event_dev)
 	dev->nb_event_queues = 0;
 	dev->nb_event_ports = 0;
 
+	cnxk_tim_init(&dev->sso);
+
 	return 0;
 
 error:
@@ -478,6 +729,7 @@ cnxk_sso_fini(struct rte_eventdev *event_dev)
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
+	cnxk_tim_fini();
 	roc_sso_rsrc_fini(&dev->sso);
 	roc_sso_dev_fini(&dev->sso);
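
Note on the crypto adapter path above: cnxk_crypto_adapter_qp_add() and cnxk_crypto_adapter_qp_del() are not called by applications directly; they back the generic rte_event_crypto_adapter queue-pair ops. A minimal application-side sketch, assuming the 21.08-era adapter API (the adapter id, device ids, port config and FORWARD mode below are illustrative, not taken from this patch):

#include <rte_event_crypto_adapter.h>

static int
ca_setup(uint8_t evdev_id, uint8_t cdev_id,
	 struct rte_event_port_conf *port_conf)
{
	int rc;

	/* Create adapter instance 0 on the given event device. */
	rc = rte_event_crypto_adapter_create(0, evdev_id, port_conf,
					     RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD);
	if (rc)
		return rc;

	/*
	 * queue_pair_id of -1 binds every queue pair of the cryptodev; in
	 * this PMD that walks cdev->data->queue_pairs[] and creates one
	 * "cnxk_ca_req_<dev>:<lf>" inflight-request mempool per queue pair.
	 */
	return rte_event_crypto_adapter_queue_pair_add(0, cdev_id, -1, NULL);
}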
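
On the request-mempool sizing in crypto_adapter_qp_setup(): the per-lcore cache is capped at RTE_MEMPOOL_CACHE_MAX_SIZE (512) or nb_desc / 1.5, whichever is smaller, and the pool is then over-provisioned to max(nb_desc, cache_size * lcore count) so that objects parked in per-lcore caches cannot starve the descriptor ring. Worked through with assumed values, nb_desc = 2048 on an 8-lcore run: cache_size = min(512, 1365) = 512 and nb_req = max(2048, 512 * 8) = 4096 objects. That pool size is accumulated into adptr_xae_cnt, which cnxk_sso_xaq_allocate() converts to adptr_xae_cnt / xae_waes XAQ buffers plus CNXK_SSO_XAQ_SLACK per event queue, and cnxk_sso_xae_reconfigure() applies the new sizing by stopping the device if it is running, releasing and reallocating the XAQ pool, then restarting it.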
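
On the devargs hunk: the new force_ena_bp option is parsed with parse_kvargs_flag, and single_ws and gw_mode switch from parse_kvargs_value to the same flag parser. Going by the cnxk eventdev guide, the key strings behind CNXK_SSO_FORCE_BP, CN9K_SSO_SINGLE_WS and CN10K_SSO_GW_MODE are assumed to be "force_rx_bp", "single_ws" and "gw_mode", giving EAL arguments of the form -a 0002:0e:00.0,force_rx_bp=1 (the PCI address is illustrative).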