+static void
+dpaa2_eventdev_info_get(struct rte_eventdev *dev,
+ struct rte_event_dev_info *dev_info)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ memset(dev_info, 0, sizeof(struct rte_event_dev_info));
+ dev_info->min_dequeue_timeout_ns =
+ DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
+ dev_info->max_dequeue_timeout_ns =
+ DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
+ dev_info->dequeue_timeout_ns =
+ DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
+ dev_info->max_event_queues = priv->max_event_queues;
+ dev_info->max_event_queue_flows =
+ DPAA2_EVENT_MAX_QUEUE_FLOWS;
+ dev_info->max_event_queue_priority_levels =
+ DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
+ dev_info->max_event_priority_levels =
+ DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
+ dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
+ /* Each event port needs its own DPIO portal; cap at the lcore count */
+ if (dev_info->max_event_ports > rte_lcore_count())
+ dev_info->max_event_ports = rte_lcore_count();
+ dev_info->max_event_port_dequeue_depth =
+ DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
+ dev_info->max_event_port_enqueue_depth =
+ DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+ dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
+ dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
+ RTE_EVENT_DEV_CAP_BURST_MODE |
+ RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
+ RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
+ RTE_EVENT_DEV_CAP_NONSEQ_MODE;
+}
+
+static int
+dpaa2_eventdev_configure(const struct rte_eventdev *dev)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct rte_event_dev_config *conf = &dev->data->dev_conf;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ priv->nb_event_queues = conf->nb_event_queues;
+ priv->nb_event_ports = conf->nb_event_ports;
+ priv->nb_event_queue_flows = conf->nb_event_queue_flows;
+ priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
+ priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
+ priv->event_dev_cfg = conf->event_dev_cfg;
+
+ /* Check whether the dequeue timeout is per dequeue or global */
+ if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
+ /*
+ * The timeout is supplied with each dequeue operation,
+ * so invalidate the global timeout value here.
+ */
+ priv->dequeue_timeout_ns = 0;
+
+ } else if (conf->dequeue_timeout_ns == 0) {
+ priv->dequeue_timeout_ns = DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
+ } else {
+ priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
+ }
+
+ DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
+ dev->data->dev_id);
+ return 0;
+}
+
+static int
+dpaa2_eventdev_start(struct rte_eventdev *dev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_stop(struct rte_eventdev *dev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+}
+
+static int
+dpaa2_eventdev_close(struct rte_eventdev *dev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
+ struct rte_event_queue_conf *queue_conf)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+
+ queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
+ queue_conf->nb_atomic_order_sequences =
+ DPAA2_EVENT_QUEUE_ORDER_SEQUENCES;
+ queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
+ queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+}
+
+static int
+dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
+ const struct rte_event_queue_conf *queue_conf)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
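+ /* Only parallel, atomic and ordered scheduling types are accepted */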
+ switch (queue_conf->schedule_type) {
+ case RTE_SCHED_TYPE_PARALLEL:
+ case RTE_SCHED_TYPE_ATOMIC:
+ case RTE_SCHED_TYPE_ORDERED:
+ break;
+ default:
+ DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
+ return -1;
+ }
+ evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
+ evq_info->event_queue_id = queue_id;
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(queue_id);
+}
+
+static void
+dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
+ struct rte_event_port_conf *port_conf)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(port_id);
+
+ port_conf->new_event_threshold =
+ DPAA2_EVENT_MAX_NUM_EVENTS;
+ port_conf->dequeue_depth =
+ DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
+ port_conf->enqueue_depth =
+ DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
+ port_conf->disable_implicit_release = 0;
+}
+
+static int
+dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+ char event_port_name[32];
+ struct dpaa2_port *portal;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(port_conf);
+
+ snprintf(event_port_name, sizeof(event_port_name),
+ "event-port-%d", port_id);
+ portal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);
+ if (!portal) {
+ DPAA2_EVENTDEV_ERR("Memory allocation failure");
+ return -ENOMEM;
+ }
+
+ memset(portal, 0, sizeof(struct dpaa2_port));
+ dev->data->ports[port_id] = portal;
+ return 0;
+}
+
+static void
+dpaa2_eventdev_port_release(void *port)
+{
+ struct dpaa2_port *portal = port;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ if (portal == NULL)
+ return;
+
+ /* TODO: Cleanup is required when ports are in linked state. */
+ if (portal->is_port_linked)
+ DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");
+
+ rte_free(portal);
+}
+
+static int
+dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
+ const uint8_t queues[], const uint8_t priorities[],
+ uint16_t nb_links)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ struct dpaa2_port *dpaa2_portal = port;
+ struct dpaa2_eventq *evq_info;
+ uint16_t i;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(priorities);
+
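+ /*
+ * Cache each linked queue's context in the portal. The DPCON
+ * channel itself is bound to an lcore's DPIO portal later,
+ * from the dequeue path.
+ */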
+ for (i = 0; i < nb_links; i++) {
+ evq_info = &priv->evq_info[queues[i]];
+ memcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,
+ sizeof(struct dpaa2_eventq));
+ dpaa2_portal->evq_info[queues[i]].event_port = port;
+ dpaa2_portal->num_linked_evq++;
+ }
+
+ return (int)nb_links;
+}
+
+static int
+dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
+ uint8_t queues[], uint16_t nb_unlinks)
+{
+ struct dpaa2_port *dpaa2_portal = port;
+ int i;
+ struct dpaa2_dpio_dev *dpio_dev = NULL;
+ struct dpaa2_eventq *evq_info;
+ struct qbman_swp *swp;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
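+ /* Detach each queue's channel from the lcore's DPIO portal, if mapped */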
+ for (i = 0; i < nb_unlinks; i++) {
+ evq_info = &dpaa2_portal->evq_info[queues[i]];
+
+ if (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {
+ /* TODO: dpaa2_portal should hold a per-lcore dpio_dev reference */
+ dpio_dev = DPAA2_PER_LCORE_DPIO;
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ qbman_swp_push_set(swp,
+ evq_info->dpcon->channel_index, 0);
+ dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
+ dpio_dev->token,
+ evq_info->dpcon->dpcon_id);
+ }
+ memset(evq_info, 0, sizeof(struct dpaa2_eventq));
+ if (dpaa2_portal->num_linked_evq)
+ dpaa2_portal->num_linked_evq--;
+ }
+
+ if (!dpaa2_portal->num_linked_evq)
+ dpaa2_portal->is_port_linked = false;
+
+ return (int)nb_unlinks;
+}
+
+static int
+dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
+ uint64_t *timeout_ticks)
+{
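+ /* Convert nanoseconds to the driver's millisecond tick granularity */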
+ uint32_t scale = 1000 * 1000;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ *timeout_ticks = ns / scale;
+
+ return 0;
+}
+
+static void
+dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(f);
+}
+
+static int
+dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps)
+{
+ const char *ethdev_driver = eth_dev->device->driver->name;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
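+ /*
+ * DPAA2 ethdevs can feed the event device directly in hardware;
+ * any other ethdev falls back to the SW Rx adapter capabilities.
+ */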
+ if (!strcmp(ethdev_driver, "net_dpaa2"))
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
+ else
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ uint8_t ev_qid = queue_conf->ev.queue_id;
+ struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
+ int i, ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ ret = dpaa2_eth_eventq_attach(eth_dev, i,
+ dpcon, queue_conf);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "Event queue attach failed: err(%d)", ret);
+ goto fail;
+ }
+ }
+ return 0;
+fail:
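+ /* Roll back the Rx queues attached so far */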
+ for (i = (i - 1); i >= 0; i--)
+ dpaa2_eth_eventq_detach(eth_dev, i);
+
+ return ret;
+}
+
+static int
+dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ uint8_t ev_qid = queue_conf->ev.queue_id;
+ struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
+ int ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ if (rx_queue_id == -1)
+ return dpaa2_eventdev_eth_queue_add_all(dev,
+ eth_dev, queue_conf);
+
+ ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
+ dpcon, queue_conf);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "Event queue attach failed: err(%d)", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ int i, ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ ret = dpaa2_eth_eventq_detach(eth_dev, i);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "Event queue detach failed: err(%d)", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id)
+{
+ int ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ if (rx_queue_id == -1)
+ return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);
+
+ ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "Event queue detach failed: err(%d)", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev,
+ uint32_t *caps)
+{
+ const char *name = cdev->data->name;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
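+ /* Only DPAA2 SEC ("dpsec") crypto devices are supported */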
+ if (!strncmp(name, "dpsec-", 6))
+ *caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA2_CAP;
+ else
+ return -1;
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev,
+ const struct rte_event *ev)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ uint8_t ev_qid = ev->queue_id;
+ struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
+ int i, ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
+ ret = dpaa2_sec_eventq_attach(cryptodev, i, dpcon, ev);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR("dpaa2_sec_eventq_attach failed: ret %d\n",
+ ret);
+ goto fail;
+ }
+ }
+ return 0;
+fail:
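+ /* Roll back the queue pairs attached so far */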
+ for (i = (i - 1); i >= 0; i--)
+ dpaa2_sec_eventq_detach(cryptodev, i);
+
+ return ret;
+}
+
+static int
+dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev,
+ int32_t rx_queue_id,
+ const struct rte_event *ev)
+{
+ struct dpaa2_eventdev *priv = dev->data->dev_private;
+ uint8_t ev_qid = ev->queue_id;
+ struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
+ int ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ if (rx_queue_id == -1)
+ return dpaa2_eventdev_crypto_queue_add_all(dev,
+ cryptodev, ev);
+
+ ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
+ dpcon, ev);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "dpaa2_sec_eventq_attach failed: ret: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cdev)
+{
+ int i, ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+
+ for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
+ ret = dpaa2_sec_eventq_detach(cdev, i);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "dpaa2_sec_eventq_detach failed:ret %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev,
+ int32_t rx_queue_id)
+{
+ int ret;
+
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ if (rx_queue_id == -1)
+ return dpaa2_eventdev_crypto_queue_del_all(dev, cryptodev);
+
+ ret = dpaa2_sec_eventq_detach(cryptodev, rx_queue_id);
+ if (ret) {
+ DPAA2_EVENTDEV_ERR(
+ "dpaa2_sec_eventq_detach failed: ret: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_start(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(cryptodev);
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
+ const struct rte_cryptodev *cryptodev)
+{
+ EVENTDEV_INIT_FUNC_TRACE();
+
+ RTE_SET_USED(dev);
+ RTE_SET_USED(cryptodev);
+
+ return 0;
+}
+
+static int
+dpaa2_eventdev_tx_adapter_create(uint8_t id,
+ const struct rte_eventdev *dev)
+{
+ RTE_SET_USED(id);
+ RTE_SET_USED(dev);
+
+ /* Nothing to do. Simply return. */
+ return 0;
+}
+
+static int
+dpaa2_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps)
+{
+ RTE_SET_USED(dev);
+ RTE_SET_USED(eth_dev);
+
+ *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
+ return 0;
+}
+
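+/*
+ * Tx adapter enqueue for bursts whose events all target the same
+ * destination: the port and Tx queue come from the first mbuf and the
+ * whole burst goes out in a single rte_eth_tx_burst() call.
+ */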
+static uint16_t
+dpaa2_eventdev_txa_enqueue_same_dest(void *port,
+ struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct rte_mbuf *m[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
+ uint16_t qid, i;
+
+ RTE_SET_USED(port);
+
+ m0 = (struct rte_mbuf *)ev[0].mbuf;
+ qid = rte_event_eth_tx_adapter_txq_get(m0);
+
+ for (i = 0; i < nb_events; i++)
+ m[i] = (struct rte_mbuf *)ev[i].mbuf;
+
+ return rte_eth_tx_burst(m0->port, qid, m, nb_events);
+}
+
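+/*
+ * Generic Tx adapter enqueue: events in a burst may target different
+ * Tx queues, so each mbuf is transmitted individually.
+ */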
+static uint16_t
+dpaa2_eventdev_txa_enqueue(void *port,
+ struct rte_event ev[],
+ uint16_t nb_events)
+{
+ struct rte_mbuf *m;
+ uint16_t qid, i;
+
+ RTE_SET_USED(port);
+
+ for (i = 0; i < nb_events; i++) {
+ m = (struct rte_mbuf *)ev[i].mbuf;
+ qid = rte_event_eth_tx_adapter_txq_get(m);
+ rte_eth_tx_burst(m->port, qid, &m, 1);
+ }
+
+ return nb_events;
+}
+
+static struct rte_eventdev_ops dpaa2_eventdev_ops = {
+ .dev_infos_get = dpaa2_eventdev_info_get,
+ .dev_configure = dpaa2_eventdev_configure,
+ .dev_start = dpaa2_eventdev_start,
+ .dev_stop = dpaa2_eventdev_stop,
+ .dev_close = dpaa2_eventdev_close,
+ .queue_def_conf = dpaa2_eventdev_queue_def_conf,
+ .queue_setup = dpaa2_eventdev_queue_setup,
+ .queue_release = dpaa2_eventdev_queue_release,
+ .port_def_conf = dpaa2_eventdev_port_def_conf,
+ .port_setup = dpaa2_eventdev_port_setup,
+ .port_release = dpaa2_eventdev_port_release,
+ .port_link = dpaa2_eventdev_port_link,
+ .port_unlink = dpaa2_eventdev_port_unlink,
+ .timeout_ticks = dpaa2_eventdev_timeout_ticks,
+ .dump = dpaa2_eventdev_dump,
+ .dev_selftest = test_eventdev_dpaa2,
+ .eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
+ .eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
+ .eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
+ .eth_rx_adapter_start = dpaa2_eventdev_eth_start,
+ .eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
+ .eth_tx_adapter_caps_get = dpaa2_eventdev_tx_adapter_caps,
+ .eth_tx_adapter_create = dpaa2_eventdev_tx_adapter_create,
+ .crypto_adapter_caps_get = dpaa2_eventdev_crypto_caps_get,
+ .crypto_adapter_queue_pair_add = dpaa2_eventdev_crypto_queue_add,
+ .crypto_adapter_queue_pair_del = dpaa2_eventdev_crypto_queue_del,
+ .crypto_adapter_start = dpaa2_eventdev_crypto_start,
+ .crypto_adapter_stop = dpaa2_eventdev_crypto_stop,
+};