struct dpaa2_queue tx_queue[DPAA2_DPCI_MAX_QUEUES];
};
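+/* A DPCON (DPAA2 concentrator) object. Previously private to the event
+ * PMD (see the removal below), it is shared here so that the ethernet
+ * and crypto PMDs can read dpcon_id and num_priorities directly instead
+ * of being handed a bare dpcon_id.
+ */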
+struct dpaa2_dpcon_dev {
+ TAILQ_ENTRY(dpaa2_dpcon_dev) next;
+ struct fsl_mc_io dpcon;
+ uint16_t token;
+ rte_atomic16_t in_use;
+ uint32_t dpcon_id;
+ uint16_t qbman_ch_id;
+ uint8_t num_priorities;
+ uint8_t channel_index;
+};
+
/*! Global MCP list */
extern void *(*rte_mcp_ptr_list);
int
dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
int qp_id,
- uint16_t dpcon_id,
+ struct dpaa2_dpcon_dev *dpcon,
const struct rte_event *event)
{
struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
struct dpseci_rx_queue_cfg cfg;
+ uint8_t priority;
int ret;
if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
else
return -EINVAL;
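+ /* Scale the generic eventdev priority (0..255, lower value means
+ * higher priority) onto the priority levels this DPCON supports.
+ */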
+ priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / event->priority) *
+ (dpcon->num_priorities - 1);
+
memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
cfg.options = DPSECI_QUEUE_OPT_DEST;
cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
- cfg.dest_cfg.dest_id = dpcon_id;
- cfg.dest_cfg.priority = event->priority;
+ cfg.dest_cfg.dest_id = dpcon->dpcon_id;
+ cfg.dest_cfg.priority = priority;
cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
cfg.user_ctx = (size_t)(qp);
int
dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
int qp_id,
- uint16_t dpcon_id,
+ struct dpaa2_dpcon_dev *dpcon,
const struct rte_event *event);
int dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
int qp_id);
{
struct dpaa2_eventdev *priv = dev->data->dev_private;
uint8_t ev_qid = queue_conf->ev.queue_id;
- uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
+ struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
int i, ret;
EVENTDEV_INIT_FUNC_TRACE();
for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
ret = dpaa2_eth_eventq_attach(eth_dev, i,
- dpcon_id, queue_conf);
+ dpcon, queue_conf);
if (ret) {
DPAA2_EVENTDEV_ERR(
"Event queue attach failed: err(%d)", ret);
{
struct dpaa2_eventdev *priv = dev->data->dev_private;
uint8_t ev_qid = queue_conf->ev.queue_id;
- uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
+ struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
int ret;
EVENTDEV_INIT_FUNC_TRACE();
eth_dev, queue_conf);
ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
- dpcon_id, queue_conf);
+ dpcon, queue_conf);
if (ret) {
DPAA2_EVENTDEV_ERR(
"Event queue attach failed: err(%d)", ret);
{
struct dpaa2_eventdev *priv = dev->data->dev_private;
uint8_t ev_qid = ev->queue_id;
- uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
+ struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
int i, ret;
EVENTDEV_INIT_FUNC_TRACE();
for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
- ret = dpaa2_sec_eventq_attach(cryptodev, i,
- dpcon_id, ev);
+ ret = dpaa2_sec_eventq_attach(cryptodev, i, dpcon, ev);
if (ret) {
DPAA2_EVENTDEV_ERR("dpaa2_sec_eventq_attach failed: ret %d\n",
ret);
{
struct dpaa2_eventdev *priv = dev->data->dev_private;
uint8_t ev_qid = ev->queue_id;
- uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
+ struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
int ret;
EVENTDEV_INIT_FUNC_TRACE();
cryptodev, ev);
ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
- dpcon_id, ev);
+ dpcon, ev);
if (ret) {
DPAA2_EVENTDEV_ERR(
"dpaa2_sec_eventq_attach failed: ret: %d\n", ret);
* the ethdev to eventdev with DPAA2 devices.
*/
-struct dpaa2_dpcon_dev {
- TAILQ_ENTRY(dpaa2_dpcon_dev) next;
- struct fsl_mc_io dpcon;
- uint16_t token;
- rte_atomic16_t in_use;
- uint32_t dpcon_id;
- uint16_t qbman_ch_id;
- uint8_t num_priorities;
- uint8_t channel_index;
-};
-
struct dpaa2_eventq {
/* DPcon device */
struct dpaa2_dpcon_dev *dpcon;
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
int eth_rx_queue_id,
- uint16_t dpcon_id,
+ struct dpaa2_dpcon_dev *dpcon,
const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
struct dpaa2_dev_priv *eth_priv = dev->data->dev_private;
struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id];
uint8_t flow_id = dpaa2_ethq->flow_id;
struct dpni_queue cfg;
- uint8_t options;
+ uint8_t options, priority;
int ret;
if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
dpaa2_ethq->cb = dpaa2_dev_process_parallel_event;
else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC)
dpaa2_ethq->cb = dpaa2_dev_process_atomic_event;
else
return -EINVAL;
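+ /* Same priority scaling as in the crypto event queue attach path. */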
+ priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) *
+ (dpcon->num_priorities - 1);
+
memset(&cfg, 0, sizeof(struct dpni_queue));
options = DPNI_QUEUE_OPT_DEST;
cfg.destination.type = DPNI_DEST_DPCON;
- cfg.destination.id = dpcon_id;
- cfg.destination.priority = queue_conf->ev.priority;
+ cfg.destination.id = dpcon->dpcon_id;
+ cfg.destination.priority = priority;
if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
options |= DPNI_QUEUE_OPT_HOLD_ACTIVE;
int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
int eth_rx_queue_id,
- uint16_t dpcon_id,
+ struct dpaa2_dpcon_dev *dpcon,
const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
int eth_rx_queue_id);
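
For reference, a standalone sketch of the new priority mapping, separate from the patch itself. It evaluates the exact expression added above for a few sample eventdev priorities, assuming a hypothetical DPCON that exposes 8 priority levels. Two caveats are visible in the arithmetic: the expression divides by the event priority, so RTE_EVENT_DEV_PRIORITY_HIGHEST (0) must not reach it, and for numerically small (high-importance) priorities the product can exceed num_priorities - 1.

#include <stdint.h>
#include <stdio.h>

#define RTE_EVENT_DEV_PRIORITY_LOWEST 255 /* value from rte_eventdev.h */

int main(void)
{
	uint8_t num_priorities = 8; /* assumed: a DPCON with 8 levels */
	uint8_t ev_prios[] = { 64, 128, 192, 255 };
	unsigned int i;

	for (i = 0; i < sizeof(ev_prios) / sizeof(ev_prios[0]); i++) {
		/* the exact expression the patch adds */
		uint8_t priority =
			(RTE_EVENT_DEV_PRIORITY_LOWEST / ev_prios[i]) *
			(num_priorities - 1);

		printf("event priority %3u -> dpcon priority %u\n",
		       ev_prios[i], priority);
	}
	return 0;
}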