event/cnxk: add common configuration validation
author: Pavan Nikhilesh <pbhagavatula@marvell.com>
Tue, 4 May 2021 00:26:56 +0000 (05:56 +0530)
committer: Jerin Jacob <jerinj@marvell.com>
Tue, 4 May 2021 03:21:25 +0000 (05:21 +0200)
Add configuration validation, port and queue configuration
functions.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Signed-off-by: Shijith Thotton <sthotton@marvell.com>
drivers/event/cnxk/cnxk_eventdev.c
drivers/event/cnxk/cnxk_eventdev.h

index 3a7053a..3eab1ed 100644 (file)
@@ -28,6 +28,76 @@ cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
                                  RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
 }
 
+int
+cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
+{
+       struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+       uint32_t deq_tmo_ns;
+
+       deq_tmo_ns = conf->dequeue_timeout_ns;
+
+       if (deq_tmo_ns == 0)
+               deq_tmo_ns = dev->min_dequeue_timeout_ns;
+       if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
+           deq_tmo_ns > dev->max_dequeue_timeout_ns) {
+               plt_err("Unsupported dequeue timeout requested");
+               return -EINVAL;
+       }
+
+       if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
+               dev->is_timeout_deq = 1;
+
+       dev->deq_tmo_ns = deq_tmo_ns;
+
+       if (!conf->nb_event_queues || !conf->nb_event_ports ||
+           conf->nb_event_ports > dev->max_event_ports ||
+           conf->nb_event_queues > dev->max_event_queues) {
+               plt_err("Unsupported event queues/ports requested");
+               return -EINVAL;
+       }
+
+       if (conf->nb_event_port_dequeue_depth > 1) {
+               plt_err("Unsupported event port deq depth requested");
+               return -EINVAL;
+       }
+
+       if (conf->nb_event_port_enqueue_depth > 1) {
+               plt_err("Unsupported event port enq depth requested");
+               return -EINVAL;
+       }
+
+       dev->nb_event_queues = conf->nb_event_queues;
+       dev->nb_event_ports = conf->nb_event_ports;
+
+       return 0;
+}
+
+void
+cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
+                       struct rte_event_queue_conf *queue_conf)
+{
+       RTE_SET_USED(event_dev);
+       RTE_SET_USED(queue_id);
+
+       queue_conf->nb_atomic_flows = (1ULL << 20);
+       queue_conf->nb_atomic_order_sequences = (1ULL << 20);
+       queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
+       queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+}
+
+void
+cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
+                      struct rte_event_port_conf *port_conf)
+{
+       struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+
+       RTE_SET_USED(port_id);
+       port_conf->new_event_threshold = dev->max_num_events;
+       port_conf->dequeue_depth = 1;
+       port_conf->enqueue_depth = 1;
+}
+
 int
 cnxk_sso_init(struct rte_eventdev *event_dev)
 {
index 6bdf0b3..59d96a0 100644 (file)
@@ -22,6 +22,7 @@ struct cnxk_sso_evdev {
        uint8_t is_timeout_deq;
        uint8_t nb_event_queues;
        uint8_t nb_event_ports;
+       uint32_t deq_tmo_ns;
        uint32_t min_dequeue_timeout_ns;
        uint32_t max_dequeue_timeout_ns;
        int32_t max_num_events;
@@ -41,5 +42,10 @@ int cnxk_sso_fini(struct rte_eventdev *event_dev);
 int cnxk_sso_remove(struct rte_pci_device *pci_dev);
 void cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
                       struct rte_event_dev_info *dev_info);
+int cnxk_sso_dev_validate(const struct rte_eventdev *event_dev);
+void cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
+                            struct rte_event_queue_conf *queue_conf);
+void cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
+                           struct rte_event_port_conf *port_conf);
 
 #endif /* __CNXK_EVENTDEV_H__ */