When timvf is used as the event timer adapter, the event schedule type
``RTE_SCHED_TYPE_PARALLEL`` is not supported.
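+A minimal sketch of arming a timer with a supported schedule type, assuming
+``adptr`` is an event timer adapter created beforehand and the queue id and
+timeout are placeholder values:
+
+.. code-block:: c
+
+   struct rte_event_timer tim = {
+           .ev.op = RTE_EVENT_OP_NEW,
+           .ev.queue_id = 0,
+           /* RTE_SCHED_TYPE_PARALLEL is not supported by timvf. */
+           .ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
+           .state = RTE_EVENT_TIMER_NOT_ARMED,
+           .timeout_ticks = 30,
+   };
+   struct rte_event_timer *tims[] = { &tim };
+
+   rte_event_timer_arm_burst(adptr, tims, 1);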
-Max mempool size
-~~~~~~~~~~~~~~~~
+Max number of events
+~~~~~~~~~~~~~~~~~~~~
-Max mempool size when using OCTEON TX Eventdev (SSO) should be limited to 128K.
-When running dpdk-test-eventdev on OCTEON TX the application can limit the
-number of mbufs by using the option ``--pool_sz 131072``
+The maximum number of events in OCTEON TX Eventdev (SSO) is limited only by
+DRAM size and can be configured by passing a limit to the kernel bootargs as
+follows:
+
+.. code-block:: console
+
+ ssopf.max_events=4194304
+
+The same can be verified by looking at the following sysfs entry:
+
+.. code-block:: console
+
+ # cat /sys/module/ssopf/parameters/max_events
+ 4194304
+
+The total number of events that event adapters such as Rx and Timer add to
+SSO should stay within the value configured above.
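+
+The limit can also be read back at run time through
+``rte_event_dev_info_get()``; a short sketch, assuming ``dev_id`` identifies
+the OCTEON TX event device:
+
+.. code-block:: c
+
+   struct rte_event_dev_info info;
+
+   rte_event_dev_info_get(dev_id, &info);
+   /* info.max_num_events reflects the configured SSO event limit. */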
const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
- int ret = 0;
const struct octeontx_nic *nic = eth_dev->data->dev_private;
struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ uint16_t free_idx = UINT16_MAX;
+ struct octeontx_rxq *rxq;
pki_mod_qos_t pki_qos;
- RTE_SET_USED(dev);
+ uint8_t found = false;
+ int i, ret = 0;
+ void *old_ptr;
ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
if (ret)
return -EINVAL;
- if (rx_queue_id >= 0)
- return -EINVAL;
-
if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
return -ENOTSUP;
+	/* eth_octeontx supports only one Rx queue. */
+ rx_queue_id = rx_queue_id == -1 ? 0 : rx_queue_id;
+ rxq = eth_dev->data->rx_queues[rx_queue_id];
+ /* Add rxq pool to list of used pools and reduce available events. */
+ for (i = 0; i < edev->rxq_pools; i++) {
+ if (edev->rxq_pool_array[i] == (uintptr_t)rxq->pool) {
+ edev->rxq_pool_rcnt[i]++;
+ found = true;
+ break;
+ } else if (free_idx == UINT16_MAX &&
+ edev->rxq_pool_array[i] == 0) {
+ free_idx = i;
+ }
+ }
+
+ if (!found) {
+ uint16_t idx;
+
+		if (edev->available_events < rxq->pool->size) {
+			ssovf_log_err(
+				"Available events %"PRIu32" < requested rxq pool size %"PRIu32,
+				edev->available_events, rxq->pool->size);
+			return -ENOMEM;
+		}
+
+ if (free_idx != UINT16_MAX) {
+ idx = free_idx;
+ } else {
+ old_ptr = edev->rxq_pool_array;
+ edev->rxq_pools++;
+ edev->rxq_pool_array = rte_realloc(
+ edev->rxq_pool_array,
+ sizeof(uint64_t) * edev->rxq_pools, 0);
+ if (edev->rxq_pool_array == NULL) {
+ edev->rxq_pools--;
+ edev->rxq_pool_array = old_ptr;
+ return -ENOMEM;
+ }
+
+ old_ptr = edev->rxq_pool_rcnt;
+ edev->rxq_pool_rcnt = rte_realloc(
+ edev->rxq_pool_rcnt,
+ sizeof(uint8_t) * edev->rxq_pools, 0);
+ if (edev->rxq_pool_rcnt == NULL) {
+ edev->rxq_pools--;
+ edev->rxq_pool_rcnt = old_ptr;
+ return -ENOMEM;
+ }
+ idx = edev->rxq_pools - 1;
+ }
+
+ edev->rxq_pool_array[idx] = (uintptr_t)rxq->pool;
+ edev->rxq_pool_rcnt[idx] = 1;
+ edev->available_events -= rxq->pool->size;
+ }
+
memset(&pki_qos, 0, sizeof(pki_mod_qos_t));
pki_qos.port_type = 0;
ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
const struct rte_eth_dev *eth_dev, int32_t rx_queue_id)
{
- int ret = 0;
const struct octeontx_nic *nic = eth_dev->data->dev_private;
+ struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
+ struct octeontx_rxq *rxq;
pki_del_qos_t pki_qos;
- RTE_SET_USED(dev);
+ uint8_t found = false;
+ int i, ret = 0;
+
+ rx_queue_id = rx_queue_id == -1 ? 0 : rx_queue_id;
+ rxq = eth_dev->data->rx_queues[rx_queue_id];
+ for (i = 0; i < edev->rxq_pools; i++) {
+ if (edev->rxq_pool_array[i] == (uintptr_t)rxq->pool) {
+ found = true;
+ break;
+ }
+ }
+
+	if (found) {
+		edev->rxq_pool_rcnt[i]--;
+		if (edev->rxq_pool_rcnt[i] == 0) {
+			edev->rxq_pool_array[i] = 0;
+			/* Return the pool's events only when the last queue
+			 * using the pool is deleted, mirroring the single
+			 * decrement done in queue_add.
+			 */
+			edev->available_events += rxq->pool->size;
+		}
+	}
ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
if (ret)
}
eventdev->dev_ops = &ssovf_ops;
+ timvf_set_eventdevice(eventdev);
+
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
ssovf_fastpath_fns_set(eventdev);
edev->min_deq_timeout_ns = info.min_deq_timeout_ns;
edev->max_deq_timeout_ns = info.max_deq_timeout_ns;
edev->max_num_events = info.max_num_events;
- ssovf_log_dbg("min_deq_tmo=%"PRId64" max_deq_tmo=%"PRId64" max_evts=%d",
- info.min_deq_timeout_ns, info.max_deq_timeout_ns,
- info.max_num_events);
+ edev->available_events = info.max_num_events;
+
+ ssovf_log_dbg("min_deq_tmo=%" PRId64 " max_deq_tmo=%" PRId64
+ " max_evts=%d",
+ info.min_deq_timeout_ns, info.max_deq_timeout_ns,
+ info.max_num_events);
if (!edev->max_event_ports || !edev->max_event_queues) {
ssovf_log_err("Not enough eventdev resource queues=%d ports=%d",
uint32_t min_deq_timeout_ns;
uint32_t max_deq_timeout_ns;
int32_t max_num_events;
+	/* In-flight events still available to the adapters. */
+	uint32_t available_events;
+	/* Rx queue mempools tracked for event accounting. */
+	uint16_t rxq_pools;
+	uint64_t *rxq_pool_array;
+	uint8_t *rxq_pool_rcnt;
+	/* Timer rings tracked for event accounting. */
+	uint16_t tim_ring_cnt;
+	uint16_t *tim_ring_ids;
} __rte_cache_aligned;
/* Event port aka HWS */
* Copyright(c) 2017 Cavium, Inc
*/
+#include "ssovf_evdev.h"
#include "timvf_evdev.h"
RTE_LOG_REGISTER(otx_logtype_timvf, pmd.event.octeontx.timer, NOTICE);
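+/* Eventdev instance saved at probe time via timvf_set_eventdevice(), so the
+ * timer adapter can charge its rings against the shared SSO in-flight event
+ * budget (ssovf_evdev.available_events).
+ */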
+static struct rte_eventdev *event_dev;
+
struct __rte_packed timvf_mbox_dev_info {
uint64_t ring_active[4];
uint64_t clk_freq;
static int
timvf_ring_create(struct rte_event_timer_adapter *adptr)
{
- char pool_name[25];
- int ret;
- uint8_t tim_ring_id;
- uint64_t nb_timers;
struct rte_event_timer_adapter_conf *rcfg = &adptr->data->conf;
+ uint16_t free_idx = UINT16_MAX;
+ unsigned int mp_flags = 0;
+ struct ssovf_evdev *edev;
struct timvf_ring *timr;
const char *mempool_ops;
- unsigned int mp_flags = 0;
+ uint8_t tim_ring_id;
+ char pool_name[25];
+ int i, ret;
tim_ring_id = timvf_get_ring();
if (tim_ring_id == UINT8_MAX)
return -ENODEV;
+ edev = ssovf_pmd_priv(event_dev);
timr = rte_zmalloc("octeontx_timvf_priv",
sizeof(struct timvf_ring), 0);
if (timr == NULL)
timr->nb_bkts = (timr->max_tout / timr->tck_nsec);
timr->vbar0 = timvf_bar(timr->tim_ring_id, 0);
timr->bkt_pos = (uint8_t *)timr->vbar0 + TIM_VRING_REL;
- nb_timers = rcfg->nb_timers;
+ timr->nb_timers = rcfg->nb_timers;
timr->get_target_bkt = bkt_mod;
- timr->nb_chunks = nb_timers / nb_chunk_slots;
+	if (edev->available_events < timr->nb_timers) {
+		timvf_log_err(
+			"Available events %"PRIu32" < requested timer events %"PRIu64,
+			edev->available_events, timr->nb_timers);
+		return -ENOMEM;
+	}
+
+	/* Reuse a freed slot in the ring-id table if one exists. */
+	for (i = 0; i < edev->tim_ring_cnt; i++) {
+		if (edev->tim_ring_ids[i] == UINT16_MAX) {
+			free_idx = i;
+			break;
+		}
+	}
+
+	if (free_idx == UINT16_MAX) {
+		void *old_ptr;
+
+		edev->tim_ring_cnt++;
+		old_ptr = edev->tim_ring_ids;
+		edev->tim_ring_ids =
+			rte_realloc(edev->tim_ring_ids,
+				sizeof(uint16_t) * edev->tim_ring_cnt, 0);
+		if (edev->tim_ring_ids == NULL) {
+			edev->tim_ring_ids = old_ptr;
+			edev->tim_ring_cnt--;
+			return -ENOMEM;
+		}
+
+		/* rte_realloc() leaves the grown element uninitialized;
+		 * record the new ring id explicitly.
+		 */
+		edev->tim_ring_ids[edev->tim_ring_cnt - 1] = tim_ring_id;
+	} else {
+		edev->tim_ring_ids[free_idx] = tim_ring_id;
+	}
+
+	edev->available_events -= timr->nb_timers;
+
+ timr->nb_chunks = timr->nb_timers / nb_chunk_slots;
/* Try to optimize the bucket parameters. */
if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
timvf_ring_free(struct rte_event_timer_adapter *adptr)
{
struct timvf_ring *timr = adptr->data->adapter_priv;
+ struct ssovf_evdev *edev;
+ int i;
+
+ edev = ssovf_pmd_priv(event_dev);
+ for (i = 0; i < edev->tim_ring_cnt; i++) {
+ if (edev->tim_ring_ids[i] == timr->tim_ring_id) {
+ edev->available_events += timr->nb_timers;
+ edev->tim_ring_ids[i] = UINT16_MAX;
+ break;
+ }
+ }
rte_mempool_free(timr->chunk_pool);
rte_free(timr->bkt);
*ops = &timvf_ops;
return 0;
}
+
+void
+timvf_set_eventdevice(struct rte_eventdev *dev)
+{
+ event_dev = dev;
+}
void *bkt_pos;
uint64_t max_tout;
uint64_t nb_chunks;
+ uint64_t nb_timers;
enum timvf_clk_src clk_src;
uint16_t tim_ring_id;
} __rte_cache_aligned;
struct rte_event_timer **tim, const uint64_t timeout_tick,
const uint16_t nb_timers);
void timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa);
+void timvf_set_eventdevice(struct rte_eventdev *dev);
#endif /* __TIMVF_EVDEV_H__ */