return err;
}
+static int
+adapter_create_with_params(void)
+{
+ int err;
+ struct rte_event_dev_info dev_info;
+ struct rte_event_port_conf rx_p_conf;
+ struct rte_event_eth_rx_adapter_params rxa_params;
+
+ memset(&rx_p_conf, 0, sizeof(rx_p_conf));
+
+ err = rte_event_dev_info_get(TEST_DEV_ID, &dev_info);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ rx_p_conf.new_event_threshold = dev_info.max_num_events;
+ rx_p_conf.dequeue_depth = dev_info.max_event_port_dequeue_depth;
+ rx_p_conf.enqueue_depth = dev_info.max_event_port_enqueue_depth;
+
+ rxa_params.use_queue_event_buf = false;
+ rxa_params.event_buf_size = 0;
+
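+ /* Expect -EINVAL: the adapter level event buffer needs a
+ * non-zero size when use_queue_event_buf is false
+ */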
+ err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID,
+ TEST_DEV_ID, &rx_p_conf, &rxa_params);
+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
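+ /* Expect success: per-queue event buffers with a zero
+ * adapter level buffer size is a valid combination
+ */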
+ rxa_params.use_queue_event_buf = true;
+
+ err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID,
+ TEST_DEV_ID, &rx_p_conf, &rxa_params);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
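+ /* Expect -EEXIST: the adapter instance was already created */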
+ err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID,
+ TEST_DEV_ID, &rx_p_conf, &rxa_params);
+ TEST_ASSERT(err == -EEXIST, "Expected -EEXIST got %d", err);
+
+ return TEST_SUCCESS;
+}
+
+static int
+adapter_queue_event_buf_test(void)
+{
+ int err;
+ struct rte_event ev = {0};
+ uint32_t cap;
+
+ struct rte_event_eth_rx_adapter_queue_conf queue_config = {0};
+
+ err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
+ &cap);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ ev.queue_id = 0;
+ ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
+ ev.priority = 0;
+
+ queue_config.rx_queue_flags = 0;
+ if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID) {
+ ev.flow_id = 1;
+ queue_config.rx_queue_flags =
+ RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID;
+ }
+ queue_config.ev = ev;
+ queue_config.servicing_weight = 1;
+ queue_config.event_buf_size = 0;
+
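+ /* Expect -EINVAL: per-queue buffer mode requires a non-zero
+ * queue event buffer size
+ */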
+ err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+ TEST_ETHDEV_ID, 0,
+ &queue_config);
+ TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+ queue_config.event_buf_size = 1024;
+
+ err = rte_event_eth_rx_adapter_queue_add(TEST_INST_ID,
+ TEST_ETHDEV_ID, 0,
+ &queue_config);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ err = rte_event_eth_rx_adapter_queue_del(TEST_INST_ID,
+ TEST_ETHDEV_ID,
+ 0);
+ TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+ return TEST_SUCCESS;
+}
+
static void
adapter_free(void)
{
TEST_CASE_ST(adapter_create, adapter_free, adapter_start_stop),
TEST_CASE_ST(adapter_create, adapter_free, adapter_stats),
TEST_CASE_ST(adapter_create, adapter_free, adapter_queue_conf),
+ TEST_CASE_ST(adapter_create_with_params, adapter_free,
+ adapter_queue_event_buf_test),
TEST_CASES_END() /**< NULL terminate unit test array */
}
};
uint8_t rss_key_be[RSS_KEY_SIZE];
/* Event device identifier */
uint8_t eventdev_id;
- /* Per ethernet device structure */
- struct eth_device_info *eth_devices;
/* Event port identifier */
uint8_t event_port_id;
+ /* Flag indicating per Rx queue event buffer */
+ bool use_queue_event_buf;
+ /* Per ethernet device structure */
+ struct eth_device_info *eth_devices;
/* Lock to serialize config updates with service function */
rte_spinlock_t rx_lock;
/* Max mbufs processed in any service function invocation */
uint32_t flow_id_mask; /* Set to ~0 if app provides flow id else 0 */
uint64_t event;
struct eth_rx_vector_data vector_data;
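+ /* Event enqueue buffer for this Rx queue, used when the
+ * adapter is created with use_queue_event_buf set
+ */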
+ struct rte_eth_event_enqueue_buffer *event_buf;
};
static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
return id < RTE_EVENT_ETH_RX_ADAPTER_MAX_INSTANCE;
}
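+/* Return the per Rx queue event buffer when per-queue buffers are in
+ * use, else the common adapter level event buffer.
+ */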
+static inline struct rte_eth_event_enqueue_buffer *
+rxa_event_buf_get(struct rte_event_eth_rx_adapter *rx_adapter,
+ uint16_t eth_dev_id, uint16_t rx_queue_id)
+{
+ if (rx_adapter->use_queue_event_buf) {
+ struct eth_device_info *dev_info =
+ &rx_adapter->eth_devices[eth_dev_id];
+ return dev_info->rx_queue[rx_queue_id].event_buf;
+ } else {
+ return &rx_adapter->event_enqueue_buffer;
+ }
+}
+
#define RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, retval) do { \
if (!rxa_validate_id(id)) { \
RTE_EDEV_LOG_ERR("Invalid eth Rx adapter id = %d\n", id); \
/* Enqueue buffered events to event device */
static inline uint16_t
-rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
+rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
+ struct rte_eth_event_enqueue_buffer *buf)
{
- struct rte_eth_event_enqueue_buffer *buf =
- &rx_adapter->event_enqueue_buffer;
struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
uint16_t count = buf->last ? buf->last - buf->head : buf->count;
uint16_t eth_dev_id,
uint16_t rx_queue_id,
struct rte_mbuf **mbufs,
- uint16_t num)
+ uint16_t num,
+ struct rte_eth_event_enqueue_buffer *buf)
{
uint32_t i;
struct eth_device_info *dev_info =
&rx_adapter->eth_devices[eth_dev_id];
struct eth_rx_queue_info *eth_rx_queue_info =
&dev_info->rx_queue[rx_queue_id];
- struct rte_eth_event_enqueue_buffer *buf =
- &rx_adapter->event_enqueue_buffer;
uint16_t new_tail = buf->tail;
uint64_t event = eth_rx_queue_info->event;
uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
uint16_t queue_id,
uint32_t rx_count,
uint32_t max_rx,
- int *rxq_empty)
+ int *rxq_empty,
+ struct rte_eth_event_enqueue_buffer *buf)
{
struct rte_mbuf *mbufs[BATCH_SIZE];
- struct rte_eth_event_enqueue_buffer *buf =
- &rx_adapter->event_enqueue_buffer;
struct rte_event_eth_rx_adapter_stats *stats =
&rx_adapter->stats;
uint16_t n;
*/
while (rxa_pkt_buf_available(buf)) {
if (buf->count >= BATCH_SIZE)
- rxa_flush_event_buffer(rx_adapter);
+ rxa_flush_event_buffer(rx_adapter, buf);
stats->rx_poll_count++;
n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
*rxq_empty = 1;
break;
}
- rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n);
+ rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf);
nb_rx += n;
if (rx_count + nb_rx > max_rx)
break;
}
if (buf->count > 0)
- rxa_flush_event_buffer(rx_adapter);
+ rxa_flush_event_buffer(rx_adapter, buf);
return nb_rx;
}
ring_lock = &rx_adapter->intr_ring_lock;
if (buf->count >= BATCH_SIZE)
- rxa_flush_event_buffer(rx_adapter);
+ rxa_flush_event_buffer(rx_adapter, buf);
while (rxa_pkt_buf_available(buf)) {
struct eth_device_info *dev_info;
continue;
n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
rx_adapter->max_nb_rx,
- &rxq_empty);
+ &rxq_empty, buf);
nb_rx += n;
enq_buffer_full = !rxq_empty && n == 0;
} else {
n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
rx_adapter->max_nb_rx,
- &rxq_empty);
+ &rxq_empty, buf);
rx_adapter->qd_valid = !rxq_empty;
nb_rx += n;
if (nb_rx > rx_adapter->max_nb_rx)
{
uint32_t num_queue;
uint32_t nb_rx = 0;
- struct rte_eth_event_enqueue_buffer *buf;
+ struct rte_eth_event_enqueue_buffer *buf = NULL;
uint32_t wrr_pos;
uint32_t max_nb_rx;
wrr_pos = rx_adapter->wrr_pos;
max_nb_rx = rx_adapter->max_nb_rx;
- buf = &rx_adapter->event_enqueue_buffer;
/* Iterate through a WRR sequence */
for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
+ buf = rxa_event_buf_get(rx_adapter, d, qid);
+
/* Don't do a batch dequeue from the rx queue if there isn't
* enough space in the enqueue buffer.
*/
if (buf->count >= BATCH_SIZE)
- rxa_flush_event_buffer(rx_adapter);
+ rxa_flush_event_buffer(rx_adapter, buf);
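+ /* A full per-queue buffer only skips this queue, while a
+ * full shared buffer stops the poll sequence entirely.
+ */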
if (!rxa_pkt_buf_available(buf)) {
- rx_adapter->wrr_pos = wrr_pos;
- return nb_rx;
+ if (rx_adapter->use_queue_event_buf) {
+ goto poll_next_entry;
+ } else {
+ rx_adapter->wrr_pos = wrr_pos;
+ return nb_rx;
+ }
}
nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
- NULL);
+ NULL, buf);
if (nb_rx > max_nb_rx) {
rx_adapter->wrr_pos =
(wrr_pos + 1) % rx_adapter->wrr_len;
break;
}
+poll_next_entry:
if (++wrr_pos == rx_adapter->wrr_len)
wrr_pos = 0;
}
rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
{
struct rte_event_eth_rx_adapter *rx_adapter = arg;
- struct rte_eth_event_enqueue_buffer *buf =
- &rx_adapter->event_enqueue_buffer;
+ struct rte_eth_event_enqueue_buffer *buf = NULL;
struct rte_event *ev;
+ buf = rxa_event_buf_get(rx_adapter, vec->port, vec->queue);
+
if (buf->count)
- rxa_flush_event_buffer(rx_adapter);
+ rxa_flush_event_buffer(rx_adapter, buf);
if (vec->vector_ev->nb_elem == 0)
return;
rx_adapter->num_rx_intr -= intrq;
dev_info->nb_rx_intr -= intrq;
dev_info->nb_shared_intr -= intrq && sintrq;
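+ /* Free the per Rx queue event buffer allocated in rxa_add_queue() */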
+ if (rx_adapter->use_queue_event_buf) {
+ struct rte_eth_event_enqueue_buffer *event_buf =
+ dev_info->rx_queue[rx_queue_id].event_buf;
+ rte_free(event_buf->events);
+ rte_free(event_buf);
+ dev_info->rx_queue[rx_queue_id].event_buf = NULL;
+ }
}
-static void
+static int
rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
struct eth_device_info *dev_info,
int32_t rx_queue_id,
int intrq;
int sintrq;
struct rte_event *qi_ev;
+ struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL;
+ uint16_t eth_dev_id = dev_info->dev->data->port_id;
+ int ret;
if (rx_queue_id == -1) {
uint16_t nb_rx_queues;
uint16_t i;
nb_rx_queues = dev_info->dev->data->nb_rx_queues;
- for (i = 0; i < nb_rx_queues; i++)
- rxa_add_queue(rx_adapter, dev_info, i, conf);
- return;
+ for (i = 0; i < nb_rx_queues; i++) {
+ ret = rxa_add_queue(rx_adapter, dev_info, i, conf);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
pollq = rxa_polled_queue(dev_info, rx_queue_id);
dev_info->next_q_idx = 0;
}
}
+
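+ /* Nothing else to do unless a per Rx queue event buffer is used */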
+ if (!rx_adapter->use_queue_event_buf)
+ return 0;
+
+ new_rx_buf = rte_zmalloc_socket("rx_buffer_meta",
+ sizeof(*new_rx_buf), 0,
+ rte_eth_dev_socket_id(eth_dev_id));
+ if (new_rx_buf == NULL) {
+ RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for "
+ "dev_id: %d queue_id: %d",
+ eth_dev_id, rx_queue_id);
+ return -ENOMEM;
+ }
+
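+ /* Size the buffer as a multiple of BATCH_SIZE with 2 * BATCH_SIZE
+ * of headroom, mirroring the sizing of the shared adapter buffer,
+ * for full buffer utilization.
+ */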
+ new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE);
+ new_rx_buf->events_size += (2 * BATCH_SIZE);
+ new_rx_buf->events = rte_zmalloc_socket("rx_buffer",
+ sizeof(struct rte_event) *
+ new_rx_buf->events_size, 0,
+ rte_eth_dev_socket_id(eth_dev_id));
+ if (new_rx_buf->events == NULL) {
+ rte_free(new_rx_buf);
+ RTE_EDEV_LOG_ERR("Failed to allocate event buffer for "
+ "dev_id: %d queue_id: %d",
+ eth_dev_id, rx_queue_id);
+ return -ENOMEM;
+ }
+
+ queue_info->event_buf = new_rx_buf;
+
+ return 0;
}
static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
temp_conf.servicing_weight = 1;
}
queue_conf = &temp_conf;
+
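+ /* A zero servicing weight denotes an interrupt-driven queue,
+ * which does not support a per-queue event buffer
+ */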
+ if (queue_conf->servicing_weight == 0 &&
+ rx_adapter->use_queue_event_buf) {
+ RTE_EDEV_LOG_ERR("Use of queue level event buffer "
+ "not supported for interrupt queues "
+ "dev_id: %d queue_id: %d",
+ eth_dev_id, rx_queue_id);
+ return -EINVAL;
+ }
}
nb_rx_queues = dev_info->dev->data->nb_rx_queues;
- rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
+ ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
+ if (ret)
+ goto err_free_rxqueue;
rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
rte_free(rx_adapter->eth_rx_poll);
rte_free(rx_poll);
rte_free(rx_wrr);
- return 0;
+ return ret;
}
static int
rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
/* Rx adapter event buffer allocation */
- buf = &rx_adapter->event_enqueue_buffer;
- buf->events_size = rxa_params->event_buf_size;
-
- events = rte_zmalloc_socket(rx_adapter->mem_name,
- buf->events_size * sizeof(*events),
- 0, socket_id);
- if (events == NULL) {
- RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n");
- rte_free(rx_adapter->eth_devices);
- rte_free(rx_adapter);
- return -ENOMEM;
- }
+ rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf;
+
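+ /* The common adapter level event buffer is needed only when
+ * per-queue event buffers are not in use
+ */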
+ if (!rx_adapter->use_queue_event_buf) {
+ buf = &rx_adapter->event_enqueue_buffer;
+ buf->events_size = rxa_params->event_buf_size;
+
+ events = rte_zmalloc_socket(rx_adapter->mem_name,
+ buf->events_size * sizeof(*events),
+ 0, socket_id);
+ if (events == NULL) {
+ RTE_EDEV_LOG_ERR("Failed to allocate memory "
+ "for adapter event buffer");
+ rte_free(rx_adapter->eth_devices);
+ rte_free(rx_adapter);
+ return -ENOMEM;
+ }
- rx_adapter->event_enqueue_buffer.events = events;
+ rx_adapter->event_enqueue_buffer.events = events;
+ }
event_eth_rx_adapter[id] = rx_adapter;
/* use default values for adapter params */
rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE;
+ rxa_params.use_queue_event_buf = false;
return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg);
}
if (port_config == NULL)
return -EINVAL;
- /* use default values if rxa_params is NULL */
if (rxa_params == NULL) {
+ /* use default values if rxa_params is NULL */
rxa_params = &temp_params;
rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE;
- }
-
- if (rxa_params->event_buf_size == 0)
+ rxa_params->use_queue_event_buf = false;
+ } else if ((!rxa_params->use_queue_event_buf &&
+ rxa_params->event_buf_size == 0) ||
+ (rxa_params->use_queue_event_buf &&
+ rxa_params->event_buf_size != 0)) {
+ RTE_EDEV_LOG_ERR("Invalid adapter params");
return -EINVAL;
+ } else if (!rxa_params->use_queue_event_buf) {
+ /* Adjust the event buffer size with the BATCH_SIZE used for
+ * fetching packets from NIC Rx queues, to get full buffer
+ * utilization and prevent unnecessary rollovers.
+ */
+ rxa_params->event_buf_size =
+ RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE);
+ rxa_params->event_buf_size += (2 * BATCH_SIZE);
+ }
pc = rte_malloc(NULL, sizeof(*pc), 0);
if (pc == NULL)
*pc = *port_config;
- /* adjust event buff size with BATCH_SIZE used for fetching packets
- * from NIC rx queues to get full buffer utilization and prevent
- * unnecessary rollovers.
- */
- rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size,
- BATCH_SIZE);
- rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE);
-
ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc);
if (ret)
rte_free(pc);
if (rx_adapter->default_cb_arg)
rte_free(rx_adapter->conf_arg);
rte_free(rx_adapter->eth_devices);
- rte_free(rx_adapter->event_enqueue_buffer.events);
+ if (!rx_adapter->use_queue_event_buf)
+ rte_free(rx_adapter->event_enqueue_buffer.events);
rte_free(rx_adapter);
event_eth_rx_adapter[id] = NULL;
return -EINVAL;
}
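+ /* A queue level event buffer size is required in per-queue
+ * mode and must be zero otherwise
+ */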
+ if ((rx_adapter->use_queue_event_buf &&
+ queue_conf->event_buf_size == 0) ||
+ (!rx_adapter->use_queue_event_buf &&
+ queue_conf->event_buf_size != 0)) {
+ RTE_EDEV_LOG_ERR("Invalid event buffer size for the queue");
+ return -EINVAL;
+ }
+
dev_info = &rx_adapter->eth_devices[eth_dev_id];
if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {