return TEST_SUCCESS;
}
+/* Verify argument checking and the create/free life cycle of
+ * rte_event_eth_rx_adapter_create_with_params().
+ */
+static int
+adapter_create_free_with_params(void)
+{
+	int err;
+
+	/* Event port config passed through to the adapter's conf callback */
+	struct rte_event_port_conf rx_p_conf = {
+		.dequeue_depth = 8,
+		.enqueue_depth = 8,
+		.new_event_threshold = 1200,
+	};
+
+	/* Request a non-default event buffer size */
+	struct rte_event_eth_rx_adapter_params rxa_params = {
+		.event_buf_size = 1024
+	};
+
+	/* NULL port config must be rejected */
+	err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID,
+				TEST_DEV_ID, NULL, NULL);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+	/* Valid creation must succeed */
+	err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID,
+				TEST_DEV_ID, &rx_p_conf, &rxa_params);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	/* Creating the same adapter id twice must fail */
+	err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID,
+				TEST_DEV_ID, &rx_p_conf, &rxa_params);
+	TEST_ASSERT(err == -EEXIST, "Expected -EEXIST %d got %d", -EEXIST, err);
+
+	/* A zero event buffer size must be rejected */
+	rxa_params.event_buf_size = 0;
+	err = rte_event_eth_rx_adapter_create_with_params(TEST_INST_ID,
+				TEST_DEV_ID, &rx_p_conf, &rxa_params);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL got %d", err);
+
+	/* First free succeeds; a second free of the same id must fail */
+	err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
+	TEST_ASSERT(err == 0, "Expected 0 got %d", err);
+
+	err = rte_event_eth_rx_adapter_free(TEST_INST_ID);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
+
+	/* Freeing an id that was never created must fail
+	 * (assumes TEST_INST_ID != 1 -- TODO confirm)
+	 */
+	err = rte_event_eth_rx_adapter_free(1);
+	TEST_ASSERT(err == -EINVAL, "Expected -EINVAL %d got %d", -EINVAL, err);
+
+	return TEST_SUCCESS;
+}
+
static int
adapter_queue_add_del(void)
{
struct rte_event ev;
uint32_t cap;
- struct rte_event_eth_rx_adapter_queue_conf queue_config;
+ struct rte_event_eth_rx_adapter_queue_conf queue_config = {0};
err = rte_event_eth_rx_adapter_caps_get(TEST_DEV_ID, TEST_ETHDEV_ID,
&cap);
uint16_t port_index, port_index_base, drv_id = 0;
char driver_name[50];
- struct rte_event_eth_rx_adapter_queue_conf queue_config;
+ struct rte_event_eth_rx_adapter_queue_conf queue_config = {0};
ev.queue_id = 0;
ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
struct rte_event ev;
uint32_t cap;
uint16_t eth_port;
- struct rte_event_eth_rx_adapter_queue_conf queue_config;
+ struct rte_event_eth_rx_adapter_queue_conf queue_config = {0};
if (!default_params.rx_intr_port_inited)
return 0;
ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
ev.priority = 0;
- struct rte_event_eth_rx_adapter_queue_conf queue_config;
+ struct rte_event_eth_rx_adapter_queue_conf queue_config = {0};
queue_config.rx_queue_flags = 0;
if (default_params.caps &
.teardown = testsuite_teardown,
.unit_test_cases = {
TEST_CASE_ST(NULL, NULL, adapter_create_free),
+ TEST_CASE_ST(NULL, NULL, adapter_create_free_with_params),
TEST_CASE_ST(adapter_create, adapter_free,
adapter_queue_add_del),
TEST_CASE_ST(adapter_create, adapter_free,
expected to fill the ``struct rte_event_eth_rx_adapter_conf structure``
passed to it.
+If the application desires to control the event buffer size, it can use the
+``rte_event_eth_rx_adapter_create_with_params()`` API. The event buffer size is
+specified using ``struct rte_event_eth_rx_adapter_params::event_buf_size``.
+The function is passed the event device to be associated with the adapter
+and port configuration for the adapter to set up an event port if the
+adapter needs to use a service function.
+
Adding Rx Queues to the Adapter Instance
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/* Count of events in this buffer */
uint16_t count;
/* Array of events in this buffer */
- struct rte_event events[ETH_EVENT_BUFFER_SIZE];
+ struct rte_event *events;
+ /* size of event buffer */
+ uint16_t events_size;
/* Event enqueue happens from head */
uint16_t head;
/* New packets from rte_eth_rx_burst is enqued from tail */
dropped = 0;
nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id,
buf->last |
- (RTE_DIM(buf->events) & ~buf->last_mask),
+ (buf->events_size & ~buf->last_mask),
buf->count >= BATCH_SIZE ?
buf->count - BATCH_SIZE : 0,
&buf->events[buf->tail],
uint32_t nb_req = buf->tail + BATCH_SIZE;
if (!buf->last) {
- if (nb_req <= RTE_DIM(buf->events))
+ if (nb_req <= buf->events_size)
return true;
if (buf->head >= BATCH_SIZE) {
return 0;
}
-int
-rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
- rte_event_eth_rx_adapter_conf_cb conf_cb,
- void *conf_arg)
+static int
+rxa_create(uint8_t id, uint8_t dev_id,
+ struct rte_event_eth_rx_adapter_params *rxa_params,
+ rte_event_eth_rx_adapter_conf_cb conf_cb,
+ void *conf_arg)
{
struct rte_event_eth_rx_adapter *rx_adapter;
+ struct rte_eth_event_enqueue_buffer *buf;
+ struct rte_event *events;
int ret;
int socket_id;
uint16_t i;
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+
if (conf_cb == NULL)
return -EINVAL;
rte_free(rx_adapter);
return -ENOMEM;
}
+
rte_spinlock_init(&rx_adapter->rx_lock);
+
for (i = 0; i < RTE_MAX_ETHPORTS; i++)
rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
+ /* Rx adapter event buffer allocation */
+ buf = &rx_adapter->event_enqueue_buffer;
+ buf->events_size = rxa_params->event_buf_size;
+
+ events = rte_zmalloc_socket(rx_adapter->mem_name,
+ buf->events_size * sizeof(*events),
+ 0, socket_id);
+ if (events == NULL) {
+ RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n");
+ rte_free(rx_adapter->eth_devices);
+ rte_free(rx_adapter);
+ return -ENOMEM;
+ }
+
+ rx_adapter->event_enqueue_buffer.events = events;
+
event_eth_rx_adapter[id] = rx_adapter;
+
if (conf_cb == rxa_default_conf_cb)
rx_adapter->default_cb_arg = 1;
return 0;
}
+/* Create an adapter with a caller-supplied configuration callback.
+ * Adapter params are not exposed through this entry point; the default
+ * event buffer size (ETH_EVENT_BUFFER_SIZE) is passed to rxa_create().
+ */
+int
+rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
+				rte_event_eth_rx_adapter_conf_cb conf_cb,
+				void *conf_arg)
+{
+	struct rte_event_eth_rx_adapter_params rxa_params = {0};
+
+	/* use default values for adapter params */
+	rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE;
+
+	return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg);
+}
+
+/* Variant of rte_event_eth_rx_adapter_create() that lets the caller
+ * supply adapter params (currently only the event buffer size).
+ *
+ * The requested event_buf_size is rounded up to a multiple of BATCH_SIZE
+ * and grown by 2 * BATCH_SIZE so buffer fills from the NIC always have
+ * room for a full burst. The adjustment is applied to a local copy so
+ * the caller's rxa_params structure is never modified.
+ *
+ * Returns 0 on success, -EINVAL on bad arguments, -ENOMEM on allocation
+ * failure, or the error code from rxa_create().
+ */
+int
+rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id,
+			struct rte_event_port_conf *port_config,
+			struct rte_event_eth_rx_adapter_params *rxa_params)
+{
+	struct rte_event_port_conf *pc;
+	int ret;
+	struct rte_event_eth_rx_adapter_params temp_params = {0};
+
+	if (port_config == NULL)
+		return -EINVAL;
+
+	/* use default values if rxa_params is NULL, otherwise work on a
+	 * copy so the caller's structure is not mutated by the buffer
+	 * size adjustment below.
+	 */
+	if (rxa_params == NULL) {
+		temp_params.event_buf_size = ETH_EVENT_BUFFER_SIZE;
+	} else {
+		if (rxa_params->event_buf_size == 0)
+			return -EINVAL;
+		temp_params = *rxa_params;
+	}
+	rxa_params = &temp_params;
+
+	pc = rte_malloc(NULL, sizeof(*pc), 0);
+	if (pc == NULL)
+		return -ENOMEM;
+
+	*pc = *port_config;
+
+	/* adjust event buff size with BATCH_SIZE used for fetching packets
+	 * from NIC rx queues to get full buffer utilization and prevent
+	 * unnecessary rollovers.
+	 */
+	rxa_params->event_buf_size = RTE_ALIGN(rxa_params->event_buf_size,
+					       BATCH_SIZE);
+	rxa_params->event_buf_size += (BATCH_SIZE + BATCH_SIZE);
+
+	ret = rxa_create(id, dev_id, rxa_params, rxa_default_conf_cb, pc);
+	if (ret)
+		rte_free(pc);
+
+	return ret;
+}
+
int
rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
struct rte_event_port_conf *port_config)
if (port_config == NULL)
return -EINVAL;
+
RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
pc = rte_malloc(NULL, sizeof(*pc), 0);
if (pc == NULL)
return -ENOMEM;
*pc = *port_config;
+
ret = rte_event_eth_rx_adapter_create_ext(id, dev_id,
rxa_default_conf_cb,
pc);
if (rx_adapter->default_cb_arg)
rte_free(rx_adapter->conf_arg);
rte_free(rx_adapter->eth_devices);
+ rte_free(rx_adapter->event_enqueue_buffer.events);
rte_free(rx_adapter);
event_eth_rx_adapter[id] = NULL;
stats->rx_packets += dev_stats_sum.rx_packets;
stats->rx_enq_count += dev_stats_sum.rx_enq_count;
+
return 0;
}
* The ethernet Rx event adapter's functions are:
* - rte_event_eth_rx_adapter_create_ext()
* - rte_event_eth_rx_adapter_create()
+ * - rte_event_eth_rx_adapter_create_with_params()
* - rte_event_eth_rx_adapter_free()
* - rte_event_eth_rx_adapter_queue_add()
* - rte_event_eth_rx_adapter_queue_del()
*
* The application creates an ethernet to event adapter using
* rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create()
- * functions.
+ * or rte_event_eth_rx_adapter_create_with_params() functions.
* The adapter needs to know which ethernet rx queues to poll for mbufs as well
* as event device parameters such as the event queue identifier, event
* priority and scheduling type that the adapter should use when constructing
*/
};
+/**
+ * Adapter configuration parameters passed to
+ * rte_event_eth_rx_adapter_create_with_params().
+ */
+struct rte_event_eth_rx_adapter_params {
+	uint16_t event_buf_size;
+	/**< size of event buffer for the adapter.
+	 * Must be non-zero; a value of 0 is rejected with -EINVAL.
+	 * This value is rounded up internally for better buffer
+	 * utilization and performance.
+	 */
+};
+
/**
*
* Callback function invoked by the SW adapter before it continues
int rte_event_eth_rx_adapter_create(uint8_t id, uint8_t dev_id,
struct rte_event_port_conf *port_config);
+/**
+ * This is a variant of rte_event_eth_rx_adapter_create() with additional
+ * adapter params specified in ``struct rte_event_eth_rx_adapter_params``.
+ *
+ * @param id
+ *  The identifier of the ethernet Rx event adapter.
+ *
+ * @param dev_id
+ *  The identifier of the event device to configure.
+ *
+ * @param port_config
+ *  Argument of type *rte_event_port_conf* that is passed to the conf_cb
+ *  function.
+ *
+ * @param rxa_params
+ *  Pointer to struct rte_event_eth_rx_adapter_params.
+ *  event_buf_size must be non-zero; the implementation rounds the
+ *  requested size up for buffer utilization.
+ *  In case of NULL, default values are used.
+ *
+ * @return
+ *   - 0: Success
+ *   - <0: Error code on failure
+ */
+__rte_experimental
+int rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id,
+			struct rte_event_port_conf *port_config,
+			struct rte_event_eth_rx_adapter_params *rxa_params);
+
/**
* Free an event adapter
*
__rte_eventdev_trace_port_setup;
# added in 20.11
rte_event_pmd_pci_probe_named;
+ # added in 21.11
+ rte_event_eth_rx_adapter_create_with_params;
#added in 21.05
rte_event_vector_pool_create;