+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation.
+ * All rights reserved.
+ */
#include <rte_cycles.h>
#include <rte_common.h>
#include <rte_dev.h>
*/
/* One entry of the adapter's Rx poll table: an (eth port, rx queue)
 * pair serviced by the adapter's WRR poll loop (see eth_poll_wrr_calc
 * / eth_rx_poll below).
 */
struct eth_rx_poll_entry {
/* Eth port to poll; uint16_t to match the ethdev 16-bit port id */
- uint8_t eth_dev_id;
+ uint16_t eth_dev_id;
/* Eth rx queue to poll */
uint16_t eth_rx_qid;
};
struct rte_eth_event_enqueue_buffer event_enqueue_buffer;
/* Per adapter stats */
struct rte_event_eth_rx_adapter_stats stats;
- /* Block count, counts upto BLOCK_CNT_THRESHOLD */
+ /* Block count, counts up to BLOCK_CNT_THRESHOLD */
uint16_t enq_block_count;
/* Block start ts */
uint64_t rx_enq_block_start_ts;
int socket_id;
/* Per adapter EAL service */
uint32_t service_id;
+ /* Adapter started flag */
+ uint8_t rxa_started;
} __rte_cache_aligned;
/* Per eth device */
while (1) {
uint16_t q;
- uint8_t d;
+ uint16_t d;
i = (i + 1) % n;
if (i == 0) {
static int
eth_poll_wrr_calc(struct rte_event_eth_rx_adapter *rx_adapter)
{
- uint8_t d;
+ uint16_t d;
uint16_t q;
unsigned int i;
/* Generate array of all queues to poll, the size of this
* array is poll_q
*/
- for (d = 0; d < rte_eth_dev_count(); d++) {
+ RTE_ETH_FOREACH_DEV(d) {
uint16_t nb_rx_queues;
struct eth_device_info *dev_info =
&rx_adapter->eth_devices[d];
nb_rx_queues = dev_info->dev->data->nb_rx_queues;
if (dev_info->rx_queue == NULL)
continue;
+ if (dev_info->internal_event_port)
+ continue;
for (q = 0; q < nb_rx_queues; q++) {
struct eth_rx_queue_info *queue_info =
&dev_info->rx_queue[q];
&rx_adapter->event_enqueue_buffer;
struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
- uint16_t n = rte_event_enqueue_burst(rx_adapter->eventdev_id,
+ uint16_t n = rte_event_enqueue_new_burst(rx_adapter->eventdev_id,
rx_adapter->event_port_id,
buf->events,
buf->count);
static inline void
fill_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
- uint8_t dev_id,
+ uint16_t eth_dev_id,
uint16_t rx_queue_id,
struct rte_mbuf **mbufs,
uint16_t num)
{
uint32_t i;
struct eth_device_info *eth_device_info =
- &rx_adapter->eth_devices[dev_id];
+ &rx_adapter->eth_devices[eth_dev_id];
struct eth_rx_queue_info *eth_rx_queue_info =
ð_device_info->rx_queue[rx_queue_id];
uint32_t rss_mask;
uint32_t rss;
int do_rss;
+ uint64_t ts;
/* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */
rss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);
do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;
+ if ((m->ol_flags & PKT_RX_TIMESTAMP) == 0) {
+ ts = rte_get_tsc_cycles();
+ for (i = 0; i < num; i++) {
+ m = mbufs[i];
+
+ m->timestamp = ts;
+ m->ol_flags |= PKT_RX_TIMESTAMP;
+ }
+ }
+
for (i = 0; i < num; i++) {
m = mbufs[i];
struct rte_event *ev = &events[i];
eth_rx_queue_info->flow_id &
eth_rx_queue_info->flow_id_mask;
flow_id |= rss & ~eth_rx_queue_info->flow_id_mask;
-
ev->flow_id = flow_id;
ev->op = RTE_EVENT_OP_NEW;
ev->sched_type = sched_type;
* the hypervisor's switching layer where adjustments can be made to deal with
* it.
*/
-static inline uint32_t
+static inline void
eth_rx_poll(struct rte_event_eth_rx_adapter *rx_adapter)
{
uint32_t num_queue;
for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
unsigned int poll_idx = rx_adapter->wrr_sched[wrr_pos];
uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
- uint8_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
+ uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
/* Don't do a batch dequeue from the rx queue if there isn't
* enough space in the enqueue buffer.
*/
if (buf->count >= BATCH_SIZE)
flush_event_buffer(rx_adapter);
- if (BATCH_SIZE > (ETH_EVENT_BUFFER_SIZE - buf->count))
- break;
+ if (BATCH_SIZE > (ETH_EVENT_BUFFER_SIZE - buf->count)) {
+ rx_adapter->wrr_pos = wrr_pos;
+ return;
+ }
stats->rx_poll_count++;
n = rte_eth_rx_burst(d, qid, mbufs, BATCH_SIZE);
if (nb_rx > max_nb_rx) {
rx_adapter->wrr_pos =
(wrr_pos + 1) % rx_adapter->wrr_len;
- return nb_rx;
+ break;
}
}
wrr_pos = 0;
}
- return nb_rx;
+ if (buf->count >= BATCH_SIZE)
+ flush_event_buffer(rx_adapter);
}
static int
event_eth_rx_adapter_service_func(void *args)
{
	struct rte_event_eth_rx_adapter *rx_adapter = args;
-	struct rte_eth_event_enqueue_buffer *buf;
-	buf = &rx_adapter->event_enqueue_buffer;
	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
		return 0;
+	/* rxa_started is written by the start/stop path while holding
+	 * rx_lock, so it is safe to test it here under the same lock.
+	 * BUGFIX: the unlock must happen BEFORE the early return —
+	 * the original hunk had "return 0;" ahead of the unlock, making
+	 * the unlock unreachable and leaking the spinlock, which would
+	 * deadlock every later service call and the start/stop path.
+	 */
+	if (!rx_adapter->rxa_started) {
+		rte_spinlock_unlock(&rx_adapter->rx_lock);
+		return 0;
+	}
+	eth_rx_poll(rx_adapter);
	rte_spinlock_unlock(&rx_adapter->rx_lock);
	return 0;
}
if (ret) {
RTE_EDEV_LOG_ERR("failed to configure event dev %u\n",
dev_id);
- if (started)
- rte_event_dev_start(dev_id);
+ if (started) {
+ if (rte_event_dev_start(dev_id))
+ return -EIO;
+ }
return ret;
}
conf->event_port_id = port_id;
conf->max_nb_rx = 128;
if (started)
- rte_event_dev_start(dev_id);
+ ret = rte_event_dev_start(dev_id);
rx_adapter->default_cb_arg = 1;
return ret;
}
}
static int add_rx_queue(struct rte_event_eth_rx_adapter *rx_adapter,
- uint8_t eth_dev_id,
+ uint16_t eth_dev_id,
int rx_queue_id,
const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
dev = &rte_eventdevs[rx_adapter->eventdev_id];
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
dev_info = &rx_adapter->eth_devices[i];
/* if start check for num dev queues */
if (start && !dev_info->nb_dev_queues)
&rte_eth_devices[i]);
}
- if (use_service)
+ if (use_service) {
+ rte_spinlock_lock(&rx_adapter->rx_lock);
+ rx_adapter->rxa_started = start;
rte_service_runstate_set(rx_adapter->service_id, start);
+ rte_spinlock_unlock(&rx_adapter->rx_lock);
+ }
return 0;
}
struct rte_event_eth_rx_adapter *rx_adapter;
int ret;
int socket_id;
- uint8_t i;
+ uint16_t i;
char mem_name[ETH_RX_ADAPTER_SERVICE_NAME_LEN];
const uint8_t default_rss_key[] = {
0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
rx_adapter->conf_arg = conf_arg;
strcpy(rx_adapter->mem_name, mem_name);
rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
- rte_eth_dev_count() *
+ /* FIXME: incompatible with hotplug */
+ rte_eth_dev_count_total() *
sizeof(struct eth_device_info), 0,
socket_id);
rte_convert_rss_key((const uint32_t *)default_rss_key,
return -ENOMEM;
}
rte_spinlock_init(&rx_adapter->rx_lock);
- for (i = 0; i < rte_eth_dev_count(); i++)
+ RTE_ETH_FOREACH_DEV(i)
rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
event_eth_rx_adapter[id] = rx_adapter;
int
rte_event_eth_rx_adapter_queue_add(uint8_t id,
- uint8_t eth_dev_id,
+ uint16_t eth_dev_id,
int32_t rx_queue_id,
const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
&cap);
if (ret) {
RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
- "eth port %" PRIu8, id, eth_dev_id);
+ "eth port %" PRIu16, id, eth_dev_id);
return ret;
}
&& (queue_conf->rx_queue_flags &
RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID)) {
RTE_EDEV_LOG_ERR("Flow ID override is not supported,"
- " eth port: %" PRIu8 " adapter id: %" PRIu8,
+ " eth port: %" PRIu16 " adapter id: %" PRIu8,
eth_dev_id, id);
return -EINVAL;
}
if ((cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0 &&
(rx_queue_id != -1)) {
RTE_EDEV_LOG_ERR("Rx queues can only be connected to single "
- "event queue id %u eth port %u", id, eth_dev_id);
+ "event queue, eth port: %" PRIu16 " adapter id: %"
+ PRIu8, eth_dev_id, id);
return -EINVAL;
}
&rte_eth_devices[eth_dev_id],
rx_queue_id, queue_conf);
if (ret == 0) {
+ dev_info->internal_event_port = 1;
update_queue_info(rx_adapter,
&rx_adapter->eth_devices[eth_dev_id],
rx_queue_id,
}
} else {
rte_spinlock_lock(&rx_adapter->rx_lock);
+ dev_info->internal_event_port = 0;
ret = init_service(rx_adapter, id);
if (ret == 0)
ret = add_rx_queue(rx_adapter, eth_dev_id, rx_queue_id,
}
int
-rte_event_eth_rx_adapter_queue_del(uint8_t id, uint8_t eth_dev_id,
+rte_event_eth_rx_adapter_queue_del(uint8_t id, uint16_t eth_dev_id,
int32_t rx_queue_id)
{
int ret = 0;
dev = &rte_eventdevs[rx_adapter->eventdev_id];
memset(stats, 0, sizeof(*stats));
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
dev_info = &rx_adapter->eth_devices[i];
if (dev_info->internal_event_port == 0 ||
dev->dev_ops->eth_rx_adapter_stats_get == NULL)
return -EINVAL;
dev = &rte_eventdevs[rx_adapter->eventdev_id];
- for (i = 0; i < rte_eth_dev_count(); i++) {
+ RTE_ETH_FOREACH_DEV(i) {
dev_info = &rx_adapter->eth_devices[i];
if (dev_info->internal_event_port == 0 ||
dev->dev_ops->eth_rx_adapter_stats_reset == NULL)