#include <rte_service_component.h>
#include <rte_ethdev.h>
-#include "rte_eventdev_pmd.h"
+#include "eventdev_pmd.h"
+#include "rte_eventdev_trace.h"
#include "rte_event_eth_tx_adapter.h"
#define TXA_BATCH_SIZE 32
return ret;
}
- pc->disable_implicit_release = 0;
+ pc->event_port_cfg = 0;
ret = rte_event_port_setup(dev_id, port_id, pc);
if (ret) {
RTE_EDEV_LOG_ERR("failed to setup event port %u\n",
stats->tx_dropped += unsent - sent;
}
+/*
+ * Transmit all mbufs carried by an event vector and return the vector
+ * object to its mempool.
+ *
+ * If vec->attr_valid is set, every mbuf in the vector targets the single
+ * (port, queue) pair carried in the vector attributes; otherwise the
+ * destination is resolved per mbuf from m->port and the adapter's txq
+ * metadata (rte_event_eth_tx_adapter_txq_get()).
+ *
+ * mbufs destined for a (port, queue) that was never added to this adapter
+ * are freed rather than transmitted. In the attr_valid case a stale
+ * destination drops the whole vector in one rte_pktmbuf_free_bulk() call.
+ *
+ * Returns the accumulated rte_eth_tx_buffer() return values, i.e. the
+ * number of packets actually flushed to the NIC during this call
+ * (buffered-but-unsent packets are not counted yet, per that API's
+ * contract).
+ */
+static uint16_t
+txa_process_event_vector(struct txa_service_data *txa,
+ struct rte_event_vector *vec)
+{
+ struct txa_service_queue_info *tqi;
+ uint16_t port, queue, nb_tx = 0;
+ struct rte_mbuf **mbufs;
+ int i;
+
+ mbufs = (struct rte_mbuf **)vec->mbufs;
+ if (vec->attr_valid) {
+ port = vec->port;
+ queue = vec->queue;
+ tqi = txa_service_queue(txa, port, queue);
+ if (unlikely(tqi == NULL || !tqi->added)) {
+ /* Queue not (or no longer) registered: drop everything and
+ * release the vector before the early return.
+ */
+ rte_pktmbuf_free_bulk(mbufs, vec->nb_elem);
+ rte_mempool_put(rte_mempool_from_obj(vec), vec);
+ return 0;
+ }
+ for (i = 0; i < vec->nb_elem; i++) {
+ nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf,
+ mbufs[i]);
+ }
+ } else {
+ /* Per-mbuf destinations: look up the adapter queue for each
+ * packet individually; invalid destinations drop only that mbuf.
+ */
+ for (i = 0; i < vec->nb_elem; i++) {
+ port = mbufs[i]->port;
+ queue = rte_event_eth_tx_adapter_txq_get(mbufs[i]);
+ tqi = txa_service_queue(txa, port, queue);
+ if (unlikely(tqi == NULL || !tqi->added)) {
+ rte_pktmbuf_free(mbufs[i]);
+ continue;
+ }
+ nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf,
+ mbufs[i]);
+ }
+ }
+ /* mbuf ownership has passed to the tx buffers (or they were freed);
+ * the vector container itself goes back to its pool.
+ */
+ rte_mempool_put(rte_mempool_from_obj(vec), vec);
+
+ return nb_tx;
+}
+
static void
txa_service_tx(struct txa_service_data *txa, struct rte_event *ev,
uint32_t n)
nb_tx = 0;
for (i = 0; i < n; i++) {
- struct rte_mbuf *m;
uint16_t port;
uint16_t queue;
struct txa_service_queue_info *tqi;
- m = ev[i].mbuf;
- port = m->port;
- queue = rte_event_eth_tx_adapter_txq_get(m);
+ if (!(ev[i].event_type & RTE_EVENT_TYPE_VECTOR)) {
+ struct rte_mbuf *m;
- tqi = txa_service_queue(txa, port, queue);
- if (unlikely(tqi == NULL || !tqi->added)) {
- rte_pktmbuf_free(m);
- continue;
- }
+ m = ev[i].mbuf;
+ port = m->port;
+ queue = rte_event_eth_tx_adapter_txq_get(m);
- nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf, m);
+ tqi = txa_service_queue(txa, port, queue);
+ if (unlikely(tqi == NULL || !tqi->added)) {
+ rte_pktmbuf_free(m);
+ continue;
+ }
+
+ nb_tx += rte_eth_tx_buffer(port, queue, tqi->tx_buf, m);
+ } else {
+ nb_tx += txa_process_event_vector(txa, ev[i].vec);
+ }
}
stats->tx_packets += nb_tx;
qdone = rte_zmalloc(txa->mem_name,
nb_queues * sizeof(*qdone), 0);
+ if (qdone == NULL)
+ return -ENOMEM;
j = 0;
for (i = 0; i < nb_queues; i++) {
if (txa_service_is_queue_added(txa, eth_dev, i))
}
-int __rte_experimental
+int
rte_event_eth_tx_adapter_create(uint8_t id, uint8_t dev_id,
struct rte_event_port_conf *port_conf)
{
txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
return ret;
}
-
+ rte_eventdev_trace_eth_tx_adapter_create(id, dev_id, NULL, port_conf,
+ ret);
txa_dev_id_array[id] = dev_id;
return 0;
}
-int __rte_experimental
+int
rte_event_eth_tx_adapter_create_ext(uint8_t id, uint8_t dev_id,
rte_event_eth_tx_adapter_conf_cb conf_cb,
void *conf_arg)
return ret;
}
+ rte_eventdev_trace_eth_tx_adapter_create(id, dev_id, conf_cb, conf_arg,
+ ret);
txa_dev_id_array[id] = dev_id;
return 0;
}
-int __rte_experimental
+/* Public API: fetch the event port used by adapter 'id'. Validates the
+ * adapter id (TXA_CHECK_OR_ERR_RET returns -EINVAL on a bad id), then
+ * delegates to the service implementation. The __rte_experimental tag is
+ * dropped as the API is promoted to stable.
+ */
+int
rte_event_eth_tx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id)
{
	TXA_CHECK_OR_ERR_RET(id);
	return txa_service_event_port_get(id, event_port_id);
}
-int __rte_experimental
+int
rte_event_eth_tx_adapter_free(uint8_t id)
{
int ret;
ret = txa_service_adapter_free(id);
txa_dev_id_array[id] = TXA_INVALID_DEV_ID;
+ rte_eventdev_trace_eth_tx_adapter_free(id, ret);
return ret;
}
-int __rte_experimental
+int
rte_event_eth_tx_adapter_queue_add(uint8_t id,
uint16_t eth_dev_id,
int32_t queue)
else
ret = txa_service_queue_add(id, txa_evdev(id), eth_dev, queue);
+ rte_eventdev_trace_eth_tx_adapter_queue_add(id, eth_dev_id, queue,
+ ret);
return ret;
}
-int __rte_experimental
+int
rte_event_eth_tx_adapter_queue_del(uint8_t id,
uint16_t eth_dev_id,
int32_t queue)
else
ret = txa_service_queue_del(id, eth_dev, queue);
+ rte_eventdev_trace_eth_tx_adapter_queue_del(id, eth_dev_id, queue,
+ ret);
return ret;
}
-int __rte_experimental
+/* Public API: fetch the service id backing adapter 'id'. Validates the
+ * adapter id via TXA_CHECK_OR_ERR_RET, then delegates to the service
+ * implementation. The __rte_experimental tag is dropped as the API is
+ * promoted to stable.
+ */
+int
rte_event_eth_tx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
{
	TXA_CHECK_OR_ERR_RET(id);
	return txa_service_id_get(id, service_id);
}
-int __rte_experimental
+int
rte_event_eth_tx_adapter_start(uint8_t id)
{
int ret;
ret = txa_dev_start(id) ? txa_dev_start(id)(id, txa_evdev(id)) : 0;
if (ret == 0)
ret = txa_service_start(id);
+ rte_eventdev_trace_eth_tx_adapter_start(id, ret);
return ret;
}
-int __rte_experimental
+int
rte_event_eth_tx_adapter_stats_get(uint8_t id,
struct rte_event_eth_tx_adapter_stats *stats)
{
return ret;
}
-int __rte_experimental
+int
rte_event_eth_tx_adapter_stats_reset(uint8_t id)
{
int ret;
return ret;
}
-int __rte_experimental
+int
rte_event_eth_tx_adapter_stop(uint8_t id)
{
int ret;
ret = txa_dev_stop(id) ? txa_dev_stop(id)(id, txa_evdev(id)) : 0;
if (ret == 0)
ret = txa_service_stop(id);
+ rte_eventdev_trace_eth_tx_adapter_stop(id, ret);
return ret;
}