struct rte_eth_rxconf rx_conf;
struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IP,
+ .rss_hf = RTE_ETH_RSS_IP,
},
},
};
return -EINVAL;
}
- port_conf.rxmode.max_rx_pkt_len = opt->max_pkt_sz;
- if (opt->max_pkt_sz > RTE_ETHER_MAX_LEN)
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ port_conf.rxmode.mtu = opt->max_pkt_sz - RTE_ETHER_HDR_LEN -
+ RTE_ETHER_CRC_LEN;
t->internal_port = 1;
RTE_ETH_FOREACH_DEV(i) {
if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
local_port_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
ret = rte_eth_dev_info_get(i, &dev_info);
if (ret != 0) {
}
/* Enable mbuf fast free if PMD has the capability. */
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
rx_conf = dev_info.default_rxconf;
rx_conf.offloads = port_conf.rxmode.offloads;
}
for (j = 0; j < opt->eth_queues; j++) {
- if (rte_eth_rx_queue_setup(i, j, NB_RX_DESC,
- rte_socket_id(), &rx_conf,
- t->pool) < 0) {
+ if (rte_eth_rx_queue_setup(
+ i, j, NB_RX_DESC, rte_socket_id(), &rx_conf,
+ opt->per_port_pool ? t->pool[i] :
+ t->pool[0]) < 0) {
evt_err("Failed to setup eth port [%d] rx_queue: %d.",
i, 0);
return -EINVAL;
uint16_t prod;
struct rte_mempool *vector_pool = NULL;
struct rte_event_eth_rx_adapter_queue_conf queue_conf;
- struct rte_event_eth_rx_adapter_event_vector_config vec_conf;
memset(&queue_conf, 0,
sizeof(struct rte_event_eth_rx_adapter_queue_conf));
}
if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
+ queue_conf.vector_sz = opt->vector_size;
+ queue_conf.vector_timeout_ns =
+ opt->vector_tmo_nsec;
queue_conf.rx_queue_flags |=
RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
+ queue_conf.vector_mp = vector_pool;
} else {
evt_err("Rx adapter doesn't support event vector");
return -EINVAL;
return ret;
}
- if (opt->ena_vector) {
- vec_conf.vector_sz = opt->vector_size;
- vec_conf.vector_timeout_ns = opt->vector_tmo_nsec;
- vec_conf.vector_mp = vector_pool;
- if (rte_event_eth_rx_adapter_queue_event_vector_config(
- prod, prod, -1, &vec_conf) < 0) {
- evt_err("Failed to configure event vectorization for Rx adapter");
- return -EINVAL;
- }
- }
-
if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
uint32_t service_id = -1U;
return ret;
}
+/* Free the payload of @num vector events: release every mbuf carried by
+ * each event's vector, then return the vector object itself to the
+ * mempool it was allocated from (looked up via rte_mempool_from_obj()).
+ */
+static void
+pipeline_vector_array_free(struct rte_event events[], uint16_t num)
+{
+	uint16_t i;
+
+	for (i = 0; i < num; i++) {
+		rte_pktmbuf_free_bulk(events[i].vec->mbufs,
+			events[i].vec->nb_elem);
+		rte_mempool_put(rte_mempool_from_obj(events[i].vec),
+			events[i].vec);
+	}
+}
+
+/* rte_event_port_quiesce() callback: drop one event that was still
+ * buffered in the event port when the worker stopped. Vector events
+ * are freed via pipeline_vector_array_free(); plain events free the
+ * single mbuf.
+ */
+static void
+pipeline_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
+		   void *args __rte_unused)
+{
+	if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
+		pipeline_vector_array_free(&ev, 1);
+	else
+		rte_pktmbuf_free(ev.mbuf);
+}
+
+/* Release events a worker still holds when it exits mid-burst, then
+ * quiesce its event port.
+ *
+ * @param dev  Event device id.
+ * @param port Event port the worker was using.
+ * @param ev   Burst array last dequeued by the worker.
+ * @param enq  Count of events from ev[] already enqueued (forwarded).
+ * @param deq  Count of events dequeued into ev[].
+ */
+void
+pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
+			uint16_t enq, uint16_t deq)
+{
+	int i;
+
+	/* Nothing pending: everything dequeued was already forwarded. */
+	if (!(deq - enq))
+		return;
+
+	if (deq) {
+		/* Free the payload of events not yet forwarded; events
+		 * already marked RELEASE own nothing.
+		 */
+		for (i = enq; i < deq; i++) {
+			if (ev[i].op == RTE_EVENT_OP_RELEASE)
+				continue;
+			if (ev[i].event_type & RTE_EVENT_TYPE_VECTOR)
+				pipeline_vector_array_free(&ev[i], 1);
+			else
+				rte_pktmbuf_free(ev[i].mbuf);
+		}
+
+		/* Return all dequeued events to the device as RELEASE ops
+		 * so the scheduler's flow-ordering state is cleaned up.
+		 */
+		for (i = 0; i < deq; i++)
+			ev[i].op = RTE_EVENT_OP_RELEASE;
+
+		rte_event_enqueue_burst(dev, port, ev, deq);
+	}
+
+	/* Flush any events still buffered inside the port. */
+	rte_event_port_quiesce(dev, port, pipeline_event_port_flush, NULL);
+}
+
+/* Stop the Rx side before pipeline teardown: halt each Rx adapter,
+ * detach its queues and stop the ethdev Rx queues, so no new packets
+ * are injected while workers are cleaned up. Only applies when the
+ * producer is the ethdev Rx adapter.
+ */
+void
+pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
+{
+	uint16_t i, j;
+	RTE_SET_USED(test);
+
+	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+		RTE_ETH_FOREACH_DEV(i) {
+			rte_event_eth_rx_adapter_stop(i);
+			/* Queue id -1 deletes all Rx queues of ethdev i
+			 * from adapter i (adapter id == port id here).
+			 */
+			rte_event_eth_rx_adapter_queue_del(i, i, -1);
+			for (j = 0; j < opt->eth_queues; j++)
+				rte_eth_dev_rx_queue_stop(i, j);
+		}
+	}
+}
+
void
pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
RTE_SET_USED(opt);
RTE_ETH_FOREACH_DEV(i) {
-	rte_event_eth_rx_adapter_stop(i);
	rte_event_eth_tx_adapter_stop(i);
+	/* Queue id -1 deletes all Tx queues of ethdev i from Tx
+	 * adapter i; Rx teardown moved to pipeline_ethdev_rx_stop().
+	 */
+	rte_event_eth_tx_adapter_queue_del(i, i, -1);
+	rte_eth_dev_tx_queue_stop(i, 0);
	rte_eth_dev_stop(i);
}
}
if (data_size > opt->mbuf_sz)
opt->mbuf_sz = data_size;
}
+ if (opt->per_port_pool) {
+ char name[RTE_MEMPOOL_NAMESIZE];
+
+ snprintf(name, RTE_MEMPOOL_NAMESIZE, "%s-%d",
+ test->name, i);
+ t->pool[i] = rte_pktmbuf_pool_create(
+ name, /* mempool name */
+ opt->pool_sz, /* number of elements*/
+ 0, /* cache size*/
+ 0, opt->mbuf_sz, opt->socket_id); /* flags */
+
+ if (t->pool[i] == NULL) {
+ evt_err("failed to create mempool %s", name);
+ return -ENOMEM;
+ }
+ }
}
- t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
+ if (!opt->per_port_pool) {
+ t->pool[0] = rte_pktmbuf_pool_create(
+ test->name, /* mempool name */
opt->pool_sz, /* number of elements*/
- 512, /* cache size*/
- 0,
- opt->mbuf_sz,
- opt->socket_id); /* flags */
-
- if (t->pool == NULL) {
- evt_err("failed to create mempool");
- return -ENOMEM;
+ 0, /* cache size*/
+ 0, opt->mbuf_sz, opt->socket_id); /* flags */
+
+ if (t->pool[0] == NULL) {
+ evt_err("failed to create mempool");
+ return -ENOMEM;
+ }
}
return 0;
void
pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
-	RTE_SET_USED(opt);
struct test_pipeline *t = evt_test_priv(test);
+	int i;
-	rte_mempool_free(t->pool);
+	RTE_SET_USED(opt);
+	/* Per-port mode created one pool per ethdev; otherwise only
+	 * pool[0] was allocated. Mirror that in teardown.
+	 */
+	if (opt->per_port_pool) {
+		RTE_ETH_FOREACH_DEV(i)
+			rte_mempool_free(t->pool[i]);
+	} else {
+		rte_mempool_free(t->pool[0]);
+	}
}
int