/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019-2021 Intel Corporation
*/
#include <stdint.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
-#include <rte_rawdev.h>
-#include <rte_ioat_rawdev.h>
+#include <rte_dmadev.h>
/* size of ring used for software copying between rx and tx. */
-#define RTE_LOGTYPE_IOAT RTE_LOGTYPE_USER1
+#define RTE_LOGTYPE_DMA RTE_LOGTYPE_USER1
#define MAX_PKT_BURST 32
#define MEMPOOL_CACHE_SIZE 512
#define MIN_POOL_SIZE 65536U
#define CMD_LINE_OPT_NB_QUEUE "nb-queue"
#define CMD_LINE_OPT_COPY_TYPE "copy-type"
#define CMD_LINE_OPT_RING_SIZE "ring-size"
+#define CMD_LINE_OPT_BATCH_SIZE "dma-batch-size"
+#define CMD_LINE_OPT_FRAME_SIZE "max-frame-size"
+#define CMD_LINE_OPT_STATS_INTERVAL "stats-interval"
/* configurable number of RX/TX ring descriptors */
#define RX_DEFAULT_RINGSIZE 1024
uint16_t nb_queues;
/* for software copy mode */
struct rte_ring *rx_to_tx_ring;
- /* for IOAT rawdev copy mode */
- uint16_t ioat_ids[MAX_RX_QUEUES_COUNT];
+ /* for dmadev HW copy mode */
+ uint16_t dmadev_ids[MAX_RX_QUEUES_COUNT];
};
+/* Configuring ports and number of assigned lcores in struct. 8< */
struct rxtx_transmission_config {
struct rxtx_port_config ports[RTE_MAX_ETHPORTS];
uint16_t nb_ports;
uint16_t nb_lcores;
};
+/* >8 End of configuration of ports and number of assigned lcores. */
/* per-port statistics struct */
struct ioat_port_statistics {
uint64_t copy_dropped[RTE_MAX_ETHPORTS];
};
struct ioat_port_statistics port_statistics;
-
struct total_statistics {
uint64_t total_packets_dropped;
uint64_t total_packets_tx;
uint64_t total_packets_rx;
- uint64_t total_successful_enqueues;
- uint64_t total_failed_enqueues;
+ uint64_t total_submitted;
+ uint64_t total_completed;
+ uint64_t total_failed;
};
typedef enum copy_mode_t {
*/
static unsigned short ring_size = 2048;
+/* interval, in seconds, between stats prints */
+static unsigned short stats_interval = 1;
+/* global mbuf arrays for tracking DMA bufs */
+#define MBUF_RING_SIZE 2048
+#define MBUF_RING_MASK (MBUF_RING_SIZE - 1)
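+/* rte_dma_copy() returns a sequential index for each enqueued op;
+ * masking it with MBUF_RING_MASK picks a slot in the shadow arrays
+ * below, which track the source/copy mbufs currently in flight.
+ */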
+struct dma_bufs {
+ struct rte_mbuf *bufs[MBUF_RING_SIZE];
+ struct rte_mbuf *copies[MBUF_RING_SIZE];
+ uint16_t sent;
+};
+static struct dma_bufs dma_bufs[RTE_DMADEV_DEFAULT_MAX];
+
/* global transmission config */
struct rxtx_transmission_config cfg;
static volatile bool force_quit;
+static uint32_t ioat_batch_sz = MAX_PKT_BURST;
+static uint32_t max_frame_size = RTE_ETHER_MAX_LEN;
+
/* ethernet addresses of ports */
static struct rte_ether_addr ioat_ports_eth_addr[RTE_MAX_ETHPORTS];
-/* Print out statistics for one IOAT rawdev device. */
+/* Print out statistics for one dmadev device. */
static void
-print_rawdev_stats(uint32_t dev_id, uint64_t *xstats,
- unsigned int *ids_xstats, uint16_t nb_xstats,
- struct rte_rawdev_xstats_name *names_xstats)
+print_dmadev_stats(uint32_t dev_id, struct rte_dma_stats stats)
{
- uint16_t i;
-
- printf("\nIOAT channel %u", dev_id);
- for (i = 0; i < nb_xstats; i++)
- printf("\n\t %s: %*"PRIu64,
- names_xstats[ids_xstats[i]].name,
- (int)(37 - strlen(names_xstats[ids_xstats[i]].name)),
- xstats[i]);
+ printf("\nDMA channel %u", dev_id);
+ printf("\n\t Total submitted ops: %"PRIu64"", stats.submitted);
+ printf("\n\t Total completed ops: %"PRIu64"", stats.completed);
+ printf("\n\t Total failed ops: %"PRIu64"", stats.errors);
}
static void
print_total_stats(struct total_statistics *ts)
{
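+ /* the counters below are deltas over one interval; divide by the
+ * interval length to report per-second rates
+ */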
printf("\nAggregate statistics ==============================="
- "\nTotal packets Tx: %24"PRIu64" [pps]"
- "\nTotal packets Rx: %24"PRIu64" [pps]"
- "\nTotal packets dropped: %19"PRIu64" [pps]",
- ts->total_packets_tx,
- ts->total_packets_rx,
- ts->total_packets_dropped);
+ "\nTotal packets Tx: %22"PRIu64" [pkt/s]"
+ "\nTotal packets Rx: %22"PRIu64" [pkt/s]"
+ "\nTotal packets dropped: %17"PRIu64" [pkt/s]",
+ ts->total_packets_tx / stats_interval,
+ ts->total_packets_rx / stats_interval,
+ ts->total_packets_dropped / stats_interval);
if (copy_mode == COPY_MODE_IOAT_NUM) {
- printf("\nTotal IOAT successful enqueues: %8"PRIu64" [enq/s]"
- "\nTotal IOAT failed enqueues: %12"PRIu64" [enq/s]",
- ts->total_successful_enqueues,
- ts->total_failed_enqueues);
+ printf("\nTotal submitted ops: %19"PRIu64" [ops/s]"
+ "\nTotal completed ops: %19"PRIu64" [ops/s]"
+ "\nTotal failed ops: %22"PRIu64" [ops/s]",
+ ts->total_submitted / stats_interval,
+ ts->total_completed / stats_interval,
+ ts->total_failed / stats_interval);
}
printf("\n====================================================\n");
print_stats(char *prgname)
{
struct total_statistics ts, delta_ts;
+ struct rte_dma_stats stats = {0};
uint32_t i, port_id, dev_id;
- struct rte_rawdev_xstats_name *names_xstats;
- uint64_t *xstats;
- unsigned int *ids_xstats, nb_xstats;
- char status_string[120]; /* to print at the top of the output */
+ char status_string[255]; /* to print at the top of the output */
int status_strlen;
- int ret;
const char clr[] = { 27, '[', '2', 'J', '\0' };
const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
"Rx Queues = %d, ", nb_queues);
status_strlen += snprintf(status_string + status_strlen,
sizeof(status_string) - status_strlen,
- "Ring Size = %d\n", ring_size);
-
- /* Allocate memory for xstats names and values */
- ret = rte_rawdev_xstats_names_get(
- cfg.ports[0].ioat_ids[0], NULL, 0);
- if (ret < 0)
- return;
- nb_xstats = (unsigned int)ret;
-
- names_xstats = malloc(sizeof(*names_xstats) * nb_xstats);
- if (names_xstats == NULL) {
- rte_exit(EXIT_FAILURE,
- "Error allocating xstat names memory\n");
- }
- rte_rawdev_xstats_names_get(cfg.ports[0].ioat_ids[0],
- names_xstats, nb_xstats);
-
- ids_xstats = malloc(sizeof(*ids_xstats) * 2);
- if (ids_xstats == NULL) {
- rte_exit(EXIT_FAILURE,
- "Error allocating xstat ids_xstats memory\n");
- }
-
- xstats = malloc(sizeof(*xstats) * 2);
- if (xstats == NULL) {
- rte_exit(EXIT_FAILURE,
- "Error allocating xstat memory\n");
- }
-
- /* Get failed/successful enqueues stats index */
- ids_xstats[0] = ids_xstats[1] = nb_xstats;
- for (i = 0; i < nb_xstats; i++) {
- if (!strcmp(names_xstats[i].name, "failed_enqueues"))
- ids_xstats[0] = i;
- else if (!strcmp(names_xstats[i].name, "successful_enqueues"))
- ids_xstats[1] = i;
- if (ids_xstats[0] < nb_xstats && ids_xstats[1] < nb_xstats)
- break;
- }
- if (ids_xstats[0] == nb_xstats || ids_xstats[1] == nb_xstats) {
- rte_exit(EXIT_FAILURE,
- "Error getting failed/successful enqueues stats index\n");
- }
+ "Ring Size = %d", ring_size);
memset(&ts, 0, sizeof(struct total_statistics));
while (!force_quit) {
- /* Sleep for 1 second each round - init sleep allows reading
+ /* Sleep for "stats_interval" seconds each round - init sleep allows reading
* messages from app startup.
*/
- sleep(1);
+ sleep(stats_interval);
/* Clear screen and move to top left */
printf("%s%s", clr, topLeft);
memset(&delta_ts, 0, sizeof(struct total_statistics));
- printf("%s", status_string);
+ printf("%s\n", status_string);
for (i = 0; i < cfg.nb_ports; i++) {
port_id = cfg.ports[i].rxtx_port;
uint32_t j;
for (j = 0; j < cfg.ports[i].nb_queues; j++) {
- dev_id = cfg.ports[i].ioat_ids[j];
- rte_rawdev_xstats_get(dev_id,
- ids_xstats, xstats, 2);
-
- print_rawdev_stats(dev_id, xstats,
- ids_xstats, 2, names_xstats);
+ dev_id = cfg.ports[i].dmadev_ids[j];
+ rte_dma_stats_get(dev_id, 0, &stats);
+ print_dmadev_stats(dev_id, stats);
- delta_ts.total_failed_enqueues +=
- xstats[ids_xstats[0]];
- delta_ts.total_successful_enqueues +=
- xstats[ids_xstats[1]];
+ delta_ts.total_submitted += stats.submitted;
+ delta_ts.total_completed += stats.completed;
+ delta_ts.total_failed += stats.errors;
}
}
}
delta_ts.total_packets_tx -= ts.total_packets_tx;
delta_ts.total_packets_rx -= ts.total_packets_rx;
delta_ts.total_packets_dropped -= ts.total_packets_dropped;
- delta_ts.total_failed_enqueues -= ts.total_failed_enqueues;
- delta_ts.total_successful_enqueues -=
- ts.total_successful_enqueues;
+ delta_ts.total_submitted -= ts.total_submitted;
+ delta_ts.total_completed -= ts.total_completed;
+ delta_ts.total_failed -= ts.total_failed;
printf("\n");
print_total_stats(&delta_ts);
ts.total_packets_tx += delta_ts.total_packets_tx;
ts.total_packets_rx += delta_ts.total_packets_rx;
ts.total_packets_dropped += delta_ts.total_packets_dropped;
- ts.total_failed_enqueues += delta_ts.total_failed_enqueues;
- ts.total_successful_enqueues +=
- delta_ts.total_successful_enqueues;
+ ts.total_submitted += delta_ts.total_submitted;
+ ts.total_completed += delta_ts.total_completed;
+ ts.total_failed += delta_ts.total_failed;
}
-
- free(names_xstats);
- free(xstats);
- free(ids_xstats);
}
static void
/* 02:00:00:00:00:xx - overwriting 2 bytes of source address but
 * it's acceptable because it gets overwritten by rte_ether_addr_copy
*/
- tmp = ð->d_addr.addr_bytes[0];
+ tmp = ð->dst_addr.addr_bytes[0];
*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)dest_portid << 40);
/* src addr */
- rte_ether_addr_copy(&ioat_ports_eth_addr[dest_portid], ð->s_addr);
+ rte_ether_addr_copy(&ioat_ports_eth_addr[dest_portid], ð->src_addr);
}
+/* Packet copy is performed by user-defined functions. 8< */
static inline void
-pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst)
+pktmbuf_metadata_copy(const struct rte_mbuf *src, struct rte_mbuf *dst)
{
- /* Copy packet metadata */
- rte_memcpy(&dst->rearm_data,
- &src->rearm_data,
- offsetof(struct rte_mbuf, cacheline1)
- - offsetof(struct rte_mbuf, rearm_data));
+ dst->data_off = src->data_off;
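+ /* copy the RX descriptor metadata (packet type, lengths, VLAN, RSS
+ * hash) while leaving the copy's buffer address and refcnt intact
+ */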
+ memcpy(&dst->rx_descriptor_fields1, &src->rx_descriptor_fields1,
+ offsetof(struct rte_mbuf, buf_len) -
+ offsetof(struct rte_mbuf, rx_descriptor_fields1));
+}
- /* Copy packet data */
+/* Copy packet data */
+static inline void
+pktmbuf_sw_copy(struct rte_mbuf *src, struct rte_mbuf *dst)
+{
rte_memcpy(rte_pktmbuf_mtod(dst, char *),
rte_pktmbuf_mtod(src, char *), src->data_len);
}
+/* >8 End of packet copy performed by user-defined functions. */
static uint32_t
-ioat_enqueue_packets(struct rte_mbuf **pkts,
+ioat_enqueue_packets(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[],
uint32_t nb_rx, uint16_t dev_id)
{
+ struct dma_bufs *dma = &dma_bufs[dev_id];
int ret;
uint32_t i;
- struct rte_mbuf *pkts_copy[MAX_PKT_BURST];
-
- const uint64_t addr_offset = RTE_PTR_DIFF(pkts[0]->buf_addr,
- &pkts[0]->rearm_data);
-
- ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
- (void *)pkts_copy, nb_rx);
-
- if (unlikely(ret < 0))
- rte_exit(EXIT_FAILURE, "Unable to allocate memory.\n");
for (i = 0; i < nb_rx; i++) {
/* Perform data copy */
- ret = rte_ioat_enqueue_copy(dev_id,
- pkts[i]->buf_iova
- - addr_offset,
- pkts_copy[i]->buf_iova
- - addr_offset,
- rte_pktmbuf_data_len(pkts[i])
- + addr_offset,
- (uintptr_t)pkts[i],
- (uintptr_t)pkts_copy[i],
- 0 /* nofence */);
-
- if (ret != 1)
+ ret = rte_dma_copy(dev_id, 0,
+ rte_pktmbuf_iova(pkts[i]),
+ rte_pktmbuf_iova(pkts_copy[i]),
+ rte_pktmbuf_data_len(pkts[i]), 0);
+
+ if (ret < 0)
break;
+
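+ /* stash the buf pair at the slot given by the returned op index */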
+ dma->bufs[ret & MBUF_RING_MASK] = pkts[i];
+ dma->copies[ret & MBUF_RING_MASK] = pkts_copy[i];
}
ret = i;
- /* Free any not enqueued packets. */
- rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts[i], nb_rx - i);
- rte_mempool_put_bulk(ioat_pktmbuf_pool, (void *)&pkts_copy[i],
- nb_rx - i);
-
return ret;
}
-/* Receive packets on one port and enqueue to IOAT rawdev or rte_ring. */
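+/* Enqueue packets in batches of at most "step" ops, submitting each
+ * batch to HW before starting the next; stops early if the ring fills.
+ */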
+static inline uint32_t
+ioat_enqueue(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[],
+ uint32_t num, uint32_t step, uint16_t dev_id)
+{
+ uint32_t i, k, m, n;
+
+ k = 0;
+ for (i = 0; i < num; i += m) {
+
+ m = RTE_MIN(step, num - i);
+ n = ioat_enqueue_packets(pkts + i, pkts_copy + i, m, dev_id);
+ k += n;
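+ /* doorbell: trigger HW processing of the ops enqueued so far */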
+ if (n > 0)
+ rte_dma_submit(dev_id, 0);
+
+ /* don't try to enqueue more if HW queue is full */
+ if (n != m)
+ break;
+ }
+
+ return k;
+}
+
+static inline uint32_t
+ioat_dequeue(struct rte_mbuf *src[], struct rte_mbuf *dst[], uint32_t num,
+ uint16_t dev_id)
+{
+ struct dma_bufs *dma = &dma_bufs[dev_id];
+ uint16_t nb_dq, filled;
+ /* Dequeue the mbufs from the DMA device. Since all memory
+ * is DPDK pinned memory and therefore all addresses should
+ * be valid, we don't check for copy errors.
+ */
+ nb_dq = rte_dma_completed(dev_id, 0, num, NULL, NULL);
+
+ /* Return early if no work to do */
+ if (unlikely(nb_dq == 0))
+ return nb_dq;
+
+ /* Collect the src/copy pairs recorded when the ops were enqueued */
+ for (filled = 0; filled < nb_dq; filled++) {
+ src[filled] = dma->bufs[(dma->sent + filled) & MBUF_RING_MASK];
+ dst[filled] = dma->copies[(dma->sent + filled) & MBUF_RING_MASK];
+ }
+ dma->sent += nb_dq;
+
+ return filled;
+}
+
+/* Receive packets on one port and enqueue to dmadev or rte_ring. 8< */
static void
ioat_rx_port(struct rxtx_port_config *rx_config)
{
+ int32_t ret;
uint32_t nb_rx, nb_enq, i, j;
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];
for (i = 0; i < rx_config->nb_queues; i++) {
port_statistics.rx[rx_config->rxtx_port] += nb_rx;
+ ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
+ (void *)pkts_burst_copy, nb_rx);
+
+ if (unlikely(ret < 0))
+ rte_exit(EXIT_FAILURE,
+ "Unable to allocate memory.\n");
+
+ for (j = 0; j < nb_rx; j++)
+ pktmbuf_metadata_copy(pkts_burst[j],
+ pkts_burst_copy[j]);
+
if (copy_mode == COPY_MODE_IOAT_NUM) {
- /* Perform packet hardware copy */
- nb_enq = ioat_enqueue_packets(pkts_burst,
- nb_rx, rx_config->ioat_ids[i]);
- if (nb_enq > 0)
- rte_ioat_do_copies(rx_config->ioat_ids[i]);
- } else {
- /* Perform packet software copy, free source packets */
- int ret;
- struct rte_mbuf *pkts_burst_copy[MAX_PKT_BURST];
- ret = rte_mempool_get_bulk(ioat_pktmbuf_pool,
- (void *)pkts_burst_copy, nb_rx);
+ /* enqueue packets for hardware copy */
+ nb_enq = ioat_enqueue(pkts_burst, pkts_burst_copy,
+ nb_rx, ioat_batch_sz, rx_config->dmadev_ids[i]);
- if (unlikely(ret < 0))
- rte_exit(EXIT_FAILURE,
- "Unable to allocate memory.\n");
+ /* free any not enqueued packets. */
+ rte_mempool_put_bulk(ioat_pktmbuf_pool,
+ (void *)&pkts_burst[nb_enq],
+ nb_rx - nb_enq);
+ rte_mempool_put_bulk(ioat_pktmbuf_pool,
+ (void *)&pkts_burst_copy[nb_enq],
+ nb_rx - nb_enq);
+ port_statistics.copy_dropped[rx_config->rxtx_port] +=
+ (nb_rx - nb_enq);
+
+ /* get completed copies */
+ nb_rx = ioat_dequeue(pkts_burst, pkts_burst_copy,
+ MAX_PKT_BURST, rx_config->dmadev_ids[i]);
+ } else {
+ /* Perform packet software copy, free source packets */
for (j = 0; j < nb_rx; j++)
pktmbuf_sw_copy(pkts_burst[j],
pkts_burst_copy[j]);
+ }
- rte_mempool_put_bulk(ioat_pktmbuf_pool,
- (void *)pkts_burst, nb_rx);
+ rte_mempool_put_bulk(ioat_pktmbuf_pool,
+ (void *)pkts_burst, nb_rx);
- nb_enq = rte_ring_enqueue_burst(
- rx_config->rx_to_tx_ring,
- (void *)pkts_burst_copy, nb_rx, NULL);
+ nb_enq = rte_ring_enqueue_burst(rx_config->rx_to_tx_ring,
+ (void *)pkts_burst_copy, nb_rx, NULL);
- /* Free any not enqueued packets. */
- rte_mempool_put_bulk(ioat_pktmbuf_pool,
- (void *)&pkts_burst_copy[nb_enq],
- nb_rx - nb_enq);
- }
+ /* Free any not enqueued packets. */
+ rte_mempool_put_bulk(ioat_pktmbuf_pool,
+ (void *)&pkts_burst_copy[nb_enq],
+ nb_rx - nb_enq);
port_statistics.copy_dropped[rx_config->rxtx_port] +=
(nb_rx - nb_enq);
}
}
+/* >8 End of receive packets on one port and enqueue to dmadev or rte_ring. */
-/* Transmit packets from IOAT rawdev/rte_ring for one port. */
+/* Transmit packets from rte_ring for one port. 8< */
static void
ioat_tx_port(struct rxtx_port_config *tx_config)
{
- uint32_t i, j, nb_dq = 0;
- struct rte_mbuf *mbufs_src[MAX_PKT_BURST];
- struct rte_mbuf *mbufs_dst[MAX_PKT_BURST];
+ uint32_t i, j, nb_dq, nb_tx;
+ struct rte_mbuf *mbufs[MAX_PKT_BURST];
for (i = 0; i < tx_config->nb_queues; i++) {
- if (copy_mode == COPY_MODE_IOAT_NUM) {
- /* Deque the mbufs from IOAT device. */
- nb_dq = rte_ioat_completed_copies(
- tx_config->ioat_ids[i], MAX_PKT_BURST,
- (void *)mbufs_src, (void *)mbufs_dst);
- } else {
- /* Deque the mbufs from rx_to_tx_ring. */
- nb_dq = rte_ring_dequeue_burst(
- tx_config->rx_to_tx_ring, (void *)mbufs_dst,
- MAX_PKT_BURST, NULL);
- }
- if ((int32_t) nb_dq <= 0)
- return;
-
- if (copy_mode == COPY_MODE_IOAT_NUM)
- rte_mempool_put_bulk(ioat_pktmbuf_pool,
- (void *)mbufs_src, nb_dq);
+ /* Dequeue the mbufs from rx_to_tx_ring. */
+ nb_dq = rte_ring_dequeue_burst(tx_config->rx_to_tx_ring,
+ (void *)mbufs, MAX_PKT_BURST, NULL);
+ if (nb_dq == 0)
+ continue;
/* Update macs if enabled */
if (mac_updating) {
for (j = 0; j < nb_dq; j++)
- update_mac_addrs(mbufs_dst[j],
+ update_mac_addrs(mbufs[j],
tx_config->rxtx_port);
}
- const uint16_t nb_tx = rte_eth_tx_burst(
- tx_config->rxtx_port, 0,
- (void *)mbufs_dst, nb_dq);
+ nb_tx = rte_eth_tx_burst(tx_config->rxtx_port, 0,
+ (void *)mbufs, nb_dq);
port_statistics.tx[tx_config->rxtx_port] += nb_tx;
/* Free any unsent packets. */
if (unlikely(nb_tx < nb_dq))
rte_mempool_put_bulk(ioat_pktmbuf_pool,
- (void *)&mbufs_dst[nb_tx],
- nb_dq - nb_tx);
+ (void *)&mbufs[nb_tx], nb_dq - nb_tx);
}
}
+/* >8 End of transmitting packets from rte_ring. */
-/* Main rx processing loop for IOAT rawdev. */
+/* Main rx processing loop for dmadev. */
static void
uint16_t i;
uint16_t nb_ports = cfg.nb_ports;
- RTE_LOG(INFO, IOAT, "Entering main rx loop for copy on lcore %u\n",
+ RTE_LOG(INFO, DMA, "Entering main rx loop for copy on lcore %u\n",
rte_lcore_id());
while (!force_quit)
uint16_t i;
uint16_t nb_ports = cfg.nb_ports;
- RTE_LOG(INFO, IOAT, "Entering main tx loop for copy on lcore %u\n",
+ RTE_LOG(INFO, DMA, "Entering main tx loop for copy on lcore %u\n",
rte_lcore_id());
while (!force_quit)
ioat_tx_port(&cfg.ports[i]);
}
-/* Main rx and tx loop if only one slave lcore available */
+/* Main rx and tx loop if only one worker lcore available */
static void
rxtx_main_loop(void)
{
uint16_t i;
uint16_t nb_ports = cfg.nb_ports;
- RTE_LOG(INFO, IOAT, "Entering main rx and tx loop for copy on"
+ RTE_LOG(INFO, DMA, "Entering main rx and tx loop for copy on"
" lcore %u\n", rte_lcore_id());
while (!force_quit)
}
}
+/* Start processing for each lcore. 8< */
static void start_forwarding_cores(void)
{
uint32_t lcore_id = rte_lcore_id();
- RTE_LOG(INFO, IOAT, "Entering %s on lcore %u\n",
+ RTE_LOG(INFO, DMA, "Entering %s on lcore %u\n",
__func__, rte_lcore_id());
if (cfg.nb_lcores == 1) {
lcore_id);
}
}
+/* >8 End of starting processing for each lcore. */
/* Display usage */
static void
ioat_usage(const char *prgname)
{
printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
+ " -b --dma-batch-size: number of requests per DMA batch\n"
+ " -f --max-frame-size: max frame size\n"
" -p --portmask: hexadecimal bitmask of ports to configure\n"
" -q NQ: number of RX queues per port (default is 1)\n"
" --[no-]mac-updating: Enable or disable MAC addresses updating (enabled by default)\n"
" - The source MAC address is replaced by the TX port MAC address\n"
" - The destination MAC address is replaced by 02:00:00:00:00:TX_PORT_ID\n"
" -c --copy-type CT: type of copy: sw|hw\n"
- " -s --ring-size RS: size of IOAT rawdev ring for hardware copy mode or rte_ring for software copy mode\n",
+ " -s --ring-size RS: size of IOAT rawdev ring for hardware copy mode or rte_ring for software copy mode\n"
+ " -i --stats-interval SI: interval, in seconds, between stats prints (default is 1)\n",
prgname);
}
ioat_parse_args(int argc, char **argv, unsigned int nb_ports)
{
static const char short_options[] =
+ "b:" /* dma batch size */
+ "c:" /* copy type (sw|hw) */
+ "f:" /* max frame size */
"p:" /* portmask */
"q:" /* number of RX queues per port */
- "c:" /* copy type (sw|hw) */
"s:" /* ring size */
+ "i:" /* interval, in seconds, between stats prints */
;
static const struct option lgopts[] = {
{CMD_LINE_OPT_NB_QUEUE, required_argument, NULL, 'q'},
{CMD_LINE_OPT_COPY_TYPE, required_argument, NULL, 'c'},
{CMD_LINE_OPT_RING_SIZE, required_argument, NULL, 's'},
+ {CMD_LINE_OPT_BATCH_SIZE, required_argument, NULL, 'b'},
+ {CMD_LINE_OPT_FRAME_SIZE, required_argument, NULL, 'f'},
+ {CMD_LINE_OPT_STATS_INTERVAL, required_argument, NULL, 'i'},
{NULL, 0, 0, 0}
};
lgopts, &option_index)) != EOF) {
switch (opt) {
+ case 'b':
+ ioat_batch_sz = atoi(optarg);
+ if (ioat_batch_sz > MAX_PKT_BURST) {
+ printf("Invalid dma batch size, %s.\n", optarg);
+ ioat_usage(prgname);
+ return -1;
+ }
+ break;
+ case 'f':
+ max_frame_size = atoi(optarg);
+ if (max_frame_size > RTE_ETHER_MAX_JUMBO_FRAME_LEN) {
+ printf("Invalid max frame size, %s.\n", optarg);
+ ioat_usage(prgname);
+ return -1;
+ }
+ break;
+
/* portmask */
case 'p':
ioat_enabled_port_mask = ioat_parse_portmask(optarg);
ioat_usage(prgname);
return -1;
}
+ /* ring_size must be less than or equal to MBUF_RING_SIZE
+ * to avoid overwriting bufs
+ */
+ if (ring_size > MBUF_RING_SIZE) {
+ printf("Max ring_size is %d, setting ring_size to max\n",
+ MBUF_RING_SIZE);
+ ring_size = MBUF_RING_SIZE;
+ }
+ break;
+
+ case 'i':
+ stats_interval = atoi(optarg);
+ if (stats_interval == 0) {
+ printf("Invalid stats interval, setting to 1\n");
+ stats_interval = 1; /* set to default */
+ }
break;
/* long options */
uint16_t portid;
struct rte_eth_link link;
int ret, link_status = 0;
+ char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
printf("\nChecking link status\n");
RTE_ETH_FOREACH_DEV(portid) {
}
/* Print link status */
- if (link.link_status) {
- printf(
- "Port %d Link Up. Speed %u Mbps - %s\n",
- portid, link.link_speed,
- (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
- ("full-duplex") : ("half-duplex"));
+ rte_eth_link_to_str(link_status_text,
+ sizeof(link_status_text), &link);
+ printf("Port %d %s\n", portid, link_status_text);
+
+ if (link.link_status)
link_status = 1;
- } else
- printf("Port %d Link Down\n", portid);
}
return link_status;
}
+/* Configuration of device. 8< */
static void
configure_rawdev_queue(uint32_t dev_id)
{
- struct rte_ioat_rawdev_config dev_config = { .ring_size = ring_size };
- struct rte_rawdev_info info = { .dev_private = &dev_config };
+ struct rte_dma_info info;
+ struct rte_dma_conf dev_config = { .nb_vchans = 1 };
+ struct rte_dma_vchan_conf qconf = {
+ .direction = RTE_DMA_DIR_MEM_TO_MEM,
+ .nb_desc = ring_size
+ };
+ uint16_t vchan = 0;
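+
+ /* each dmadev exposes one or more virtual channels (vchans); this
+ * app drives a single MEM_TO_MEM vchan per device
+ */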
- if (rte_rawdev_configure(dev_id, &info, sizeof(dev_config)) != 0) {
- rte_exit(EXIT_FAILURE,
- "Error with rte_rawdev_configure()\n");
+ if (rte_dma_configure(dev_id, &dev_config) != 0)
+ rte_exit(EXIT_FAILURE, "Error with rte_dma_configure()\n");
+
+ if (rte_dma_vchan_setup(dev_id, vchan, &qconf) != 0) {
+ printf("Error with queue configuration\n");
+ rte_panic();
}
- if (rte_rawdev_start(dev_id) != 0) {
- rte_exit(EXIT_FAILURE,
- "Error with rte_rawdev_start()\n");
+ rte_dma_info_get(dev_id, &info);
+ if (info.nb_vchans != 1) {
+ printf("Error, no configured queues reported on device id %u\n", dev_id);
+ rte_panic();
}
+ if (rte_dma_start(dev_id) != 0)
+ rte_exit(EXIT_FAILURE, "Error with rte_dma_start()\n");
}
+/* >8 End of configuration of device. */
+/* Using dmadev API functions. 8< */
static void
assign_rawdevs(void)
{
- uint16_t nb_rawdev = 0, rdev_id = 0;
+ uint16_t nb_rawdev = 0;
+ int16_t rdev_id = rte_dma_next_dev(0);
uint32_t i, j;
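+ /* rte_dma_next_dev() walks the valid dmadev IDs; -1 means exhausted */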
for (i = 0; i < cfg.nb_ports; i++) {
for (j = 0; j < cfg.ports[i].nb_queues; j++) {
- struct rte_rawdev_info rdev_info = { 0 };
-
- do {
- if (rdev_id == rte_rawdev_count())
- goto end;
- rte_rawdev_info_get(rdev_id++, &rdev_info, 0);
- } while (rdev_info.driver_name == NULL ||
- strcmp(rdev_info.driver_name,
- IOAT_PMD_RAWDEV_NAME_STR) != 0);
-
- cfg.ports[i].ioat_ids[j] = rdev_id - 1;
- configure_rawdev_queue(cfg.ports[i].ioat_ids[j]);
+ if (rdev_id == -1)
+ goto end;
+
+ cfg.ports[i].dmadev_ids[j] = rdev_id;
+ configure_rawdev_queue(cfg.ports[i].dmadev_ids[j]);
+ rdev_id = rte_dma_next_dev(rdev_id + 1);
++nb_rawdev;
}
}
rte_exit(EXIT_FAILURE,
"Not enough IOAT rawdevs (%u) for all queues (%u).\n",
nb_rawdev, cfg.nb_ports * cfg.ports[0].nb_queues);
- RTE_LOG(INFO, IOAT, "Number of used rawdevs: %u.\n", nb_rawdev);
+ RTE_LOG(INFO, DMA, "Number of used rawdevs: %u.\n", nb_rawdev);
}
+/* >8 End of using dmadev API functions. */
+/* Assign ring structures for packet exchanging. 8< */
static void
assign_rings(void)
{
rte_strerror(rte_errno));
}
}
+/* >8 End of assigning ring structures for packet exchanging. */
/*
* Initializes a given port using global settings and with the RX buffers
static inline void
port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
{
- /* configuring port to use RSS for multiple RX queues */
+ /* Configuring port to use RSS for multiple RX queues. 8< */
static const struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN
+ .mq_mode = RTE_ETH_MQ_RX_RSS,
},
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_PROTO_MASK,
+ .rss_hf = RTE_ETH_RSS_PROTO_MASK,
}
}
};
+ /* >8 End of configuring port to use RSS for multiple RX queues. */
struct rte_eth_rxconf rxq_conf;
struct rte_eth_txconf txq_conf;
struct rte_eth_dev_info dev_info;
int ret, i;
+ if (max_frame_size > local_port_conf.rxmode.mtu)
+ local_port_conf.rxmode.mtu = max_frame_size;
+
/* Skip ports that are not enabled */
if ((ioat_enabled_port_mask & (1 << portid)) == 0) {
printf("Skipping disabled port %u\n", portid);
local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
dev_info.flow_type_rss_offloads;
- if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
- local_port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
ret = rte_eth_dev_configure(portid, nb_queues, 1, &local_port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot configure device:"
"Cannot set error callback for tx buffer on port %u\n",
portid);
- /* Start device */
+ /* Start device. 8< */
ret = rte_eth_dev_start(portid);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_dev_start:err=%d, port=%u\n",
ret, portid);
+ /* >8 End of starting device. */
+ /* RX port is set in promiscuous mode. 8< */
rte_eth_promiscuous_enable(portid);
+ /* >8 End of RX port is set in promiscuous mode. */
- printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
+ printf("Port %u, MAC address: " RTE_ETHER_ADDR_PRT_FMT "\n\n",
portid,
- ioat_ports_eth_addr[portid].addr_bytes[0],
- ioat_ports_eth_addr[portid].addr_bytes[1],
- ioat_ports_eth_addr[portid].addr_bytes[2],
- ioat_ports_eth_addr[portid].addr_bytes[3],
- ioat_ports_eth_addr[portid].addr_bytes[4],
- ioat_ports_eth_addr[portid].addr_bytes[5]);
+ RTE_ETHER_ADDR_BYTES(&ioat_ports_eth_addr[portid]));
cfg.ports[cfg.nb_ports].rxtx_port = portid;
cfg.ports[cfg.nb_ports++].nb_queues = nb_queues;
}
+/* Get a device dump for each device being used by the application */
+static void
+rawdev_dump(void)
+{
+ uint32_t i, j;
+
+ if (copy_mode != COPY_MODE_IOAT_NUM)
+ return;
+
+ for (i = 0; i < cfg.nb_ports; i++)
+ for (j = 0; j < cfg.ports[i].nb_queues; j++)
+ rte_dma_dump(cfg.ports[i].dmadev_ids[j], stdout);
+}
+
static void
signal_handler(int signum)
{
printf("\n\nSignal %d received, preparing to exit...\n",
signum);
force_quit = true;
+ } else if (signum == SIGUSR1) {
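+ /* e.g. "kill -USR1 <pid>" dumps DMA device state at runtime */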
+ rawdev_dump();
}
}
uint16_t nb_ports, portid;
uint32_t i;
unsigned int nb_mbufs;
+ size_t sz;
- /* Init EAL */
+ /* Init EAL. 8< */
ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
+ /* >8 End of init EAL. */
argc -= ret;
argv += ret;
force_quit = false;
signal(SIGINT, signal_handler);
signal(SIGTERM, signal_handler);
+ signal(SIGUSR1, signal_handler);
nb_ports = rte_eth_dev_count_avail();
if (nb_ports == 0)
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid IOAT arguments\n");
+ /* Allocates mempool to hold the mbufs. 8< */
nb_mbufs = RTE_MAX(nb_ports * (nb_queues * (nb_rxd + nb_txd +
- 4 * MAX_PKT_BURST) + rte_lcore_count() * MEMPOOL_CACHE_SIZE),
+ 4 * MAX_PKT_BURST + ring_size) + ring_size +
+ rte_lcore_count() * MEMPOOL_CACHE_SIZE),
MIN_POOL_SIZE);
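+ /* the extra ring_size terms account for mbufs that may sit in the
+ * DMA rings and the rx-to-tx ring at the same time
+ */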
/* Create the mbuf pool */
+ sz = max_frame_size + RTE_PKTMBUF_HEADROOM;
+ sz = RTE_MAX(sz, (size_t)RTE_MBUF_DEFAULT_BUF_SIZE);
ioat_pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", nb_mbufs,
- MEMPOOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
- rte_socket_id());
+ MEMPOOL_CACHE_SIZE, 0, sz, rte_socket_id());
if (ioat_pktmbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
+ /* >8 End of allocates mempool to hold the mbufs. */
- /* Initialise each port */
+ /* Initialize each port. 8< */
cfg.nb_ports = 0;
RTE_ETH_FOREACH_DEV(portid)
port_init(portid, ioat_pktmbuf_pool, nb_queues);
+ /* >8 End of initializing each port. */
/* Initialize port xstats */
memset(&port_statistics, 0, sizeof(port_statistics));
+ /* Assigning each port resources. 8< */
while (!check_link_status(ioat_enabled_port_mask) && !force_quit)
sleep(1);
cfg.nb_lcores = rte_lcore_count() - 1;
if (cfg.nb_lcores < 1)
rte_exit(EXIT_FAILURE,
- "There should be at least one slave lcore.\n");
+ "There should be at least one worker lcore.\n");
if (copy_mode == COPY_MODE_IOAT_NUM)
assign_rawdevs();
- else /* copy_mode == COPY_MODE_SW_NUM */
- assign_rings();
+
+ assign_rings();
+ /* >8 End of assigning each port resources. */
start_forwarding_cores();
- /* master core prints stats while other cores forward */
+ /* main core prints stats while other cores forward */
print_stats(argv[0]);
/* force_quit is true when we get here */
uint32_t j;
for (i = 0; i < cfg.nb_ports; i++) {
printf("Closing port %d\n", cfg.ports[i].rxtx_port);
- rte_eth_dev_stop(cfg.ports[i].rxtx_port);
+ ret = rte_eth_dev_stop(cfg.ports[i].rxtx_port);
+ if (ret != 0)
+ RTE_LOG(ERR, DMA, "rte_eth_dev_stop: err=%s, port=%u\n",
+ rte_strerror(-ret), cfg.ports[i].rxtx_port);
+
rte_eth_dev_close(cfg.ports[i].rxtx_port);
if (copy_mode == COPY_MODE_IOAT_NUM) {
for (j = 0; j < cfg.ports[i].nb_queues; j++) {
printf("Stopping rawdev %d\n",
- cfg.ports[i].ioat_ids[j]);
- rte_rawdev_stop(cfg.ports[i].ioat_ids[j]);
+ cfg.ports[i].dmadev_ids[j]);
+ rte_dma_stop(cfg.ports[i].dmadev_ids[j]);
}
} else /* copy_mode == COPY_MODE_SW_NUM */
rte_ring_free(cfg.ports[i].rx_to_tx_ring);
}
+ /* clean up the EAL */
+ rte_eal_cleanup();
+
printf("Bye...\n");
return 0;
}