This commit adds a new statistic to the SW eventdev PMD.
The statistic shows how many packets were sent from a
queue to a port. This provides information on how traffic
from a specific queue is being load-balanced to worker cores.
Note that these numbers should be compared across all queue
stages - the load-balancing does not try to perfectly share
each queue's traffic, rather it balances the overall traffic
from all queues to the ports.
The statistic is printed from the rte_eventdev_dump() function,
as well as being made available via the xstats API.
Unit tests have been updated to expect more per-queue statistics,
and the correctness of counts and counts after reset is verified.
Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>
inflights += qid->fids[flow].pcount;
}
inflights += qid->fids[flow].pcount;
}
- uint32_t cq;
- fprintf(f, "\tInflights: %u\tFlows pinned per port: ",
- inflights);
- for (cq = 0; cq < sw->port_count; cq++)
- fprintf(f, "%d ", affinities_per_port[cq]);
- fprintf(f, "\n");
+ uint32_t port;
+ fprintf(f, "\tPer Port Stats:\n");
+ for (port = 0; port < sw->port_count; port++) {
+ fprintf(f, "\t Port %d: Pkts: %"PRIu64, port,
+ qid->to_port[port]);
+ fprintf(f, "\tFlows: %d\n", affinities_per_port[port]);
+ }
uint32_t iq;
uint32_t iq_printed = 0;
uint32_t iq;
uint32_t iq_printed = 0;
uint32_t cq_num_mapped_cqs;
uint32_t cq_next_tx; /* cq to write next (non-atomic) packet */
uint32_t cq_map[SW_PORTS_MAX];
uint32_t cq_num_mapped_cqs;
uint32_t cq_next_tx; /* cq to write next (non-atomic) packet */
uint32_t cq_map[SW_PORTS_MAX];
+ uint64_t to_port[SW_PORTS_MAX];
/* Track flow ids for atomic load balancing */
struct sw_fid_t fids[SW_QID_NUM_FIDS];
/* Track flow ids for atomic load balancing */
struct sw_fid_t fids[SW_QID_NUM_FIDS];
p->stats.tx_pkts++;
qid->stats.tx_pkts++;
p->stats.tx_pkts++;
qid->stats.tx_pkts++;
/* if we just filled in the last slot, flush the buffer */
if (sw->cq_ring_space[cq] == 0) {
/* if we just filled in the last slot, flush the buffer */
if (sw->cq_ring_space[cq] == 0) {
iq_used,
/* qid port mapping specific */
pinned,
iq_used,
/* qid port mapping specific */
pinned,
+ pkts, /* note: qid-to-port pkts */
};
typedef uint64_t (*xstats_fn)(const struct sw_evdev *dev,
};
typedef uint64_t (*xstats_fn)(const struct sw_evdev *dev,
return pin;
} while (0);
break;
return pin;
} while (0);
break;
+ case pkts:
+ return qid->to_port[port];
static const enum xstats_type qid_iq_types[] = { iq_used };
/* reset allowed */
static const enum xstats_type qid_iq_types[] = { iq_used };
/* reset allowed */
- static const char * const qid_port_stats[] = { "pinned_flows" };
- static const enum xstats_type qid_port_types[] = { pinned };
+ static const char * const qid_port_stats[] = { "pinned_flows",
+ "packets"
+ };
+ static const enum xstats_type qid_port_types[] = { pinned, pkts };
+ static const uint8_t qid_port_reset_allowed[] = {0, 1};
/* reset allowed */
/* ---- end of stat definitions ---- */
/* reset allowed */
/* ---- end of stat definitions ---- */
.stat = qid_port_types[i],
.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
.extra_arg = port,
.stat = qid_port_types[i],
.mode = RTE_EVENT_DEV_XSTATS_QUEUE,
.extra_arg = port,
+ .reset_allowed =
+ qid_port_reset_allowed[i],
};
snprintf(sname, sizeof(sname),
"qid_%u_port_%u_%s",
};
snprintf(sname, sizeof(sname),
"qid_%u_port_%u_%s",
ret = rte_event_dev_xstats_names_get(evdev,
RTE_EVENT_DEV_XSTATS_QUEUE,
0, xstats_names, ids, XSTATS_MAX);
ret = rte_event_dev_xstats_names_get(evdev,
RTE_EVENT_DEV_XSTATS_QUEUE,
0, xstats_names, ids, XSTATS_MAX);
- if (ret != 13) {
- printf("%d: expected 13 stats, got return %d\n", __LINE__, ret);
+ if (ret != 17) {
+ printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
ret = rte_event_dev_xstats_get(evdev,
RTE_EVENT_DEV_XSTATS_QUEUE,
0, ids, values, ret);
ret = rte_event_dev_xstats_get(evdev,
RTE_EVENT_DEV_XSTATS_QUEUE,
0, ids, values, ret);
- if (ret != 13) {
- printf("%d: expected 13 stats, got return %d\n", __LINE__, ret);
+ if (ret != 17) {
+ printf("%d: expected 17 stats, got return %d\n", __LINE__, ret);
3 /* inflights */,
512 /* iq size */,
0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
3 /* inflights */,
512 /* iq size */,
0, 0, 0, 0, /* iq 0, 1, 2, 3 used */
- 0, 0, 1, 0, /* qid_0_port_X_pinned_flows */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 3,
+ 0, 0,
};
for (i = 0; (signed int)i < ret; i++) {
if (queue_expected[i] != values[i]) {
};
for (i = 0; (signed int)i < ret; i++) {
if (queue_expected[i] != values[i]) {
3 /* inflight */,
512 /* iq size */,
0, 0, 0, 0, /* 4 iq used */
3 /* inflight */,
512 /* iq size */,
0, 0, 0, 0, /* 4 iq used */
- 0, 0, 1, 0, /* qid to port pinned flows */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 0,
+ 0, 0,
};
ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
};
ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0,
goto fail;
/* num queue stats */
goto fail;
/* num queue stats */
/* queue offset from start of the devices whole xstats.
* This will break every time we add a statistic to a device/port/queue
*/
/* queue offset from start of the devices whole xstats.
* This will break every time we add a statistic to a device/port/queue
*/
"qid_0_iq_2_used",
"qid_0_iq_3_used",
"qid_0_port_0_pinned_flows",
"qid_0_iq_2_used",
"qid_0_iq_3_used",
"qid_0_port_0_pinned_flows",
+ "qid_0_port_0_packets",
"qid_0_port_1_pinned_flows",
"qid_0_port_1_pinned_flows",
+ "qid_0_port_1_packets",
"qid_0_port_2_pinned_flows",
"qid_0_port_2_pinned_flows",
+ "qid_0_port_2_packets",
"qid_0_port_3_pinned_flows",
"qid_0_port_3_pinned_flows",
+ "qid_0_port_3_packets",
};
uint64_t queue_expected[] = {
7, /* rx */
};
uint64_t queue_expected[] = {
7, /* rx */
0, /* iq 1 used */
0, /* iq 2 used */
0, /* iq 3 used */
0, /* iq 1 used */
0, /* iq 2 used */
0, /* iq 3 used */
- 0, /* qid 0 port 0 pinned flows */
- 0, /* qid 0 port 1 pinned flows */
- 1, /* qid 0 port 2 pinned flows */
- 0, /* qid 0 port 4 pinned flows */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 7,
+ 0, 0,
};
uint64_t queue_expected_zero[] = {
0, /* rx */
};
uint64_t queue_expected_zero[] = {
0, /* rx */
0, /* iq 1 used */
0, /* iq 2 used */
0, /* iq 3 used */
0, /* iq 1 used */
0, /* iq 2 used */
0, /* iq 3 used */
- 0, /* qid 0 port 0 pinned flows */
- 0, /* qid 0 port 1 pinned flows */
- 1, /* qid 0 port 2 pinned flows */
- 0, /* qid 0 port 4 pinned flows */
+ /* QID-to-Port: pinned_flows, packets */
+ 0, 0,
+ 0, 0,
+ 1, 0,
+ 0, 0,
};
if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
};
if (RTE_DIM(queue_expected) != NUM_Q_STATS ||
+ RTE_DIM(queue_expected_zero) != NUM_Q_STATS ||
RTE_DIM(queue_names) != NUM_Q_STATS) {
printf("%d : queue array of wrong size\n", __LINE__);
goto fail;
RTE_DIM(queue_names) != NUM_Q_STATS) {
printf("%d : queue array of wrong size\n", __LINE__);
goto fail;
failed = 1;
}
if (val != queue_expected[i]) {
failed = 1;
}
if (val != queue_expected[i]) {
- printf("%d: %s value incorrect, expected %"PRIu64
- " got %d\n", __LINE__, queue_names[i],
- queue_expected[i], id);
+ printf("%d: %d: %s value incorrect, expected %"PRIu64
+ " got %"PRIu64"\n", i, __LINE__,
+ queue_names[i], queue_expected[i], val);
failed = 1;
}
/* reset to zero */
failed = 1;
}
/* reset to zero */