{
struct virtio_net_data_ll *dev_ll;
uint64_t tx_dropped, rx_dropped;
- uint64_t tx, tx_total, rx, rx_total;
+ uint64_t tx, tx_total, rx, rx_total, rx_ip_csum, rx_l4_csum;
uint32_t device_fh;
const char clr[] = { 27, '[', '2', 'J', '\0' };
const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };
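/* VT100 escape sequences: ESC[2J clears the screen, ESC[1;1H homes the cursor. */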
rx = rte_atomic64_read(
&dev_statistics[device_fh].rx_atomic);
rx_dropped = rx_total - rx;
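+ /* The bad-csum counters are bumped from the RX path, so read them atomically. */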
+ rx_ip_csum = rte_atomic64_read(
+ &dev_statistics[device_fh].rx_bad_ip_csum);
+ rx_l4_csum = rte_atomic64_read(
+ &dev_statistics[device_fh].rx_bad_l4_csum);
printf("\nStatistics for device %"PRIu32" ----------"
"\nTX total: %"PRIu64""
"\nTX dropped: %"PRIu64""
"\nTX successful: %"PRIu64""
"\nRX total: %"PRIu64""
+ "\nRX bad IP csum: %"PRIu64""
+ "\nRX bad L4 csum: %"PRIu64""
"\nRX dropped: %"PRIu64""
"\nRX successful: %"PRIu64"",
device_fh,
tx_total,
tx_dropped,
tx,
rx_total,
+ rx_ip_csum,
+ rx_l4_csum,
rx_dropped,
rx);
uint64_t rx_total;
uint64_t tx;
rte_atomic64_t rx_atomic;
+ /** Bad inner IP csum for tunneled pkt */
+ rte_atomic64_t rx_bad_ip_csum;
+ /** Bad inner L4 csum for tunneled pkt */
+ rte_atomic64_t rx_bad_l4_csum;
} __rte_cache_aligned;
struct rte_mbuf *pkts_valid[rx_count];
for (i = 0; i < rx_count; i++) {
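+ /* Count packets the NIC flagged with bad inner checksums; each
+  * (ol_flags & FLAG) != 0 comparison evaluates to 0 or 1.
+  */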
+ if (enable_stats) {
+ rte_atomic64_add(
+ &dev_statistics[dev->device_fh].rx_bad_ip_csum,
+ (pkts_burst[i]->ol_flags & PKT_RX_IP_CKSUM_BAD)
+ != 0);
+ rte_atomic64_add(
+ &dev_statistics[dev->device_fh].rx_bad_l4_csum,
+ (pkts_burst[i]->ol_flags & PKT_RX_L4_CKSUM_BAD)
+ != 0);
+ }
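/* Tunnel RX processing; a negative return means the packet is dropped. */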
ret = vxlan_rx_process(pkts_burst[i]);
if (unlikely(ret < 0))
continue;
extern uint8_t filter_idx;
extern uint8_t ports[RTE_MAX_ETHPORTS];
extern struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
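+/* Runtime stats toggle and per-device counters used by the RX checksum accounting. */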
+extern uint32_t enable_stats;
+extern struct device_statistics dev_statistics[MAX_DEVICES];
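/* Hook type: overlay-specific configuration of a physical port and its mbuf pool. */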
typedef int (*ol_port_configure_t)(uint8_t port,
struct rte_mempool *mbuf_pool);