/*
* Known limitations:
* - RSS hash key and options cannot be modified.
- * - Hardware counters aren't implemented.
*/
/* System headers. */
struct txq_elt *elt_next = &(*txq->elts)[elts_head_next];
struct txq_elt *elt = &(*txq->elts)[elts_head];
unsigned int segs = NB_SEGS(buf);
-#ifdef MLX4_PMD_SOFT_COUNTERS
unsigned int sent_size = 0;
-#endif
uint32_t send_flags = 0;
/* Clean up old buffer. */
send_flags);
if (unlikely(err))
goto stop;
-#ifdef MLX4_PMD_SOFT_COUNTERS
sent_size += length;
-#endif
} else {
#if MLX4_PMD_SGE_WR_N > 1
struct ibv_sge sges[MLX4_PMD_SGE_WR_N];
send_flags);
if (unlikely(err))
goto stop;
-#ifdef MLX4_PMD_SOFT_COUNTERS
sent_size += ret.length;
-#endif
#else /* MLX4_PMD_SGE_WR_N > 1 */
DEBUG("%p: TX scattered buffers support not"
" compiled in", (void *)txq);
#endif /* MLX4_PMD_SGE_WR_N > 1 */
}
elts_head = elts_head_next;
-#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increment sent bytes counter. */
txq->stats.obytes += sent_size;
-#endif
}
stop:
/* Take a shortcut if nothing must be sent. */
if (unlikely(i == 0))
return 0;
-#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increment sent packets counter. */
txq->stats.opackets += i;
-#endif
/* Ring QP doorbell. */
err = txq->if_qp->send_flush(txq->qp);
if (unlikely(err)) {
" completion status (%d): %s",
(void *)rxq, wc.wr_id, wc.status,
ibv_wc_status_str(wc.status));
-#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increment dropped packets counter. */
++rxq->stats.idropped;
-#endif
/* Link completed WRs together for repost. */
*next = wr;
next = &wr->next;
/* Return packet. */
*(pkts++) = pkt_buf;
++pkts_ret;
-#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increase bytes counter. */
rxq->stats.ibytes += pkt_buf_len;
-#endif
repost:
if (++elts_head >= elts_n)
elts_head = 0;
abort();
}
rxq->elts_head = elts_head;
-#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increase packets counter. */
rxq->stats.ipackets += pkts_ret;
-#endif
return pkts_ret;
}
" completion status (%d): %s",
(void *)rxq, wc.wr_id, wc.status,
ibv_wc_status_str(wc.status));
-#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increment dropped packets counter. */
++rxq->stats.idropped;
-#endif
/* Add SGE to array for repost. */
sges[i] = elt->sge;
goto repost;
/* Return packet. */
*(pkts++) = seg;
++pkts_ret;
-#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increase bytes counter. */
rxq->stats.ibytes += len;
-#endif
repost:
if (++elts_head >= elts_n)
elts_head = 0;
abort();
}
rxq->elts_head = elts_head;
-#ifdef MLX4_PMD_SOFT_COUNTERS
/* Increase packets counter. */
rxq->stats.ipackets += pkts_ret;
-#endif
return pkts_ret;
}
continue;
idx = rxq->stats.idx;
if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
-#ifdef MLX4_PMD_SOFT_COUNTERS
tmp.q_ipackets[idx] += rxq->stats.ipackets;
tmp.q_ibytes[idx] += rxq->stats.ibytes;
-#endif
tmp.q_errors[idx] += (rxq->stats.idropped +
rxq->stats.rx_nombuf);
}
-#ifdef MLX4_PMD_SOFT_COUNTERS
tmp.ipackets += rxq->stats.ipackets;
tmp.ibytes += rxq->stats.ibytes;
-#endif
tmp.ierrors += rxq->stats.idropped;
tmp.rx_nombuf += rxq->stats.rx_nombuf;
}
continue;
idx = txq->stats.idx;
if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
-#ifdef MLX4_PMD_SOFT_COUNTERS
tmp.q_opackets[idx] += txq->stats.opackets;
tmp.q_obytes[idx] += txq->stats.obytes;
-#endif
tmp.q_errors[idx] += txq->stats.odropped;
}
-#ifdef MLX4_PMD_SOFT_COUNTERS
tmp.opackets += txq->stats.opackets;
tmp.obytes += txq->stats.obytes;
-#endif
tmp.oerrors += txq->stats.odropped;
}
-#ifndef MLX4_PMD_SOFT_COUNTERS
- /* FIXME: retrieve and add hardware counters. */
-#endif
*stats = tmp;
priv_unlock(priv);
}
(*priv->txqs)[i]->stats =
(struct mlx4_txq_stats){ .idx = idx };
}
-#ifndef MLX4_PMD_SOFT_COUNTERS
- /* FIXME: reset hardware counters. */
-#endif
priv_unlock(priv);
}
#define MLX4_PMD_TX_MP_CACHE 8
#endif
-/*
- * If defined, only use software counters. The PMD will never ask the hardware
- * for these, and many of them won't be available.
- */
-#ifndef MLX4_PMD_SOFT_COUNTERS
-#define MLX4_PMD_SOFT_COUNTERS 1
-#endif
-
/* Alarm timeout. */
#define MLX4_ALARM_TIMEOUT_US 100000
struct mlx4_rxq_stats {
unsigned int idx; /**< Mapping index. */
-#ifdef MLX4_PMD_SOFT_COUNTERS
uint64_t ipackets; /**< Total of successfully received packets. */
uint64_t ibytes; /**< Total of successfully received bytes. */
-#endif
uint64_t idropped; /**< Total of packets dropped when RX ring full. */
uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
};
struct mlx4_txq_stats {
unsigned int idx; /**< Mapping index. */
-#ifdef MLX4_PMD_SOFT_COUNTERS
uint64_t opackets; /**< Total of successfully sent packets. */
uint64_t obytes; /**< Total of successfully sent bytes. */
-#endif
uint64_t odropped; /**< Total of packets not sent when TX ring full. */
};
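With the option removed, the ipackets/ibytes/opackets/obytes fields above always exist and are always maintained, so applications can rely on the standard ethdev stats call to read them. The snippet below is a usage sketch, not part of the patch; it assumes the current rte_eth_stats_get() prototype (returning int; older DPDK releases returned void) and a valid, started port_id.

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Read and print the software counters for one port. */
static void
print_port_stats(uint16_t port_id)
{
	struct rte_eth_stats st;

	if (rte_eth_stats_get(port_id, &st) != 0)
		return;
	printf("port %u: rx %" PRIu64 " pkts/%" PRIu64 " B,"
	       " tx %" PRIu64 " pkts/%" PRIu64 " B,"
	       " rx errors %" PRIu64 ", no mbuf %" PRIu64 "\n",
	       port_id, st.ipackets, st.ibytes,
	       st.opackets, st.obytes,
	       st.ierrors, st.rx_nombuf);
}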