Because some apps may pass illegal parameters, the driver adds
checks for illegal parameters and DFX statistics, which include
the sge_len0 and mbuf_null txq xstats members.
Signed-off-by: Xiaoyun Wang <cloud.wangxiaoyun@huawei.com>
{"copy_pkts", offsetof(struct hinic_txq_stats, cpy_pkts)},
{"rl_drop", offsetof(struct hinic_txq_stats, rl_drop)},
{"burst_pkts", offsetof(struct hinic_txq_stats, burst_pkts)},
{"copy_pkts", offsetof(struct hinic_txq_stats, cpy_pkts)},
{"rl_drop", offsetof(struct hinic_txq_stats, rl_drop)},
{"burst_pkts", offsetof(struct hinic_txq_stats, burst_pkts)},
+ {"sge_len0", offsetof(struct hinic_txq_stats, sge_len0)},
+ {"mbuf_null", offsetof(struct hinic_txq_stats, mbuf_null)},
};
#define HINIC_TXQ_XSTATS_NUM (sizeof(hinic_txq_stats_strings) / \
};
#define HINIC_TXQ_XSTATS_NUM (sizeof(hinic_txq_stats_strings) / \
i = 0;
for (sge_idx = sges; (u64)sge_idx <= txq->sq_bot_sge_addr;
sge_idx++) {
i = 0;
for (sge_idx = sges; (u64)sge_idx <= txq->sq_bot_sge_addr;
sge_idx++) {
+ if (unlikely(mbuf == NULL)) {
+ txq->txq_stats.mbuf_null++;
+ return false;
+ }
+
dma_addr = rte_mbuf_data_iova(mbuf);
dma_addr = rte_mbuf_data_iova(mbuf);
+ if (unlikely(mbuf->data_len == 0)) {
+ txq->txq_stats.sge_len0++;
+ return false;
+ }
hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
mbuf->data_len);
mbuf = mbuf->next;
hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
mbuf->data_len);
mbuf = mbuf->next;
sge_idx = (struct hinic_sq_bufdesc *)
((void *)txq->sq_head_addr);
for (; i < nb_segs; i++) {
sge_idx = (struct hinic_sq_bufdesc *)
((void *)txq->sq_head_addr);
for (; i < nb_segs; i++) {
+ if (unlikely(mbuf == NULL)) {
+ txq->txq_stats.mbuf_null++;
+ return false;
+ }
+
dma_addr = rte_mbuf_data_iova(mbuf);
dma_addr = rte_mbuf_data_iova(mbuf);
+ if (unlikely(mbuf->data_len == 0)) {
+ txq->txq_stats.sge_len0++;
+ return false;
+ }
hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
mbuf->data_len);
mbuf = mbuf->next;
hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
mbuf->data_len);
mbuf = mbuf->next;
} else {
/* wqe is in continuous space */
for (i = 0; i < nb_segs; i++) {
} else {
/* wqe is in continuous space */
for (i = 0; i < nb_segs; i++) {
+ if (unlikely(mbuf == NULL)) {
+ txq->txq_stats.mbuf_null++;
+ return false;
+ }
+
dma_addr = rte_mbuf_data_iova(mbuf);
dma_addr = rte_mbuf_data_iova(mbuf);
+ if (unlikely(mbuf->data_len == 0)) {
+ txq->txq_stats.sge_len0++;
+ return false;
+ }
hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
mbuf->data_len);
mbuf = mbuf->next;
hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
mbuf->data_len);
mbuf = mbuf->next;
/* deal with the last mbuf */
dma_addr = rte_mbuf_data_iova(mbuf);
/* deal with the last mbuf */
dma_addr = rte_mbuf_data_iova(mbuf);
+ if (unlikely(mbuf->data_len == 0)) {
+ txq->txq_stats.sge_len0++;
+ return false;
+ }
hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
mbuf->data_len);
if (unlikely(sqe_info->around))
hinic_set_sge((struct hinic_sge *)sge_idx, dma_addr,
mbuf->data_len);
if (unlikely(sqe_info->around))
u64 off_errs;
u64 cpy_pkts;
u64 burst_pkts;
u64 off_errs;
u64 cpy_pkts;
u64 burst_pkts;
+ u64 sge_len0;
+ u64 mbuf_null;
};
struct hinic_tx_info {
};
struct hinic_tx_info {