/* Check that m->nb_segs does not exceed the limits. */
if (!(ol_flags & PKT_TX_TCP_SEG)) {
- if (m->nb_segs > I40E_TX_MAX_SEG ||
- m->nb_segs > I40E_TX_MAX_MTU_SEG) {
+ if (m->nb_segs > I40E_TX_MAX_MTU_SEG) {
rte_errno = -EINVAL;
return i;
}
- } else if ((m->tso_segsz < I40E_MIN_TSO_MSS) ||
- (m->tso_segsz > I40E_MAX_TSO_MSS)) {
+ } else if (m->nb_segs > I40E_TX_MAX_SEG ||
+ m->tso_segsz < I40E_MIN_TSO_MSS ||
+ m->tso_segsz > I40E_MAX_TSO_MSS) {
/* An MSS outside the range (256B - 9674B) is considered
 * malicious
 */
rte_errno = -EINVAL;
return i;
}
+ /* Check that the packet length is within the supported range. */
+ if (m->pkt_len > I40E_FRAME_SIZE_MAX ||
+ m->pkt_len < I40E_TX_MIN_PKT_LEN) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
ret = rte_validate_tx_offload(m);
if (ret != 0) {
rte_errno = ret;
return i;
}
#endif
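/*
 * Context for reviewers: a minimal sketch (not part of the patch) of how an
 * application reaches this validation path via rte_eth_tx_prepare(). The
 * helper name send_burst() and the drop-on-failure policy are illustrative
 * assumptions; only the ethdev calls themselves are real API.
 */
#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	/* Runs the driver's prep callback (the checks above: nb_segs,
	 * TSO MSS range, pkt_len) before the actual transmit. */
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
					      pkts, nb_pkts);

	if (nb_prep < nb_pkts) {
		/* pkts[nb_prep] failed validation; rte_errno carries the
		 * reason (negative in this code base, e.g. -EINVAL).
		 * Dropping the offending mbuf is just one possible policy. */
		rte_pktmbuf_free(pkts[nb_prep]);
	}

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}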
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
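/*
 * For reference: rte_eth_dev_must_keep_crc() (ethdev driver API) reduces to
 * a flag test on the port's Rx offloads. The local name must_keep_crc_sketch
 * below is an illustrative re-statement, not the real implementation.
 */
static inline int
must_keep_crc_sketch(uint64_t rx_offloads)
{
	/* Keep the 4-byte Ethernet FCS only when the application asked
	 * for DEV_RX_OFFLOAD_KEEP_CRC; otherwise the NIC strips it. */
	return (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) != 0;
}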
struct i40e_adapter *ad =
I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
- /* Use a simple Tx queue (no offloads, no multi segs) if possible */
- ad->tx_simple_allowed = (txq->offloads == 0 &&
- txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST);
+ /* Use a simple Tx queue if possible (only fast free is allowed) */
+ ad->tx_simple_allowed =
+ (txq->offloads ==
+ (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+ txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST);
ad->tx_vec_allowed = (ad->tx_simple_allowed &&
txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ);
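/*
 * Note on the offload test above: comparing txq->offloads against
 * (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) is a subset check.
 * An equivalent sketch (the helper name is illustrative):
 */
static inline int
only_fast_free_requested(uint64_t offloads)
{
	/* True iff no bit outside DEV_TX_OFFLOAD_MBUF_FAST_FREE is set,
	 * i.e. no offloads at all, or mbuf fast free alone. */
	return (offloads & ~(uint64_t)DEV_TX_OFFLOAD_MBUF_FAST_FREE) == 0;
}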