struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct qede_rx_queue *rxq;
- uint16_t max_rx_pkt_len;
+ uint16_t max_rx_pktlen;
uint16_t bufsz;
int rc;
dev->data->rx_queues[qid] = NULL;
}
- max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
+ max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN;
/* Fix up RX buffer size */
bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
/* cache align the mbuf size to simplify rx_buf_size calculation */
bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
- if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
- (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
+ if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
+ (max_rx_pktlen + QEDE_ETH_OVERHEAD) > bufsz) {
if (!dev->data->scattered_rx) {
DP_INFO(edev, "Forcing scatter-gather mode\n");
dev->data->scattered_rx = 1;
}
}
- rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pkt_len);
+ rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pktlen);
if (rc < 0)
return rc;
"L4 csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
"Outer L3 csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
flags = fp_cqe->tunnel_pars_flags.flags;
"L4 csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
if (unlikely(CQE_HAS_VLAN(parse_flag) ||
CQE_HAS_OUTER_VLAN(parse_flag))) {
/* Note: FW doesn't indicate Q-in-Q packet */
- ol_flags |= PKT_RX_VLAN;
+ ol_flags |= RTE_MBUF_F_RX_VLAN;
if (qdev->vlan_strip_flg) {
- ol_flags |= PKT_RX_VLAN_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
rx_mb->vlan_tci = vlan_tci;
}
}
if (rss_enable) {
- ol_flags |= PKT_RX_RSS_HASH;
+ ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
rx_mb->hash.rss = rss_hash;
}
tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
tpa_start_flg = true;
/* Mark it as LRO packet */
- ol_flags |= PKT_RX_LRO;
+ ol_flags |= RTE_MBUF_F_RX_LRO;
/* In split mode, seg_len is same as len_on_first_bd
* and bw_ext_bd_len_list will be empty since there are
* no additional buffers
"L4 csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
"Outer L3 csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
if (tpa_start_flg)
"L4 csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
if (CQE_HAS_VLAN(parse_flag) ||
CQE_HAS_OUTER_VLAN(parse_flag)) {
/* Note: FW doesn't indicate Q-in-Q packet */
- ol_flags |= PKT_RX_VLAN;
+ ol_flags |= RTE_MBUF_F_RX_VLAN;
if (qdev->vlan_strip_flg) {
- ol_flags |= PKT_RX_VLAN_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
rx_mb->vlan_tci = vlan_tci;
}
}
/* RSS Hash */
if (qdev->rss_enable) {
- ol_flags |= PKT_RX_RSS_HASH;
+ ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
rx_mb->hash.rss = rss_hash;
}
for (i = 0; i < nb_pkts; i++) {
m = tx_pkts[i];
ol_flags = m->ol_flags;
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
rte_errno = EINVAL;
break;
}
if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
/* We support only limited tunnel protocols */
- if (ol_flags & PKT_TX_TUNNEL_MASK) {
+ if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
uint64_t temp;
- temp = ol_flags & PKT_TX_TUNNEL_MASK;
- if (temp == PKT_TX_TUNNEL_VXLAN ||
- temp == PKT_TX_TUNNEL_GENEVE ||
- temp == PKT_TX_TUNNEL_MPLSINUDP ||
- temp == PKT_TX_TUNNEL_GRE)
+ temp = ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK;
+ if (temp == RTE_MBUF_F_TX_TUNNEL_VXLAN ||
+ temp == RTE_MBUF_F_TX_TUNNEL_GENEVE ||
+ temp == RTE_MBUF_F_TX_TUNNEL_MPLSINUDP ||
+ temp == RTE_MBUF_F_TX_TUNNEL_GRE)
continue;
}
<< ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
/* Offload the IP checksum in the hardware */
- if (tx_ol_flags & PKT_TX_IP_CKSUM)
+ if (tx_ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
/* L4 checksum offload (tcp or udp) */
- if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
- (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM)))
+ if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
+ (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM)))
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
* offloads. Don't rely on pkt_type marked by Rx, instead use
* tx_ol_flags to decide.
*/
- tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK);
+ tunn_flg = !!(tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK);
if (tunn_flg) {
/* Check against max which is Tunnel IPv6 + ext */
}
/* Outer IP checksum offload */
- if (tx_ol_flags & (PKT_TX_OUTER_IP_CKSUM |
- PKT_TX_OUTER_IPV4)) {
+ if (tx_ol_flags & (RTE_MBUF_F_TX_OUTER_IP_CKSUM |
+ RTE_MBUF_F_TX_OUTER_IPV4)) {
bd1_bd_flags_bf |=
ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
* and inner layers lengths need to be provided in
* mbuf.
*/
- if ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
- PKT_TX_TUNNEL_MPLSINUDP) {
+ if ((tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ==
+ RTE_MBUF_F_TX_TUNNEL_MPLSINUDP) {
mplsoudp_flg = true;
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
qede_mpls_tunn_tx_sanity_check(mbuf, txq);
1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT;
/* Mark inner IPv6 if present */
- if (tx_ol_flags & PKT_TX_IPV6)
+ if (tx_ol_flags & RTE_MBUF_F_TX_IPV6)
bd2_bf1 |=
1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT;
/* Inner L4 offsets */
- if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
- (tx_ol_flags & (PKT_TX_UDP_CKSUM |
- PKT_TX_TCP_CKSUM))) {
+ if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
+ (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM |
+ RTE_MBUF_F_TX_TCP_CKSUM))) {
/* Determines if BD3 is needed */
tunn_ipv6_ext_flg = true;
- if ((tx_ol_flags & PKT_TX_L4_MASK) ==
- PKT_TX_UDP_CKSUM) {
+ if ((tx_ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
+ RTE_MBUF_F_TX_UDP_CKSUM) {
bd2_bf1 |=
1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
}
} /* End MPLSoUDP */
} /* End Tunnel handling */
- if (tx_ol_flags & PKT_TX_TCP_SEG) {
+ if (tx_ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
lso_flg = true;
if (unlikely(txq->nb_tx_avail <
ETH_TX_MIN_BDS_PER_LSO_PKT))
bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
- /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
+ /* RTE_MBUF_F_TX_TCP_SEG implies RTE_MBUF_F_TX_TCP_CKSUM */
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
mss = rte_cpu_to_le_16(mbuf->tso_segsz);
}
/* Descriptor based VLAN insertion */
- if (tx_ol_flags & PKT_TX_VLAN_PKT) {
+ if (tx_ol_flags & RTE_MBUF_F_TX_VLAN) {
vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
}
/* Offload the IP checksum in the hardware */
- if (tx_ol_flags & PKT_TX_IP_CKSUM) {
+ if (tx_ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
/* There's no DPDK flag to request outer-L4 csum
* csum offload is requested then we need to force
* recalculation of L4 tunnel header csum also.
*/
- if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) !=
- PKT_TX_TUNNEL_GRE)) {
+ if (tunn_flg && ((tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) !=
+ RTE_MBUF_F_TX_TUNNEL_GRE)) {
bd1_bd_flags_bf |=
ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
}
/* L4 checksum offload (tcp or udp) */
- if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
- (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) {
+ if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
+ (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM))) {
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
/* There's no DPDK flag to request outer-L4 csum