rte_mempool_in_use_count(rxq->mb_pool));
return -ENOMEM;
}
- rxq->sw_rx_ring[idx].mbuf = new_mb;
- rxq->sw_rx_ring[idx].page_offset = 0;
+ rxq->sw_rx_ring[idx] = new_mb;
mapping = rte_mbuf_data_iova_default(new_mb);
/* Advance PROD and get BD pointer */
rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
{
- void *obj_p[QEDE_MAX_BULK_ALLOC_COUNT] __rte_cache_aligned;
struct rte_mbuf *mbuf = NULL;
struct eth_rx_bd *rx_bd;
dma_addr_t mapping;
int i, ret = 0;
uint16_t idx;
+ uint16_t mask = NUM_RX_BDS(rxq);
if (count > QEDE_MAX_BULK_ALLOC_COUNT)
count = QEDE_MAX_BULK_ALLOC_COUNT;
- ret = rte_mempool_get_bulk(rxq->mb_pool, obj_p, count);
+ idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
+
+ if (count > mask - idx + 1)
+ count = mask - idx + 1;
+
+ ret = rte_mempool_get_bulk(rxq->mb_pool, (void **)&rxq->sw_rx_ring[idx],
+ count);
+
if (unlikely(ret)) {
PMD_RX_LOG(ERR, rxq,
"Failed to allocate %d rx buffers "
}
for (i = 0; i < count; i++) {
- mbuf = obj_p[i];
- if (likely(i < count - 1))
- rte_prefetch0(obj_p[i + 1]);
+ rte_prefetch0(rxq->sw_rx_ring[(idx + 1) & NUM_RX_BDS(rxq)]);
+ mbuf = rxq->sw_rx_ring[idx & NUM_RX_BDS(rxq)];
- idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
- rxq->sw_rx_ring[idx].mbuf = mbuf;
- rxq->sw_rx_ring[idx].page_offset = 0;
mapping = rte_mbuf_data_iova_default(mbuf);
rx_bd = (struct eth_rx_bd *)
ecore_chain_produce(&rxq->rx_bd_ring);
rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
- rxq->sw_rx_prod++;
+ idx++;
}
+ rxq->sw_rx_prod = idx;
return 0;
}
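Note: with sw_rx_ring[] now holding plain rte_mbuf pointers, rte_mempool_get_bulk() fills the ring tail in place, so the request is clamped twice: first to QEDE_MAX_BULK_ALLOC_COUNT, then to the number of contiguous slots left before sw_rx_ring[] wraps. A minimal standalone sketch of that second clamp, assuming a power-of-two ring where the mask doubles as "size - 1" (RING_SIZE and RING_MASK are illustrative stand-ins for NUM_RX_BDS(rxq)):

/* Illustrative sketch, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256                /* assumed power-of-two ring size */
#define RING_MASK (RING_SIZE - 1)    /* stands in for NUM_RX_BDS(rxq) */

/* Clamp a bulk-allocation request so it never writes past the ring end. */
static int clamp_bulk_count(uint16_t sw_rx_prod, int count)
{
	uint16_t idx = sw_rx_prod & RING_MASK;	/* next free producer slot */

	/* Only (RING_MASK - idx + 1) contiguous slots remain before wrap. */
	if (count > RING_MASK - idx + 1)
		count = RING_MASK - idx + 1;
	return count;
}

int main(void)
{
	printf("%d\n", clamp_bulk_count(254, 64));	/* near wrap: prints 2 */
	printf("%d\n", clamp_bulk_count(0, 64));	/* ring start: prints 64 */
	return 0;
}

Slots cut off by the clamp are simply refilled on a later call, once sw_rx_prod has wrapped back to the start of the ring.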
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct qede_rx_queue *rxq;
- uint16_t max_rx_pkt_len;
+ uint16_t max_rx_pktlen;
uint16_t bufsz;
int rc;
dev->data->rx_queues[qid] = NULL;
}
- max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
+ max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN;
/* Fix up RX buffer size */
bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
/* cache align the mbuf size to simplfy rx_buf_size calculation */
bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
- if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
- (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
+ if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
+ (max_rx_pktlen + QEDE_ETH_OVERHEAD) > bufsz) {
if (!dev->data->scattered_rx) {
DP_INFO(edev, "Forcing scatter-gather mode\n");
dev->data->scattered_rx = 1;
}
}
- rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pkt_len);
+ rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pktlen);
if (rc < 0)
return rc;
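The fragment above replaces the removed rxmode->max_rx_pkt_len with a frame length derived from the configured MTU, then decides whether scatter-gather must be enabled. A small sketch of that decision, assuming illustrative values for the cache-line size and for QEDE_ETH_OVERHEAD (the real macro value may differ):

/* Illustrative sketch, not driver code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE	64	/* assumed CPU cache-line size */
#define ETHER_HDR_LEN	14	/* RTE_ETHER_HDR_LEN */
#define ETH_OVERHEAD	24	/* assumed stand-in for QEDE_ETH_OVERHEAD */

/* Floor a buffer size to a cache-line multiple, as the driver does. */
static uint16_t floor_to_cache_line(uint16_t sz)
{
	return sz & ~(CACHE_LINE - 1);
}

/* Decide whether a single mbuf can hold the largest frame. */
static bool need_scatter(uint16_t mtu, uint16_t data_room, uint16_t headroom,
			 bool scatter_offload)
{
	uint16_t max_rx_pktlen = mtu + ETHER_HDR_LEN;
	uint16_t bufsz = floor_to_cache_line(data_room - headroom);

	return scatter_offload || (max_rx_pktlen + ETH_OVERHEAD) > bufsz;
}

int main(void)
{
	/* A 1500-byte MTU fits in a default 2048-byte data room... */
	printf("%d\n", need_scatter(1500, 2048, 128, false));	/* prints 0 */
	/* ...a 9000-byte jumbo MTU does not, so scatter Rx is forced. */
	printf("%d\n", need_scatter(9000, 2048, 128, false));	/* prints 1 */
	return 0;
}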
if (rxq->sw_rx_ring) {
for (i = 0; i < rxq->nb_rx_desc; i++) {
- if (rxq->sw_rx_ring[i].mbuf) {
- rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
- rxq->sw_rx_ring[i].mbuf = NULL;
+ if (rxq->sw_rx_ring[i]) {
+ rte_pktmbuf_free(rxq->sw_rx_ring[i]);
+ rxq->sw_rx_ring[i] = NULL;
}
}
}
{
uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
- struct eth_rx_prod_data rx_prods = { 0 };
+ struct eth_rx_prod_data rx_prods;
/* Update producers */
+ memset(&rx_prods, 0, sizeof(rx_prods));
rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);
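The "= { 0 }" initializer on rx_prods is replaced with an explicit memset(). A common motivation for this pattern, assumed here rather than stated by the patch, is that memset() clears every byte of the structure, padding included, before it is pushed to the hardware producer registers, while a partial initializer of an automatic object only guarantees the named members are zero. A sketch with a hypothetical layout:

/* Illustrative sketch; prod_data_example is a hypothetical layout. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct prod_data_example {
	uint16_t bd_prod;
	uint8_t  flags;
	/* the compiler typically inserts one padding byte here */
	uint32_t cqe_prod;
};

int main(void)
{
	struct prod_data_example p;

	/* Zero every byte, padding included, before copying the struct
	 * verbatim to a device doorbell; "= { 0 }" would only guarantee
	 * zeroed members for this automatic object.
	 */
	memset(&p, 0, sizeof(p));
	p.bd_prod  = 16;
	p.cqe_prod = 16;

	printf("%zu bytes fully initialized\n", sizeof(p));
	return 0;
}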
static inline void
qede_reuse_page(__rte_unused struct qede_dev *qdev,
- struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
+ struct qede_rx_queue *rxq, struct rte_mbuf *curr_cons)
{
struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
- struct qede_rx_entry *curr_prod;
dma_addr_t new_mapping;
- curr_prod = &rxq->sw_rx_ring[idx];
- *curr_prod = *curr_cons;
+ rxq->sw_rx_ring[idx] = curr_cons;
- new_mapping = rte_mbuf_data_iova_default(curr_prod->mbuf) +
- curr_prod->page_offset;
+ new_mapping = rte_mbuf_data_iova_default(curr_cons);
rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
struct qede_dev *qdev, uint8_t count)
{
- struct qede_rx_entry *curr_cons;
+ struct rte_mbuf *curr_cons;
for (; count > 0; count--) {
- curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
+ curr_cons = rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
qede_reuse_page(qdev, rxq, curr_cons);
qede_rx_bd_ring_consume(rxq);
}
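Because sw_rx_ring[] entries are now bare rte_mbuf pointers instead of struct qede_rx_entry (mbuf plus page_offset), recycling a receive buffer reduces to copying one pointer from the consumer slot to the producer slot and rewriting the BD address from the mbuf's default IOVA. A simplified, self-contained sketch of that reuse path (mbuf_sketch and rxq_sketch are illustrative stand-ins, not driver types):

/* Illustrative sketch of the buffer-reuse path, not driver code. */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8			/* assumed power-of-two ring size */
#define RING_MASK (RING_SIZE - 1)

struct mbuf_sketch { uint64_t iova; };

struct rxq_sketch {
	struct mbuf_sketch *sw_rx_ring[RING_SIZE];	/* plain mbuf pointers */
	uint64_t bd_addr[RING_SIZE];	/* stands in for the HW BD ring */
	uint16_t sw_rx_prod;
	uint16_t sw_rx_cons;
};

/*
 * Re-post the buffer just consumed: the same mbuf pointer moves from the
 * consumer slot to the producer slot and its IOVA goes into the new BD.
 * (In the driver, sw_rx_cons is advanced by qede_rx_bd_ring_consume().)
 */
static void reuse_page_sketch(struct rxq_sketch *q)
{
	struct mbuf_sketch *m = q->sw_rx_ring[q->sw_rx_cons & RING_MASK];

	q->sw_rx_ring[q->sw_rx_prod & RING_MASK] = m;
	q->bd_addr[q->sw_rx_prod & RING_MASK] = m->iova;
	q->sw_rx_prod++;
	q->sw_rx_cons++;
}

int main(void)
{
	struct mbuf_sketch m = { .iova = 0x1000 };
	struct rxq_sketch q = { .sw_rx_ring = { &m } };

	reuse_page_sketch(&q);
	printf("prod=%u cons=%u bd=0x%llx\n", (unsigned)q.sw_rx_prod,
	       (unsigned)q.sw_rx_cons, (unsigned long long)q.bd_addr[0]);
	return 0;
}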
if (rte_le_to_cpu_16(len)) {
tpa_info = &rxq->tpa_info[agg_index];
cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
- curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
+ curr_frag = rxq->sw_rx_ring[cons_idx];
assert(curr_frag);
curr_frag->nb_segs = 1;
curr_frag->pkt_len = rte_le_to_cpu_16(len);
return -EINVAL;
}
sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
- seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
+ seg2 = rxq->sw_rx_ring[sw_rx_index];
qede_rx_bd_ring_consume(rxq);
pkt_len -= cur_size;
seg2->data_len = cur_size;
/* Get the data from the SW ring */
sw_rx_index = rxq->sw_rx_cons & num_rx_bds;
- rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
+ rx_mb = rxq->sw_rx_ring[sw_rx_index];
assert(rx_mb != NULL);
parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
"L4 csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
"Outer L3 csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
flags = fp_cqe->tunnel_pars_flags.flags;
"L4 csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
if (unlikely(CQE_HAS_VLAN(parse_flag) ||
CQE_HAS_OUTER_VLAN(parse_flag))) {
/* Note: FW doesn't indicate Q-in-Q packet */
- ol_flags |= PKT_RX_VLAN;
+ ol_flags |= RTE_MBUF_F_RX_VLAN;
if (qdev->vlan_strip_flg) {
- ol_flags |= PKT_RX_VLAN_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
rx_mb->vlan_tci = vlan_tci;
}
}
if (rss_enable) {
- ol_flags |= PKT_RX_RSS_HASH;
+ ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
rx_mb->hash.rss = rss_hash;
}
/* Prefetch next mbuf while processing current one. */
preload_idx = rxq->sw_rx_cons & num_rx_bds;
- rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
+ rte_prefetch0(rxq->sw_rx_ring[preload_idx]);
/* Update rest of the MBUF fields */
rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
tpa_start_flg = true;
/* Mark it as LRO packet */
- ol_flags |= PKT_RX_LRO;
+ ol_flags |= RTE_MBUF_F_RX_LRO;
/* In split mode, seg_len is same as len_on_first_bd
* and bw_ext_bd_len_list will be empty since there are
* no additional buffers
/* Get the data from the SW ring */
sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
- rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
+ rx_mb = rxq->sw_rx_ring[sw_rx_index];
assert(rx_mb != NULL);
/* Handle regular CQE or TPA start CQE */
"L4 csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
"Outer L3 csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
if (tpa_start_flg)
"L4 csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
parse_flag);
rxq->rx_hw_errors++;
- ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
} else {
- ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
}
if (CQE_HAS_VLAN(parse_flag) ||
CQE_HAS_OUTER_VLAN(parse_flag)) {
/* Note: FW doesn't indicate Q-in-Q packet */
- ol_flags |= PKT_RX_VLAN;
+ ol_flags |= RTE_MBUF_F_RX_VLAN;
if (qdev->vlan_strip_flg) {
- ol_flags |= PKT_RX_VLAN_STRIPPED;
+ ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
rx_mb->vlan_tci = vlan_tci;
}
}
/* RSS Hash */
if (qdev->rss_enable) {
- ol_flags |= PKT_RX_RSS_HASH;
+ ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
rx_mb->hash.rss = rss_hash;
}
/* Prefetch next mbuf while processing current one. */
preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
- rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
+ rte_prefetch0(rxq->sw_rx_ring[preload_idx]);
/* Update rest of the MBUF fields */
rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
for (i = 0; i < nb_pkts; i++) {
m = tx_pkts[i];
ol_flags = m->ol_flags;
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
rte_errno = EINVAL;
break;
}
if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
/* We support only limited tunnel protocols */
- if (ol_flags & PKT_TX_TUNNEL_MASK) {
+ if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
uint64_t temp;
- temp = ol_flags & PKT_TX_TUNNEL_MASK;
- if (temp == PKT_TX_TUNNEL_VXLAN ||
- temp == PKT_TX_TUNNEL_GENEVE ||
- temp == PKT_TX_TUNNEL_MPLSINUDP ||
- temp == PKT_TX_TUNNEL_GRE)
+ temp = ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK;
+ if (temp == RTE_MBUF_F_TX_TUNNEL_VXLAN ||
+ temp == RTE_MBUF_F_TX_TUNNEL_GENEVE ||
+ temp == RTE_MBUF_F_TX_TUNNEL_MPLSINUDP ||
+ temp == RTE_MBUF_F_TX_TUNNEL_GRE)
continue;
}
<< ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
/* Offload the IP checksum in the hardware */
- if (tx_ol_flags & PKT_TX_IP_CKSUM)
+ if (tx_ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
/* L4 checksum offload (tcp or udp) */
- if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
- (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM)))
+ if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
+ (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM)))
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
* offloads. Don't rely on pkt_type marked by Rx, instead use
* tx_ol_flags to decide.
*/
- tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK);
+ tunn_flg = !!(tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK);
if (tunn_flg) {
/* Check against max which is Tunnel IPv6 + ext */
}
/* Outer IP checksum offload */
- if (tx_ol_flags & (PKT_TX_OUTER_IP_CKSUM |
- PKT_TX_OUTER_IPV4)) {
+ if (tx_ol_flags & (RTE_MBUF_F_TX_OUTER_IP_CKSUM |
+ RTE_MBUF_F_TX_OUTER_IPV4)) {
bd1_bd_flags_bf |=
ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
* and inner layers lengths need to be provided in
* mbuf.
*/
- if ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
- PKT_TX_TUNNEL_MPLSINUDP) {
+ if ((tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ==
+ RTE_MBUF_F_TX_TUNNEL_MPLSINUDP) {
mplsoudp_flg = true;
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
qede_mpls_tunn_tx_sanity_check(mbuf, txq);
1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT;
/* Mark inner IPv6 if present */
- if (tx_ol_flags & PKT_TX_IPV6)
+ if (tx_ol_flags & RTE_MBUF_F_TX_IPV6)
bd2_bf1 |=
1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT;
/* Inner L4 offsets */
- if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
- (tx_ol_flags & (PKT_TX_UDP_CKSUM |
- PKT_TX_TCP_CKSUM))) {
+ if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
+ (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM |
+ RTE_MBUF_F_TX_TCP_CKSUM))) {
/* Determines if BD3 is needed */
tunn_ipv6_ext_flg = true;
- if ((tx_ol_flags & PKT_TX_L4_MASK) ==
- PKT_TX_UDP_CKSUM) {
+ if ((tx_ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
+ RTE_MBUF_F_TX_UDP_CKSUM) {
bd2_bf1 |=
1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
}
} /* End MPLSoUDP */
} /* End Tunnel handling */
- if (tx_ol_flags & PKT_TX_TCP_SEG) {
+ if (tx_ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
lso_flg = true;
if (unlikely(txq->nb_tx_avail <
ETH_TX_MIN_BDS_PER_LSO_PKT))
bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
- /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
+ /* RTE_MBUF_F_TX_TCP_SEG implies RTE_MBUF_F_TX_TCP_CKSUM */
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
mss = rte_cpu_to_le_16(mbuf->tso_segsz);
}
/* Descriptor based VLAN insertion */
- if (tx_ol_flags & PKT_TX_VLAN_PKT) {
+ if (tx_ol_flags & RTE_MBUF_F_TX_VLAN) {
vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
}
/* Offload the IP checksum in the hardware */
- if (tx_ol_flags & PKT_TX_IP_CKSUM) {
+ if (tx_ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
/* There's no DPDK flag to request outer-L4 csum
* csum offload is requested then we need to force
* recalculation of L4 tunnel header csum also.
*/
- if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) !=
- PKT_TX_TUNNEL_GRE)) {
+ if (tunn_flg && ((tx_ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) !=
+ RTE_MBUF_F_TX_TUNNEL_GRE)) {
bd1_bd_flags_bf |=
ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
}
/* L4 checksum offload (tcp or udp) */
- if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
- (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) {
+ if ((tx_ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)) &&
+ (tx_ol_flags & (RTE_MBUF_F_TX_UDP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM))) {
bd1_bd_flags_bf |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
/* There's no DPDK flag to request outer-L4 csum