return 0;
}
+#define QEDE_MAX_BULK_ALLOC_COUNT 512
+
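+/* Allocate up to QEDE_MAX_BULK_ALLOC_COUNT mbufs from the Rx mempool in a
+ * single bulk get and post each one as a buffer descriptor on the Rx BD ring.
+ * Returns 0 on success or -ENOMEM if the mempool cannot satisfy the request.
+ */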
+static inline int qede_alloc_rx_bulk_mbufs(struct qede_rx_queue *rxq, int count)
+{
+ void *obj_p[QEDE_MAX_BULK_ALLOC_COUNT] __rte_cache_aligned;
+ struct rte_mbuf *mbuf = NULL;
+ struct eth_rx_bd *rx_bd;
+ dma_addr_t mapping;
+ int i, ret = 0;
+ uint16_t idx;
+
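+ /* Producer index snapshot, used only in the allocation failure log below */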
+ idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
+
+ if (count > QEDE_MAX_BULK_ALLOC_COUNT)
+ count = QEDE_MAX_BULK_ALLOC_COUNT;
+
+ ret = rte_mempool_get_bulk(rxq->mb_pool, obj_p, count);
+ if (unlikely(ret)) {
+ PMD_RX_LOG(ERR, rxq,
+ "Failed to allocate %d rx buffers "
+ "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
+ count, idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
+ rte_mempool_avail_count(rxq->mb_pool),
+ rte_mempool_in_use_count(rxq->mb_pool));
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < count; i++) {
+ mbuf = obj_p[i];
+ if (likely(i < count - 1))
+ rte_prefetch0(obj_p[i + 1]);
+
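+ /* Hand the mbuf to the SW ring and post its DMA address on the HW BD ring */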
+ idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
+ rxq->sw_rx_ring[idx].mbuf = mbuf;
+ rxq->sw_rx_ring[idx].page_offset = 0;
+ mapping = rte_mbuf_data_iova_default(mbuf);
+ rx_bd = (struct eth_rx_bd *)
+ ecore_chain_produce(&rxq->rx_bd_ring);
+ rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
+ rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
+ rxq->sw_rx_prod++;
+ }
+
+ return 0;
+}
+
/* Criterias for calculating Rx buffer size -
* 1) rx_buf_size should not exceed the size of mbuf
* 2) In scattered_rx mode - minimum rx_buf_size should be
void qede_rx_queue_release(void *rx_queue)
{
struct qede_rx_queue *rxq = rx_queue;
- struct qede_dev *qdev = rxq->qdev;
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-
- PMD_INIT_FUNC_TRACE(edev);
+ struct qede_dev *qdev;
+ struct ecore_dev *edev;
if (rxq) {
+ qdev = rxq->qdev;
+ edev = QEDE_INIT_EDEV(qdev);
+ PMD_INIT_FUNC_TRACE(edev);
qede_rx_queue_release_mbufs(rxq);
qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
void qede_tx_queue_release(void *tx_queue)
{
struct qede_tx_queue *txq = tx_queue;
- struct qede_dev *qdev = txq->qdev;
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-
- PMD_INIT_FUNC_TRACE(edev);
+ struct qede_dev *qdev;
+ struct ecore_dev *edev;
if (txq) {
+ qdev = txq->qdev;
+ edev = QEDE_INIT_EDEV(qdev);
+ PMD_INIT_FUNC_TRACE(edev);
qede_tx_queue_release_mbufs(txq);
qdev->ops->common->chain_free(edev, &txq->tx_pbl);
rte_free(txq->sw_tx_ring);
static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
{
uint32_t packet_type = RTE_PTYPE_UNKNOWN;
- struct ether_hdr *eth_hdr;
- struct ipv4_hdr *ipv4_hdr;
- struct ipv6_hdr *ipv6_hdr;
- struct vlan_hdr *vlan_hdr;
+ struct rte_ether_hdr *eth_hdr;
+ struct rte_ipv4_hdr *ipv4_hdr;
+ struct rte_ipv6_hdr *ipv6_hdr;
+ struct rte_vlan_hdr *vlan_hdr;
uint16_t ethertype;
bool vlan_tagged = 0;
uint16_t len;
- eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
- len = sizeof(struct ether_hdr);
+ eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+ len = sizeof(struct rte_ether_hdr);
ethertype = rte_cpu_to_be_16(eth_hdr->ether_type);
/* Note: Valid only if VLAN stripping is disabled */
- if (ethertype == ETHER_TYPE_VLAN) {
+ if (ethertype == RTE_ETHER_TYPE_VLAN) {
vlan_tagged = 1;
- vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
- len += sizeof(struct vlan_hdr);
+ vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
+ len += sizeof(struct rte_vlan_hdr);
ethertype = rte_cpu_to_be_16(vlan_hdr->eth_proto);
}
- if (ethertype == ETHER_TYPE_IPv4) {
+ if (ethertype == RTE_ETHER_TYPE_IPV4) {
packet_type |= RTE_PTYPE_L3_IPV4;
- ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, len);
+ ipv4_hdr = rte_pktmbuf_mtod_offset(m,
+ struct rte_ipv4_hdr *, len);
if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
packet_type |= RTE_PTYPE_L4_TCP;
else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
packet_type |= RTE_PTYPE_L4_UDP;
- } else if (ethertype == ETHER_TYPE_IPv6) {
+ } else if (ethertype == RTE_ETHER_TYPE_IPV6) {
packet_type |= RTE_PTYPE_L3_IPV6;
- ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, len);
+ ipv6_hdr = rte_pktmbuf_mtod_offset(m,
+ struct rte_ipv6_hdr *, len);
if (ipv6_hdr->proto == IPPROTO_TCP)
packet_type |= RTE_PTYPE_L4_TCP;
else if (ipv6_hdr->proto == IPPROTO_UDP)
static inline uint8_t
qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
{
- struct ipv4_hdr *ip;
+ struct rte_ipv4_hdr *ip;
uint16_t pkt_csum;
uint16_t calc_csum;
uint16_t val;
if (unlikely(val)) {
m->packet_type = qede_rx_cqe_to_pkt_type(flag);
if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
- ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
- sizeof(struct ether_hdr));
+ ip = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
+ sizeof(struct rte_ether_hdr));
pkt_csum = ip->hdr_checksum;
ip->hdr_checksum = 0;
calc_csum = rte_ipv4_cksum(ip);
struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
{
struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
- uint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
struct qede_rx_entry *curr_prod;
dma_addr_t new_mapping;
uint8_t bitfield_val;
#endif
uint8_t tunn_parse_flag;
- uint8_t j;
struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
uint64_t ol_flags;
uint32_t packet_type;
uint8_t offset, tpa_agg_idx, flags;
struct qede_agg_info *tpa_info = NULL;
uint32_t rss_hash;
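+ /* Rx buffers consumed by this call; requested for bulk allocation next time */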
+ int rx_alloc_count = 0;
+
+ /* Replenish the Rx buffers consumed in the previous call */
+ if (rxq->rx_alloc_count) {
+ if (unlikely(qede_alloc_rx_bulk_mbufs(rxq,
+ rxq->rx_alloc_count))) {
+ struct rte_eth_dev *dev;
+
+ PMD_RX_LOG(ERR, rxq,
+ "New buffer allocation failed,"
+ "dropping incoming packetn");
+ dev = &rte_eth_devices[rxq->port_id];
+ dev->data->rx_mbuf_alloc_failed +=
+ rxq->rx_alloc_count;
+ rxq->rx_alloc_errors += rxq->rx_alloc_count;
+ return 0;
+ }
+ qede_update_rx_prod(qdev, rxq);
+ rxq->rx_alloc_count = 0;
+ }
hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
rx_mb->hash.rss = rss_hash;
}
- if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
- PMD_RX_LOG(ERR, rxq,
- "New buffer allocation failed,"
- "dropping incoming packet\n");
- qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
- rte_eth_devices[rxq->port_id].
- data->rx_mbuf_alloc_failed++;
- rxq->rx_alloc_errors++;
- break;
- }
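+ /* Defer replenishment: just count the consumed buffer and allocate
+ * in bulk at the start of the next call.
+ */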
+ rx_alloc_count++;
qede_rx_bd_ring_consume(rxq);
if (!tpa_start_flg && fp_cqe->bd_num > 1) {
if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
pkt_len - len))
goto next_cqe;
- for (j = 0; j < num_segs; j++) {
- if (qede_alloc_rx_buffer(rxq)) {
- PMD_RX_LOG(ERR, rxq,
- "Buffer allocation failed");
- rte_eth_devices[rxq->port_id].
- data->rx_mbuf_alloc_failed++;
- rxq->rx_alloc_errors++;
- break;
- }
- rxq->rx_segs++;
- }
+
+ rx_alloc_count += num_segs;
+ rxq->rx_segs += num_segs;
}
rxq->rx_segs++; /* for the first segment */
}
}
- qede_update_rx_prod(qdev, rxq);
+ /* Request the number of buffers to be allocated in the next call */
+ rxq->rx_alloc_count = rx_alloc_count;
rxq->rcv_pkts += rx_pkt;
ol_flags = m->ol_flags;
if (ol_flags & PKT_TX_TCP_SEG) {
if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
- rte_errno = -EINVAL;
+ rte_errno = EINVAL;
break;
}
/* TBD: confirm its ~9700B for both ? */
if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
- rte_errno = -EINVAL;
+ rte_errno = EINVAL;
break;
}
} else {
if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
- rte_errno = -EINVAL;
+ rte_errno = EINVAL;
break;
}
}
if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
- rte_errno = -ENOTSUP;
+ /* We support only limited tunnel protocols */
+ if (ol_flags & PKT_TX_TUNNEL_MASK) {
+ uint64_t temp;
+
+ temp = ol_flags & PKT_TX_TUNNEL_MASK;
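+ /* Recognized encapsulations are not treated as unsupported offloads */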
+ if (temp == PKT_TX_TUNNEL_VXLAN ||
+ temp == PKT_TX_TUNNEL_GENEVE ||
+ temp == PKT_TX_TUNNEL_MPLSINUDP ||
+ temp == PKT_TX_TUNNEL_GRE)
+ continue;
+ }
+
+ rte_errno = ENOTSUP;
break;
}
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
ret = rte_validate_tx_offload(m);
if (ret != 0) {
- rte_errno = ret;
+ rte_errno = -ret;
break;
}
#endif
txq->nb_tx_avail -= bd1->data.nbds;
txq->sw_tx_prod++;
- rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
bd_prod =
rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
#ifdef RTE_LIBRTE_QEDE_DEBUG_TX