#include <inttypes.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_common.h>
#include <rte_net.h>
#include "fm10k.h"
#define rte_packet_prefetch(p) do {} while (0)
#endif
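For context, the no-op above is the fallback arm of the usual packet-prefetch guard; reconstructed in full it reads roughly as follows (the rte_prefetch1() arm is an assumption based on the pattern other Intel PMDs use):

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)  rte_prefetch1(p)  /* assumed prefetch level */
#else
#define rte_packet_prefetch(p)  do {} while (0)
#endif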
-#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
+#ifdef RTE_ETHDEV_DEBUG_RX
static inline void dump_rxd(union fm10k_rx_desc *rxd)
{
PMD_RX_LOG(DEBUG, "+----------------|----------------+");
#define FM10K_TX_OFFLOAD_MASK ( \
PKT_TX_VLAN_PKT | \
+ PKT_TX_IPV6 | \
+ PKT_TX_IPV4 | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG)
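With PKT_TX_IPV4/PKT_TX_IPV6 now part of the supported set, the complement mask used by the Tx prepare path shrinks accordingly. A sketch of the companion definition this hunk implies (the exact form in the file may differ):

#define FM10K_TX_OFFLOAD_NOTSUP_MASK \
		(PKT_TX_OFFLOAD_MASK ^ FM10K_TX_OFFLOAD_MASK)

Without the IPV4/IPV6 bits in FM10K_TX_OFFLOAD_MASK, the FM10K_TX_OFFLOAD_NOTSUP_MASK check in fm10k_prep_pkts() further down would reject valid TSO packets that carry PKT_TX_IPV4 or PKT_TX_IPV6 alongside PKT_TX_TCP_SEG.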
break;
mbuf = q->sw_ring[next_dd];
desc = q->hw_ring[next_dd];
-#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
+#ifdef RTE_ETHDEV_DEBUG_RX
dump_rxd(&desc);
#endif
rte_pktmbuf_pkt_len(mbuf) = desc.w.length;
- * So, always PKT_RX_VLAN flag is set and vlan_tci
- * is valid for each RX packet's mbuf.
+ * So the PKT_RX_VLAN and PKT_RX_VLAN_STRIPPED flags are
+ * always set, and vlan_tci is valid for each RX packet's mbuf.
*/
- mbuf->ol_flags |= PKT_RX_VLAN;
+ mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
mbuf->vlan_tci = desc.w.vlan;
/**
* mbuf->vlan_tci_outer is an idle field in fm10k driver,
break;
mbuf = q->sw_ring[next_dd];
desc = q->hw_ring[next_dd];
-#ifdef RTE_LIBRTE_FM10K_DEBUG_RX
+#ifdef RTE_ETHDEV_DEBUG_RX
dump_rxd(&desc);
#endif
- * So, always PKT_RX_VLAN flag is set and vlan_tci
- * is valid for each RX packet's mbuf.
+ * So the PKT_RX_VLAN and PKT_RX_VLAN_STRIPPED flags are
+ * always set, and vlan_tci is valid for each RX packet's mbuf.
*/
- first_seg->ol_flags |= PKT_RX_VLAN;
+ first_seg->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
first_seg->vlan_tci = desc.w.vlan;
/**
* mbuf->vlan_tci_outer is an idle field in fm10k driver,
return nb_rcv;
}
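The PKT_RX_VLAN_STRIPPED additions change what receivers may assume: the tag is no longer in the packet data but in mbuf->vlan_tci. A minimal consumer-side sketch (hypothetical application code, not part of this patch):

static inline uint16_t
app_rx_vlan_tci(const struct rte_mbuf *m)
{
	/* Tag was stripped by hardware and saved in vlan_tci. */
	if (m->ol_flags & PKT_RX_VLAN_STRIPPED)
		return m->vlan_tci;
	/* Tag (if any) is still inline in the frame data. */
	return 0;
}

Previously the driver set only PKT_RX_VLAN, so applications could not tell whether the tag was still present in the frame.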
-int
-fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
+uint32_t
+fm10k_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
+#define FM10K_RXQ_SCAN_INTERVAL 4
volatile union fm10k_rx_desc *rxdp;
- struct fm10k_rx_queue *rxq = rx_queue;
- uint16_t desc;
- int ret;
+ struct fm10k_rx_queue *rxq;
+ uint16_t desc = 0;
- if (unlikely(offset >= rxq->nb_desc)) {
- PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
- return 0;
+ rxq = dev->data->rx_queues[rx_queue_id];
+ rxdp = &rxq->hw_ring[rxq->next_dd];
+ while ((desc < rxq->nb_desc) &&
+ rxdp->w.status & rte_cpu_to_le_16(FM10K_RXD_STATUS_DD)) {
+ /**
+ * Check the DD bit of one Rx descriptor in each group of 4,
+ * to avoid polling the status too frequently and hurting
+ * performance.
+ */
+ desc += FM10K_RXQ_SCAN_INTERVAL;
+ rxdp += FM10K_RXQ_SCAN_INTERVAL;
+ if (rxq->next_dd + desc >= rxq->nb_desc)
+ rxdp = &rxq->hw_ring[rxq->next_dd + desc -
+ rxq->nb_desc];
}
- desc = rxq->next_dd + offset;
- if (desc >= rxq->nb_desc)
- desc -= rxq->nb_desc;
-
- rxdp = &rxq->hw_ring[desc];
-
- ret = !!(rxdp->w.status &
- rte_cpu_to_le_16(FM10K_RXD_STATUS_DD));
-
- return ret;
+ return desc;
}
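The removed rx_descriptor_done callback and the new counter also need a matching eth_dev_ops update; that hunk is not shown here, but given the (dev, queue_id) signature it would follow the pre-21.11 pattern (a sketch, assuming the surrounding ops table):

static const struct eth_dev_ops fm10k_eth_dev_ops = {
	/* ... */
	.rx_queue_count = fm10k_dev_rx_queue_count,
	/* no .rx_descriptor_done entry anymore */
};

Note the semantics change for callers: rte_eth_rx_queue_count() now returns an approximation of how many descriptors are used, in steps of FM10K_RXQ_SCAN_INTERVAL, instead of a done/not-done answer for one offset.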
int
/* set vlan if requested */
if (mb->ol_flags & PKT_TX_VLAN_PKT)
q->hw_ring[q->next_free].vlan = mb->vlan_tci;
+ else
+ q->hw_ring[q->next_free].vlan = 0;
q->sw_ring[q->next_free] = mb;
q->hw_ring[q->next_free].buffer_addr =
rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
q->hw_ring[q->next_free].buflen =
rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
if (mb->ol_flags & PKT_TX_TCP_SEG) {
- hdrlen = mb->outer_l2_len + mb->outer_l3_len + mb->l2_len +
- mb->l3_len + mb->l4_len;
+ hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
+ hdrlen += (mb->ol_flags & PKT_TX_TUNNEL_MASK) ?
+ mb->outer_l2_len + mb->outer_l3_len : 0;
if (q->hw_ring[q->next_free].flags & FM10K_TXD_FLAG_FTAG)
hdrlen += sizeof(struct fm10k_ftag);
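To make the header arithmetic concrete (illustrative figures, not from this patch): a plain Ethernet/IPv4/TCP TSO packet has the PKT_TX_TUNNEL_MASK bits clear, so

	hdrlen = l2_len + l3_len + l4_len = 14 + 20 + 20 = 54

with sizeof(struct fm10k_ftag) added only when the FTAG flag is set. The outer_l2_len/outer_l3_len fields are only meaningful when a PKT_TX_TUNNEL_* flag is set, which is why the old unconditional sum could inflate hdrlen for non-tunneled packets.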
if ((m->ol_flags & PKT_TX_TCP_SEG) &&
(m->tso_segsz < FM10K_TSO_MINMSS)) {
- rte_errno = -EINVAL;
+ rte_errno = EINVAL;
return i;
}
if (m->ol_flags & FM10K_TX_OFFLOAD_NOTSUP_MASK) {
- rte_errno = -ENOTSUP;
+ rte_errno = ENOTSUP;
return i;
}
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#ifdef RTE_ETHDEV_DEBUG_TX
ret = rte_validate_tx_offload(m);
if (ret != 0) {
- rte_errno = ret;
+ rte_errno = -ret;
return i;
}
#endif
ret = rte_net_intel_cksum_prepare(m);
if (ret != 0) {
- rte_errno = ret;
+ rte_errno = -ret;
return i;
}
}
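The sign fixes follow the rte_errno convention of positive errno values, which callers of rte_eth_tx_prepare() test directly. A minimal caller sketch (hypothetical application code, not part of this patch):

#include <errno.h>
#include <rte_ethdev.h>
#include <rte_errno.h>

static uint16_t
app_tx(uint16_t port_id, uint16_t queue_id,
       struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
					      pkts, nb_pkts);

	if (nb_prep < nb_pkts && rte_errno == ENOTSUP)
		/* pkts[nb_prep] requested an unsupported offload; drop it
		 * (handling of the packets after it is left out here). */
		rte_pktmbuf_free(pkts[nb_prep]);

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}

With the old negative assignments, the ENOTSUP comparison above would never match.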