#include "ngbe_ethdev.h"
#include "ngbe_rxtx.h"
+#ifdef RTE_LIBRTE_IEEE1588
+#define NGBE_TX_IEEE1588_TMST RTE_MBUF_F_TX_IEEE1588_TMST
+#else
+#define NGBE_TX_IEEE1588_TMST 0
+#endif
+
/* Bit Mask to indicate what bits required for building Tx context */
static const u64 NGBE_TX_OFFLOAD_MASK = (RTE_MBUF_F_TX_IP_CKSUM |
RTE_MBUF_F_TX_OUTER_IPV6 |
RTE_MBUF_F_TX_L4_MASK |
RTE_MBUF_F_TX_TCP_SEG |
RTE_MBUF_F_TX_TUNNEL_MASK |
- RTE_MBUF_F_TX_OUTER_IP_CKSUM);
+ RTE_MBUF_F_TX_OUTER_IP_CKSUM |
+ NGBE_TX_IEEE1588_TMST);
+
#define NGBE_TX_OFFLOAD_NOTSUP_MASK \
(RTE_MBUF_F_TX_OFFLOAD_MASK ^ NGBE_TX_OFFLOAD_MASK)
*/
cmd_type_len = NGBE_TXD_FCS;
+#ifdef RTE_LIBRTE_IEEE1588
+		if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
+ cmd_type_len |= NGBE_TXD_1588;
+#endif
+
olinfo_status = 0;
if (tx_ol_req) {
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
RTE_MBUF_F_RX_RSS_HASH, 0, 0, 0,
0, 0, 0, RTE_MBUF_F_RX_FDIR,
};
+#ifdef RTE_LIBRTE_IEEE1588
+ static uint64_t ip_pkt_etqf_map[8] = {
+		0, 0, 0, RTE_MBUF_F_RX_IEEE1588_PTP,
+ 0, 0, 0, 0,
+ };
+ int etfid = ngbe_etflt_id(NGBE_RXD_PTID(pkt_info));
+ if (likely(-1 != etfid))
+ return ip_pkt_etqf_map[etfid] |
+ ip_rss_types_map[NGBE_RXD_RSSTYPE(pkt_info)];
+ else
+ return ip_rss_types_map[NGBE_RXD_RSSTYPE(pkt_info)];
+#else
return ip_rss_types_map[NGBE_RXD_RSSTYPE(pkt_info)];
+#endif
}
static inline uint64_t
vlan_flags & RTE_MBUF_F_RX_VLAN_STRIPPED)
? vlan_flags : 0;
+#ifdef RTE_LIBRTE_IEEE1588
+ if (rx_status & NGBE_RXD_STAT_1588)
+		pkt_flags = pkt_flags | RTE_MBUF_F_RX_IEEE1588_TMST;
+#endif
return pkt_flags;
}
}
}
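+/* Walk the sw_ring of a full-featured Tx queue, freeing mbufs of already
+ * transmitted packets; returns the number of packets released.
+ */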
+static int
+ngbe_tx_done_cleanup_full(struct ngbe_tx_queue *txq, uint32_t free_cnt)
+{
+ struct ngbe_tx_entry *swr_ring = txq->sw_ring;
+ uint16_t i, tx_last, tx_id;
+ uint16_t nb_tx_free_last;
+ uint16_t nb_tx_to_clean;
+ uint32_t pkt_cnt;
+
+	/* Start freeing mbufs from the entry after tx_tail */
+ tx_last = txq->tx_tail;
+ tx_id = swr_ring[tx_last].next_id;
+
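+	/* Nothing can be reclaimed if no descriptor is free and cleanup fails */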
+ if (txq->nb_tx_free == 0 && ngbe_xmit_cleanup(txq))
+ return 0;
+
+ nb_tx_to_clean = txq->nb_tx_free;
+ nb_tx_free_last = txq->nb_tx_free;
+ if (!free_cnt)
+ free_cnt = txq->nb_tx_desc;
+
+	/* Loop through swr_ring to count the number of
+	 * freeable mbufs and packets.
+ */
+ for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+ for (i = 0; i < nb_tx_to_clean &&
+ pkt_cnt < free_cnt &&
+ tx_id != tx_last; i++) {
+ if (swr_ring[tx_id].mbuf != NULL) {
+ rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+ swr_ring[tx_id].mbuf = NULL;
+
+ /*
+ * last segment in the packet,
+ * increment packet count
+ */
+ pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+ }
+
+ tx_id = swr_ring[tx_id].next_id;
+ }
+
+ if (pkt_cnt < free_cnt) {
+ if (ngbe_xmit_cleanup(txq))
+ break;
+
+ nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+ nb_tx_free_last = txq->nb_tx_free;
+ }
+ }
+
+ return (int)pkt_cnt;
+}
+
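+/* Reclaim descriptors on the simple Tx path, tx_free_thresh entries at a time */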
+static int
+ngbe_tx_done_cleanup_simple(struct ngbe_tx_queue *txq,
+ uint32_t free_cnt)
+{
+ int i, n, cnt;
+
+ if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+ free_cnt = txq->nb_tx_desc;
+
+ cnt = free_cnt - free_cnt % txq->tx_free_thresh;
+
+ for (i = 0; i < cnt; i += n) {
+ if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_free_thresh)
+ break;
+
+ n = ngbe_tx_free_bufs(txq);
+
+ if (n == 0)
+ break;
+ }
+
+ return i;
+}
+
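+/* Pick the cleanup routine matching the Tx path used by this queue */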
+int
+ngbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
+{
+ struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue;
+ if (txq->offloads == 0 &&
+ txq->tx_free_thresh >= RTE_PMD_NGBE_TX_MAX_BURST)
+ return ngbe_tx_done_cleanup_simple(txq, free_cnt);
+
+ return ngbe_tx_done_cleanup_full(txq, free_cnt);
+}
+
static void
ngbe_tx_free_swring(struct ngbe_tx_queue *txq)
{
txq->hthresh = tx_conf->tx_thresh.hthresh;
txq->wthresh = tx_conf->tx_thresh.wthresh;
txq->queue_id = queue_idx;
- txq->reg_idx = queue_idx;
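+	/* With SR-IOV active, map the queue into the default pool's register range */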
+ txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+ queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
txq->port_id = dev->data->port_id;
txq->offloads = offloads;
txq->ops = &def_txq_ops;
rxq->nb_rx_desc = nb_desc;
rxq->rx_free_thresh = rx_conf->rx_free_thresh;
rxq->queue_id = queue_idx;
- rxq->reg_idx = queue_idx;
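+	/* Apply the same SR-IOV-aware register index mapping as for Tx queues */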
+ rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
+ queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = RTE_ETHER_CRC_LEN;
return 0;
}
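+/* Count the Rx descriptors hardware has completed (DD bit set), scanning the
+ * ring from rx_tail in steps of NGBE_RXQ_SCAN_INTERVAL.
+ */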
+uint32_t
+ngbe_dev_rx_queue_count(void *rx_queue)
+{
+#define NGBE_RXQ_SCAN_INTERVAL 4
+ volatile struct ngbe_rx_desc *rxdp;
+ struct ngbe_rx_queue *rxq = rx_queue;
+ uint32_t desc = 0;
+
+ rxdp = &rxq->rx_ring[rxq->rx_tail];
+
+ while ((desc < rxq->nb_rx_desc) &&
+ (rxdp->qw1.lo.status &
+ rte_cpu_to_le_32(NGBE_RXD_STAT_DD))) {
+ desc += NGBE_RXQ_SCAN_INTERVAL;
+ rxdp += NGBE_RXQ_SCAN_INTERVAL;
+ if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+ rxdp = &(rxq->rx_ring[rxq->rx_tail +
+ desc - rxq->nb_rx_desc]);
+ }
+
+ return desc;
+}
+
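+/* Return the state of the Rx descriptor 'offset' entries past the current
+ * read position; offsets beyond what hardware still owns report UNAVAIL.
+ */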
+int
+ngbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct ngbe_rx_queue *rxq = rx_queue;
+ volatile uint32_t *status;
+ uint32_t nb_hold, desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ nb_hold = rxq->nb_rx_hold;
+ if (offset >= rxq->nb_rx_desc - nb_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].qw1.lo.status;
+ if (*status & rte_cpu_to_le_32(NGBE_RXD_STAT_DD))
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
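+/* Return the state of the Tx descriptor 'offset' entries past tx_tail:
+ * DONE once hardware has written back the DD bit.
+ */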
+int
+ngbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct ngbe_tx_queue *txq = tx_queue;
+ volatile uint32_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].dw3;
+ if (*status & rte_cpu_to_le_32(NGBE_TXD_DD))
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
void
ngbe_dev_clear_queues(struct rte_eth_dev *dev)
{
static int
ngbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
- switch (dev->data->dev_conf.rxmode.mq_mode) {
- case RTE_ETH_MQ_RX_RSS:
- ngbe_rss_configure(dev);
- break;
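+	/* RSS is only configured from the PF when SR-IOV is not active */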
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ switch (dev->data->dev_conf.rxmode.mq_mode) {
+ case RTE_ETH_MQ_RX_RSS:
+ ngbe_rss_configure(dev);
+ break;
- case RTE_ETH_MQ_RX_NONE:
- default:
- /* if mq_mode is none, disable rss mode.*/
- ngbe_rss_disable(dev);
- break;
+ case RTE_ETH_MQ_RX_NONE:
+ default:
+			/* if mq_mode is none, disable RSS mode. */
+ ngbe_rss_disable(dev);
+ break;
+ }
}
return 0;
return 0;
}
+
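+/* Report Rx queue configuration through the rxq_info_get ethdev callback */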
+void
+ngbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct ngbe_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = rxq->drop_en;
+ qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
+}
+
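+/* Report Tx queue configuration through the txq_info_get ethdev callback */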
+void
+ngbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct ngbe_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.offloads = txq->offloads;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}