X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Favf%2Favf_rxtx.c;h=1824ed70be9f2112f8816b57dfb6b8da3eb36657;hb=58f7db4396c0f6180cf94e977996af7ecdf08f2b;hp=baccec461bcee15a09621b6c1955472a08238184;hpb=a2b29a7733efa5d1344bb1a571269a17ecba21f0;p=dpdk.git diff --git a/drivers/net/avf/avf_rxtx.c b/drivers/net/avf/avf_rxtx.c index baccec461b..1824ed70be 100644 --- a/drivers/net/avf/avf_rxtx.c +++ b/drivers/net/avf/avf_rxtx.c @@ -17,7 +17,7 @@ #include #include #include -#include +#include #include #include #include @@ -92,6 +92,55 @@ check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh, return 0; } +#ifdef RTE_LIBRTE_AVF_INC_VECTOR +static inline bool +check_rx_vec_allow(struct avf_rx_queue *rxq) +{ + if (rxq->rx_free_thresh >= AVF_VPMD_RX_MAX_BURST && + rxq->nb_rx_desc % rxq->rx_free_thresh == 0) { + PMD_INIT_LOG(DEBUG, "Vector Rx can be enabled on this rxq."); + return TRUE; + } + + PMD_INIT_LOG(DEBUG, "Vector Rx cannot be enabled on this rxq."); + return FALSE; +} + +static inline bool +check_tx_vec_allow(struct avf_tx_queue *txq) +{ + if (!(txq->offloads & AVF_NO_VECTOR_FLAGS) && + txq->rs_thresh >= AVF_VPMD_TX_MAX_BURST && + txq->rs_thresh <= AVF_VPMD_TX_MAX_FREE_BUF) { + PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq."); + return TRUE; + } + PMD_INIT_LOG(DEBUG, "Vector Tx cannot be enabled on this txq."); + return FALSE; +} +#endif + +static inline bool +check_rx_bulk_allow(struct avf_rx_queue *rxq) +{ + int ret = TRUE; + + if (!(rxq->rx_free_thresh >= AVF_RX_MAX_BURST)) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->rx_free_thresh=%d, " + "AVF_RX_MAX_BURST=%d", + rxq->rx_free_thresh, AVF_RX_MAX_BURST); + ret = FALSE; + } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: " + "rxq->nb_rx_desc=%d, " + "rxq->rx_free_thresh=%d", + rxq->nb_rx_desc, rxq->rx_free_thresh); + ret = FALSE; + } + return ret; +} + static inline void reset_rx_queue(struct avf_rx_queue *rxq) { @@ -110,6 +159,11 @@ reset_rx_queue(struct avf_rx_queue *rxq) for (i = 0; i < AVF_RX_MAX_BURST; i++) rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf; + /* for rx bulk */ + rxq->rx_nb_avail = 0; + rxq->rx_next_avail = 0; + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); + rxq->rx_tail = 0; rxq->nb_rx_hold = 0; rxq->pkt_first_seg = NULL; @@ -205,6 +259,17 @@ release_rxq_mbufs(struct avf_rx_queue *rxq) rxq->sw_ring[i] = NULL; } } + + /* for rx bulk */ + if (rxq->rx_nb_avail == 0) + return; + for (i = 0; i < rxq->rx_nb_avail; i++) { + struct rte_mbuf *mbuf; + + mbuf = rxq->rx_stage[rxq->rx_next_avail + i]; + rte_pktmbuf_free_seg(mbuf); + } + rxq->rx_nb_avail = 0; } static inline void @@ -225,6 +290,14 @@ release_txq_mbufs(struct avf_tx_queue *txq) } } +static const struct avf_rxq_ops def_rxq_ops = { + .release_mbufs = release_rxq_mbufs, +}; + +static const struct avf_txq_ops def_txq_ops = { + .release_mbufs = release_txq_mbufs, +}; + int avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t nb_desc, unsigned int socket_id, @@ -325,7 +398,25 @@ avf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, rxq->q_set = TRUE; dev->data->rx_queues[queue_idx] = rxq; rxq->qrx_tail = hw->hw_addr + AVF_QRX_TAIL1(rxq->queue_id); + rxq->ops = &def_rxq_ops; + if (check_rx_bulk_allow(rxq) == TRUE) { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " + "satisfied. 
Rx Burst Bulk Alloc function will be " + "used on port=%d, queue=%d.", + rxq->port_id, rxq->queue_id); + } else { + PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are " + "not satisfied, Scattered Rx is requested " + "on port=%d, queue=%d.", + rxq->port_id, rxq->queue_id); + ad->rx_bulk_alloc_allowed = false; + } + +#ifdef RTE_LIBRTE_AVF_INC_VECTOR + if (check_rx_vec_allow(rxq) == FALSE) + ad->rx_vec_allowed = false; +#endif return 0; } @@ -337,6 +428,8 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev, const struct rte_eth_txconf *tx_conf) { struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct avf_adapter *ad = + AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); struct avf_tx_queue *txq; const struct rte_memzone *mz; uint32_t ring_size; @@ -381,7 +474,7 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->free_thresh = tx_free_thresh; txq->queue_id = queue_idx; txq->port_id = dev->data->port_id; - txq->txq_flags = tx_conf->txq_flags; + txq->offloads = tx_conf->offloads; txq->tx_deferred_start = tx_conf->tx_deferred_start; /* Allocate software ring */ @@ -416,6 +509,12 @@ avf_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->q_set = TRUE; dev->data->tx_queues[queue_idx] = txq; txq->qtx_tail = hw->hw_addr + AVF_QTX_TAIL1(queue_idx); + txq->ops = &def_txq_ops; + +#ifdef RTE_LIBRTE_AVF_INC_VECTOR + if (check_tx_vec_allow(txq) == FALSE) + ad->tx_vec_allowed = false; +#endif return 0; } @@ -514,7 +613,7 @@ avf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) } rxq = dev->data->rx_queues[rx_queue_id]; - release_rxq_mbufs(rxq); + rxq->ops->release_mbufs(rxq); reset_rx_queue(rxq); dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; @@ -542,7 +641,7 @@ avf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) } txq = dev->data->tx_queues[tx_queue_id]; - release_txq_mbufs(txq); + txq->ops->release_mbufs(txq); reset_tx_queue(txq); dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; @@ -557,7 +656,7 @@ avf_dev_rx_queue_release(void *rxq) if (!q) return; - release_rxq_mbufs(q); + q->ops->release_mbufs(q); rte_free(q->sw_ring); rte_memzone_free(q->mz); rte_free(q); @@ -571,7 +670,7 @@ avf_dev_tx_queue_release(void *txq) if (!q) return; - release_txq_mbufs(q); + q->ops->release_mbufs(q); rte_free(q->sw_ring); rte_memzone_free(q->mz); rte_free(q); @@ -595,7 +694,7 @@ avf_stop_queues(struct rte_eth_dev *dev) txq = dev->data->tx_queues[i]; if (!txq) continue; - release_txq_mbufs(txq); + txq->ops->release_mbufs(txq); reset_tx_queue(txq); dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } @@ -603,7 +702,7 @@ avf_stop_queues(struct rte_eth_dev *dev) rxq = dev->data->rx_queues[i]; if (!rxq) continue; - release_rxq_mbufs(rxq); + rxq->ops->release_mbufs(rxq); reset_rx_queue(rxq); dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; } @@ -987,6 +1086,252 @@ avf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, return nb_rx; } +#define AVF_LOOK_AHEAD 8 +static inline int +avf_rx_scan_hw_ring(struct avf_rx_queue *rxq) +{ + volatile union avf_rx_desc *rxdp; + struct rte_mbuf **rxep; + struct rte_mbuf *mb; + uint16_t pkt_len; + uint64_t qword1; + uint32_t rx_status; + int32_t s[AVF_LOOK_AHEAD], nb_dd; + int32_t i, j, nb_rx = 0; + uint64_t pkt_flags; + static const uint32_t ptype_tbl[UINT8_MAX + 1] __rte_cache_aligned = { + /* [0] reserved */ + [1] = RTE_PTYPE_L2_ETHER, + /* [2] - [21] reserved */ + [22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [23] = 
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + /* [25] reserved */ + [26] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [27] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [28] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + /* All others reserved */ + }; + + rxdp = &rxq->rx_ring[rxq->rx_tail]; + rxep = &rxq->sw_ring[rxq->rx_tail]; + + qword1 = rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len); + rx_status = (qword1 & AVF_RXD_QW1_STATUS_MASK) >> + AVF_RXD_QW1_STATUS_SHIFT; + + /* Make sure there is at least 1 packet to receive */ + if (!(rx_status & (1 << AVF_RX_DESC_STATUS_DD_SHIFT))) + return 0; + + /* Scan LOOK_AHEAD descriptors at a time to determine which + * descriptors reference packets that are ready to be received. + */ + for (i = 0; i < AVF_RX_MAX_BURST; i += AVF_LOOK_AHEAD, + rxdp += AVF_LOOK_AHEAD, rxep += AVF_LOOK_AHEAD) { + /* Read desc statuses backwards to avoid race condition */ + for (j = AVF_LOOK_AHEAD - 1; j >= 0; j--) { + qword1 = rte_le_to_cpu_64( + rxdp[j].wb.qword1.status_error_len); + s[j] = (qword1 & AVF_RXD_QW1_STATUS_MASK) >> + AVF_RXD_QW1_STATUS_SHIFT; + } + + rte_smp_rmb(); + + /* Compute how many status bits were set */ + for (j = 0, nb_dd = 0; j < AVF_LOOK_AHEAD; j++) + nb_dd += s[j] & (1 << AVF_RX_DESC_STATUS_DD_SHIFT); + + nb_rx += nb_dd; + + /* Translate descriptor info to mbuf parameters */ + for (j = 0; j < nb_dd; j++) { + AVF_DUMP_RX_DESC(rxq, &rxdp[j], + rxq->rx_tail + i * AVF_LOOK_AHEAD + j); + + mb = rxep[j]; + qword1 = rte_le_to_cpu_64 + (rxdp[j].wb.qword1.status_error_len); + pkt_len = ((qword1 & AVF_RXD_QW1_LENGTH_PBUF_MASK) >> + AVF_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len; + mb->data_len = pkt_len; + mb->pkt_len = pkt_len; + mb->ol_flags = 0; + avf_rxd_to_vlan_tci(mb, &rxdp[j]); + pkt_flags = avf_rxd_to_pkt_flags(qword1); + mb->packet_type = + ptype_tbl[(uint8_t)((qword1 & + AVF_RXD_QW1_PTYPE_MASK) >> + AVF_RXD_QW1_PTYPE_SHIFT)]; + + if (pkt_flags & PKT_RX_RSS_HASH) + mb->hash.rss = rte_le_to_cpu_32( + rxdp[j].wb.qword0.hi_dword.rss); + + mb->ol_flags |= pkt_flags; + } + + for (j = 0; j < AVF_LOOK_AHEAD; j++) + rxq->rx_stage[i + j] = rxep[j]; + + if (nb_dd != AVF_LOOK_AHEAD) + break; + } + + /* Clear software ring entries */ + for (i = 0; i < nb_rx; i++) + rxq->sw_ring[rxq->rx_tail + i] = NULL; + + return nb_rx; +} + +static inline uint16_t +avf_rx_fill_from_stage(struct avf_rx_queue *rxq, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t i; + struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail]; + + nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail); + + for (i = 0; i < nb_pkts; i++) + rx_pkts[i] = stage[i]; + + rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts); + rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts); + + return nb_pkts; +} + +static inline int +avf_rx_alloc_bufs(struct avf_rx_queue *rxq) +{ + volatile union avf_rx_desc *rxdp; + struct rte_mbuf **rxep; + struct rte_mbuf *mb; + uint16_t alloc_idx, i; + uint64_t dma_addr; + int diag; + + /* Allocate buffers in bulk */ + alloc_idx = (uint16_t)(rxq->rx_free_trigger - + (rxq->rx_free_thresh - 1)); + rxep = &rxq->sw_ring[alloc_idx]; + diag = rte_mempool_get_bulk(rxq->mp, (void *)rxep, + rxq->rx_free_thresh); + if (unlikely(diag != 0)) { + PMD_RX_LOG(ERR, "Failed to get mbufs in bulk"); + return -ENOMEM; + } + + rxdp = 
&rxq->rx_ring[alloc_idx]; + for (i = 0; i < rxq->rx_free_thresh; i++) { + if (likely(i < (rxq->rx_free_thresh - 1))) + /* Prefetch next mbuf */ + rte_prefetch0(rxep[i + 1]); + + mb = rxep[i]; + rte_mbuf_refcnt_set(mb, 1); + mb->next = NULL; + mb->data_off = RTE_PKTMBUF_HEADROOM; + mb->nb_segs = 1; + mb->port = rxq->port_id; + dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb)); + rxdp[i].read.hdr_addr = 0; + rxdp[i].read.pkt_addr = dma_addr; + } + + /* Update rx tail register */ + rte_wmb(); + AVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger); + + rxq->rx_free_trigger = + (uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh); + if (rxq->rx_free_trigger >= rxq->nb_rx_desc) + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); + + return 0; +} + +static inline uint16_t +rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct avf_rx_queue *rxq = (struct avf_rx_queue *)rx_queue; + struct rte_eth_dev *dev; + uint16_t nb_rx = 0; + + if (!nb_pkts) + return 0; + + if (rxq->rx_nb_avail) + return avf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); + + nb_rx = (uint16_t)avf_rx_scan_hw_ring(rxq); + rxq->rx_next_avail = 0; + rxq->rx_nb_avail = nb_rx; + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx); + + if (rxq->rx_tail > rxq->rx_free_trigger) { + if (avf_rx_alloc_bufs(rxq) != 0) { + uint16_t i, j; + + /* TODO: count rx_mbuf_alloc_failed here */ + + rxq->rx_nb_avail = 0; + rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx); + for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++) + rxq->sw_ring[j] = rxq->rx_stage[i]; + + return 0; + } + } + + if (rxq->rx_tail >= rxq->nb_rx_desc) + rxq->rx_tail = 0; + + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u, nb_rx=%u", + rxq->port_id, rxq->queue_id, + rxq->rx_tail, nb_rx); + + if (rxq->rx_nb_avail) + return avf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts); + + return 0; +} + +static uint16_t +avf_recv_pkts_bulk_alloc(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_rx = 0, n, count; + + if (unlikely(nb_pkts == 0)) + return 0; + + if (likely(nb_pkts <= AVF_RX_MAX_BURST)) + return rx_recv_pkts(rx_queue, rx_pkts, nb_pkts); + + while (nb_pkts) { + n = RTE_MIN(nb_pkts, AVF_RX_MAX_BURST); + count = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n); + nb_rx = (uint16_t)(nb_rx + count); + nb_pkts = (uint16_t)(nb_pkts - count); + if (count < n) + break; + } + + return nb_rx; +} + static inline int avf_xmit_cleanup(struct avf_tx_queue *txq) { @@ -1320,6 +1665,27 @@ end_of_tx: return nb_tx; } +static uint16_t +avf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t nb_tx = 0; + struct avf_tx_queue *txq = (struct avf_tx_queue *)tx_queue; + + while (nb_pkts) { + uint16_t ret, num; + + num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh); + ret = avf_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx], num); + nb_tx += ret; + nb_pkts -= ret; + if (ret < num) + break; + } + + return nb_tx; +} + /* TX prep functions */ uint16_t avf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, @@ -1372,16 +1738,222 @@ avf_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts, void avf_set_rx_function(struct rte_eth_dev *dev) { - if (dev->data->scattered_rx) + struct avf_adapter *adapter = + AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct avf_rx_queue *rxq; + int i; + + if (adapter->rx_vec_allowed) { + if (dev->data->scattered_rx) { + PMD_DRV_LOG(DEBUG, "Using Vector Scattered Rx callback" + " (port=%d).", dev->data->port_id); + 
dev->rx_pkt_burst = avf_recv_scattered_pkts_vec; + } else { + PMD_DRV_LOG(DEBUG, "Using Vector Rx callback" + " (port=%d).", dev->data->port_id); + dev->rx_pkt_burst = avf_recv_pkts_vec; + } + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (!rxq) + continue; + avf_rxq_vec_setup(rxq); + } + } else if (dev->data->scattered_rx) { + PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).", + dev->data->port_id); dev->rx_pkt_burst = avf_recv_scattered_pkts; - else + } else if (adapter->rx_bulk_alloc_allowed) { + PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).", + dev->data->port_id); + dev->rx_pkt_burst = avf_recv_pkts_bulk_alloc; + } else { + PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).", + dev->data->port_id); dev->rx_pkt_burst = avf_recv_pkts; + } } /* choose tx function*/ void avf_set_tx_function(struct rte_eth_dev *dev) { - dev->tx_pkt_burst = avf_xmit_pkts; - dev->tx_pkt_prepare = avf_prep_pkts; + struct avf_adapter *adapter = + AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct avf_tx_queue *txq; + int i; + + if (adapter->tx_vec_allowed) { + PMD_DRV_LOG(DEBUG, "Using Vector Tx callback (port=%d).", + dev->data->port_id); + dev->tx_pkt_burst = avf_xmit_pkts_vec; + dev->tx_pkt_prepare = NULL; + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (!txq) + continue; + avf_txq_vec_setup(txq); + } + } else { + PMD_DRV_LOG(DEBUG, "Using Basic Tx callback (port=%d).", + dev->data->port_id); + dev->tx_pkt_burst = avf_xmit_pkts; + dev->tx_pkt_prepare = avf_prep_pkts; + } +} + +void +avf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct avf_rx_queue *rxq; + + rxq = dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->mp; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = rxq->nb_rx_desc; + + qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; + qinfo->conf.rx_drop_en = TRUE; + qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; +} + +void +avf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct avf_tx_queue *txq; + + txq = dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->nb_tx_desc; + + qinfo->conf.tx_free_thresh = txq->free_thresh; + qinfo->conf.tx_rs_thresh = txq->rs_thresh; + qinfo->conf.offloads = txq->offloads; + qinfo->conf.tx_deferred_start = txq->tx_deferred_start; +} + +/* Get the number of used descriptors of a rx queue */ +uint32_t +avf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id) +{ +#define AVF_RXQ_SCAN_INTERVAL 4 + volatile union avf_rx_desc *rxdp; + struct avf_rx_queue *rxq; + uint16_t desc = 0; + + rxq = dev->data->rx_queues[queue_id]; + rxdp = &rxq->rx_ring[rxq->rx_tail]; + while ((desc < rxq->nb_rx_desc) && + ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) & + AVF_RXD_QW1_STATUS_MASK) >> AVF_RXD_QW1_STATUS_SHIFT) & + (1 << AVF_RX_DESC_STATUS_DD_SHIFT)) { + /* Check the DD bit of a rx descriptor of each 4 in a group, + * to avoid checking too frequently and downgrading performance + * too much. 
+ */ + desc += AVF_RXQ_SCAN_INTERVAL; + rxdp += AVF_RXQ_SCAN_INTERVAL; + if (rxq->rx_tail + desc >= rxq->nb_rx_desc) + rxdp = &(rxq->rx_ring[rxq->rx_tail + + desc - rxq->nb_rx_desc]); + } + + return desc; +} + +int +avf_dev_rx_desc_status(void *rx_queue, uint16_t offset) +{ + struct avf_rx_queue *rxq = rx_queue; + volatile uint64_t *status; + uint64_t mask; + uint32_t desc; + + if (unlikely(offset >= rxq->nb_rx_desc)) + return -EINVAL; + + if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold) + return RTE_ETH_RX_DESC_UNAVAIL; + + desc = rxq->rx_tail + offset; + if (desc >= rxq->nb_rx_desc) + desc -= rxq->nb_rx_desc; + + status = &rxq->rx_ring[desc].wb.qword1.status_error_len; + mask = rte_le_to_cpu_64((1ULL << AVF_RX_DESC_STATUS_DD_SHIFT) + << AVF_RXD_QW1_STATUS_SHIFT); + if (*status & mask) + return RTE_ETH_RX_DESC_DONE; + + return RTE_ETH_RX_DESC_AVAIL; +} + +int +avf_dev_tx_desc_status(void *tx_queue, uint16_t offset) +{ + struct avf_tx_queue *txq = tx_queue; + volatile uint64_t *status; + uint64_t mask, expect; + uint32_t desc; + + if (unlikely(offset >= txq->nb_tx_desc)) + return -EINVAL; + + desc = txq->tx_tail + offset; + /* go to next desc that has the RS bit */ + desc = ((desc + txq->rs_thresh - 1) / txq->rs_thresh) * + txq->rs_thresh; + if (desc >= txq->nb_tx_desc) { + desc -= txq->nb_tx_desc; + if (desc >= txq->nb_tx_desc) + desc -= txq->nb_tx_desc; + } + + status = &txq->tx_ring[desc].cmd_type_offset_bsz; + mask = rte_le_to_cpu_64(AVF_TXD_QW1_DTYPE_MASK); + expect = rte_cpu_to_le_64( + AVF_TX_DESC_DTYPE_DESC_DONE << AVF_TXD_QW1_DTYPE_SHIFT); + if ((*status & mask) == expect) + return RTE_ETH_TX_DESC_DONE; + + return RTE_ETH_TX_DESC_FULL; +} + +uint16_t __attribute__((weak)) +avf_recv_pkts_vec(__rte_unused void *rx_queue, + __rte_unused struct rte_mbuf **rx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +uint16_t __attribute__((weak)) +avf_recv_scattered_pkts_vec(__rte_unused void *rx_queue, + __rte_unused struct rte_mbuf **rx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +uint16_t __attribute__((weak)) +avf_xmit_fixed_burst_vec(__rte_unused void *tx_queue, + __rte_unused struct rte_mbuf **tx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +int __attribute__((weak)) +avf_rxq_vec_setup(__rte_unused struct avf_rx_queue *rxq) +{ + return -1; +} + +int __attribute__((weak)) +avf_txq_vec_setup(__rte_unused struct avf_tx_queue *txq) +{ + return -1; }
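
The __attribute__((weak)) stubs at the end of the diff are what let the scalar build link even when no vector implementation is compiled in: when RTE_LIBRTE_AVF_INC_VECTOR is enabled, the vector object file supplies strong definitions of the same symbols and the linker prefers those. A minimal standalone sketch of that override pattern (hypothetical file and function names, GCC/clang specific, not part of the driver):

/* rx_scalar.c - always compiled; weak fallback definition. */
__attribute__((weak)) int demo_recv_vec(void)
{
	return 0;	/* "vector path unavailable", mirroring the stubs above */
}

/* rx_vec.c - compiled only when the vector path is enabled;
 * this strong definition overrides the weak one at link time.
 */
int demo_recv_vec(void)
{
	return 32;
}

/* main.c */
#include <stdio.h>

int demo_recv_vec(void);

int main(void)
{
	/* Prints 0 when built as "cc main.c rx_scalar.c",
	 * 32 when built as "cc main.c rx_scalar.c rx_vec.c".
	 */
	printf("%d\n", demo_recv_vec());
	return 0;
}

This is why avf_set_rx_function() and avf_set_tx_function() can reference the vector callbacks unconditionally and still link on builds that leave the vector code out.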