unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
+static uint16_t avp_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+static uint16_t avp_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+static uint16_t avp_xmit_scattered_pkts(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+static uint16_t avp_xmit_pkts(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
static void avp_dev_rx_queue_release(void *rxq);
static void avp_dev_tx_queue_release(void *txq);
+
+static void avp_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void avp_dev_stats_reset(struct rte_eth_dev *dev);
+
#define AVP_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
+#define AVP_MAX_RX_BURST 64
+#define AVP_MAX_TX_BURST 64
#define AVP_MAX_MAC_ADDRS 1
#define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN
.dev_configure = avp_dev_configure,
.dev_infos_get = avp_dev_info_get,
.vlan_offload_set = avp_vlan_offload_set,
+ .stats_get = avp_dev_stats_get,
+ .stats_reset = avp_dev_stats_reset,
.link_update = avp_dev_link_update,
.rx_queue_setup = avp_dev_rx_queue_setup,
.rx_queue_release = avp_dev_rx_queue_release,
return ret == 0 ? request.result : ret;
}
+/* translate from host mbuf virtual address to guest virtual address */
+static inline void *
+avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address)
+{
+ return RTE_PTR_ADD(RTE_PTR_SUB(host_mbuf_address,
+ (uintptr_t)avp->host_mbuf_addr),
+ (uintptr_t)avp->mbuf_addr);
+}
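+
+/*
+ * Example with hypothetical addresses: if the shared mbuf region is
+ * mapped at host_mbuf_addr = 0x7f1000000000 on the host and at
+ * mbuf_addr = 0x400000000000 in the guest, a host pointer
+ * 0x7f1000001040 translates as
+ *
+ *   offset = 0x7f1000001040 - host_mbuf_addr = 0x1040
+ *   guest  = mbuf_addr + offset              = 0x400000001040
+ */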
+
/* translate from host physical address to guest virtual address */
static void *
avp_dev_translate_address(struct rte_eth_dev *eth_dev,
pci_dev = AVP_DEV_TO_PCI(eth_dev);
eth_dev->dev_ops = &avp_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &avp_recv_pkts;
+ eth_dev->tx_pkt_burst = &avp_xmit_pkts;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
/*
* be mapped to the same virtual address so all pointers should
* be valid.
*/
+ if (eth_dev->data->scattered_rx) {
+ PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
+ eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
+ eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
+ }
return 0;
}
.dev_private_size = sizeof(struct avp_adapter),
};
+static int
+avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,
+ struct avp_dev *avp)
+{
+ unsigned int max_rx_pkt_len;
+
+ max_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ if ((max_rx_pkt_len > avp->guest_mbuf_size) ||
+ (max_rx_pkt_len > avp->host_mbuf_size)) {
+ /*
+ * If the guest MTU is greater than either the host or guest
+ * mbuf size then chained mbufs have to be enabled in the TX
+ * direction. It is assumed that the application will not need
+ * to send packets larger than its max_rx_pkt_len (MRU).
+ */
+ return 1;
+ }
+
+ if ((avp->max_rx_pkt_len > avp->guest_mbuf_size) ||
+ (avp->max_rx_pkt_len > avp->host_mbuf_size)) {
+ /*
+ * If the host MRU is greater than its own mbuf size or the
+ * guest mbuf size then chained mbufs have to be enabled in the
+ * RX direction.
+ */
+ return 1;
+ }
+
+ return 0;
+}
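+
+/*
+ * Example with assumed sizes: with 2048 byte guest and host mbuf data
+ * areas and max_rx_pkt_len configured for jumbo frames (e.g. 9018),
+ * avp_dev_enable_scattered() returns 1 and avp_dev_rx_queue_setup()
+ * installs the scattered burst handlers; with max_rx_pkt_len = 1518 it
+ * returns 0 and the single-segment fast paths are kept.
+ */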
+
static int
avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
uint16_t rx_queue_id,
avp->guest_mbuf_size = (uint16_t)(mbp_priv->mbuf_data_room_size);
avp->guest_mbuf_size -= RTE_PKTMBUF_HEADROOM;
+ if (avp_dev_enable_scattered(eth_dev, avp)) {
+ if (!eth_dev->data->scattered_rx) {
+ PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
+ eth_dev->data->scattered_rx = 1;
+ eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
+ eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
+ }
+ }
+
PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
avp->max_rx_pkt_len,
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
return 0;
}
+static inline int
+_avp_cmp_ether_addr(struct ether_addr *a, struct ether_addr *b)
+{
+ uint16_t *_a = (uint16_t *)&a->addr_bytes[0];
+ uint16_t *_b = (uint16_t *)&b->addr_bytes[0];
+ return (_a[0] ^ _b[0]) | (_a[1] ^ _b[1]) | (_a[2] ^ _b[2]);
+}
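+
+/*
+ * The comparison above views the 48-bit Ethernet address as three
+ * 16-bit words and ORs together the XOR of each pair, so the result is
+ * zero only when all six bytes match; e.g. 52:54:00:12:34:56 versus
+ * 52:54:00:12:34:57 differs in the last word and yields a non-zero
+ * value. It assumes the addresses are at least 2-byte aligned.
+ */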
+
+static inline int
+_avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m)
+{
+ struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
+
+ if (likely(_avp_cmp_ether_addr(&avp->ethaddr, &eth->d_addr) == 0)) {
+ /* allow all packets destined to our address */
+ return 0;
+ }
+
+ if (likely(is_broadcast_ether_addr(&eth->d_addr))) {
+ /* allow all broadcast packets */
+ return 0;
+ }
+
+ if (likely(is_multicast_ether_addr(&eth->d_addr))) {
+ /* allow all multicast packets */
+ return 0;
+ }
+
+ if (avp->flags & AVP_F_PROMISC) {
+ /* allow all packets when in promiscuous mode */
+ return 0;
+ }
+
+ return -1;
+}
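+
+/*
+ * Example: with avp->ethaddr = 52:54:00:12:34:56 and promiscuous mode
+ * disabled, unicast frames to that address plus all broadcast and
+ * multicast frames are accepted (return 0); unicast frames to any other
+ * address return -1 and are silently dropped by the receive functions.
+ */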
+
+#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
+static inline void
+__avp_dev_buffer_sanity_check(struct avp_dev *avp, struct rte_avp_desc *buf)
+{
+ struct rte_avp_desc *first_buf;
+ struct rte_avp_desc *pkt_buf;
+ unsigned int pkt_len;
+ unsigned int nb_segs;
+ void *pkt_data;
+ unsigned int i;
+
+ first_buf = avp_dev_translate_buffer(avp, buf);
+
+ i = 0;
+ pkt_len = 0;
+ nb_segs = first_buf->nb_segs;
+ do {
+ /* Adjust pointers for guest addressing */
+ pkt_buf = avp_dev_translate_buffer(avp, buf);
+ if (pkt_buf == NULL)
+ rte_panic("bad buffer: segment %u has an invalid address %p\n",
+ i, buf);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+ if (pkt_data == NULL)
+ rte_panic("bad buffer: segment %u has a NULL data pointer\n",
+ i);
+ if (pkt_buf->data_len == 0)
+ rte_panic("bad buffer: segment %u has 0 data length\n",
+ i);
+ pkt_len += pkt_buf->data_len;
+ nb_segs--;
+ i++;
+
+ } while (nb_segs && (buf = pkt_buf->next) != NULL);
+
+ if (nb_segs != 0)
+ rte_panic("bad buffer: expected %u segments found %u\n",
+ first_buf->nb_segs, (first_buf->nb_segs - nb_segs));
+ if (pkt_len != first_buf->pkt_len)
+ rte_panic("bad buffer: expected length %u found %u\n",
+ first_buf->pkt_len, pkt_len);
+}
+
+#define avp_dev_buffer_sanity_check(a, b) \
+ __avp_dev_buffer_sanity_check((a), (b))
+
+#else /* RTE_LIBRTE_AVP_DEBUG_BUFFERS */
+
+#define avp_dev_buffer_sanity_check(a, b) do {} while (0)
+
+#endif
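+
+/*
+ * The checks above are compiled in only when RTE_LIBRTE_AVP_DEBUG_BUFFERS
+ * is defined (typically by setting CONFIG_RTE_LIBRTE_AVP_DEBUG_BUFFERS=y
+ * in the build configuration); otherwise avp_dev_buffer_sanity_check()
+ * expands to a no-op and adds no overhead to the data path.
+ */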
+
+/*
+ * Copy a host buffer chain to a set of mbufs. This function assumes that
+ * the caller has provided exactly the number of mbufs needed to copy all
+ * source bytes.
+ */
+static inline struct rte_mbuf *
+avp_dev_copy_from_buffers(struct avp_dev *avp,
+ struct rte_avp_desc *buf,
+ struct rte_mbuf **mbufs,
+ unsigned int count)
+{
+ struct rte_mbuf *m_previous = NULL;
+ struct rte_avp_desc *pkt_buf;
+ unsigned int total_length = 0;
+ unsigned int copy_length;
+ unsigned int src_offset;
+ struct rte_mbuf *m;
+ uint64_t ol_flags;
+ uint16_t vlan_tci;
+ void *pkt_data;
+ unsigned int i;
+
+ avp_dev_buffer_sanity_check(avp, buf);
+
+ /* setup the first source buffer */
+ pkt_buf = avp_dev_translate_buffer(avp, buf);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+ total_length = pkt_buf->pkt_len;
+ src_offset = 0;
+
+ if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
+ ol_flags = PKT_RX_VLAN_PKT;
+ vlan_tci = pkt_buf->vlan_tci;
+ } else {
+ ol_flags = 0;
+ vlan_tci = 0;
+ }
+
+ for (i = 0; (i < count) && (buf != NULL); i++) {
+ /* fill each destination buffer */
+ m = mbufs[i];
+
+ if (m_previous != NULL)
+ m_previous->next = m;
+
+ m_previous = m;
+
+ do {
+ /*
+ * Copy as many source buffers as will fit in the
+ * destination buffer.
+ */
+ copy_length = RTE_MIN((avp->guest_mbuf_size -
+ rte_pktmbuf_data_len(m)),
+ (pkt_buf->data_len -
+ src_offset));
+ rte_memcpy(RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
+ rte_pktmbuf_data_len(m)),
+ RTE_PTR_ADD(pkt_data, src_offset),
+ copy_length);
+ rte_pktmbuf_data_len(m) += copy_length;
+ src_offset += copy_length;
+
+ if (likely(src_offset == pkt_buf->data_len)) {
+ /* need a new source buffer */
+ buf = pkt_buf->next;
+ if (buf != NULL) {
+ pkt_buf = avp_dev_translate_buffer(
+ avp, buf);
+ pkt_data = avp_dev_translate_buffer(
+ avp, pkt_buf->data);
+ src_offset = 0;
+ }
+ }
+
+ if (unlikely(rte_pktmbuf_data_len(m) ==
+ avp->guest_mbuf_size)) {
+ /* need a new destination mbuf */
+ break;
+ }
+
+ } while (buf != NULL);
+ }
+
+ m = mbufs[0];
+ m->ol_flags = ol_flags;
+ m->nb_segs = count;
+ rte_pktmbuf_pkt_len(m) = total_length;
+ m->vlan_tci = vlan_tci;
+
+ __rte_mbuf_sanity_check(m, 1);
+
+ return m;
+}
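+
+/*
+ * Worked example with assumed sizes: a 3000 byte host packet split into
+ * two 1500 byte host buffers, copied into guest mbufs with 2048 byte
+ * data rooms, needs ceil(3000 / 2048) = 2 mbufs. The first mbuf is
+ * filled with 1500 bytes from buffer 0 plus 548 bytes from buffer 1;
+ * the second receives the remaining 952 bytes. mbufs[0] is then
+ * finalized with nb_segs = 2 and pkt_len = 3000.
+ */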
+
+static uint16_t
+avp_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct avp_queue *rxq = (struct avp_queue *)rx_queue;
+ struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
+ struct rte_mbuf *mbufs[RTE_AVP_MAX_MBUF_SEGMENTS];
+ struct avp_dev *avp = rxq->avp;
+ struct rte_avp_desc *pkt_buf;
+ struct rte_avp_fifo *free_q;
+ struct rte_avp_fifo *rx_q;
+ struct rte_avp_desc *buf;
+ unsigned int count, avail, n;
+ unsigned int guest_mbuf_size;
+ struct rte_mbuf *m;
+ unsigned int required;
+ unsigned int buf_len;
+ unsigned int port_id;
+ unsigned int i;
+
+ guest_mbuf_size = avp->guest_mbuf_size;
+ port_id = avp->port_id;
+ rx_q = avp->rx_q[rxq->queue_id];
+ free_q = avp->free_q[rxq->queue_id];
+
+ /* setup next queue to service */
+ rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
+ (rxq->queue_id + 1) : rxq->queue_base;
+
+ /* determine how many slots are available in the free queue */
+ count = avp_fifo_free_count(free_q);
+
+ /* determine how many packets are available in the rx queue */
+ avail = avp_fifo_count(rx_q);
+
+ /* determine how many packets can be received */
+ count = RTE_MIN(count, avail);
+ count = RTE_MIN(count, nb_pkts);
+ count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);
+
+ if (unlikely(count == 0)) {
+ /* no free buffers, or no buffers on the rx queue */
+ return 0;
+ }
+
+ /* retrieve pending packets */
+ n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
+ PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
+ count, rx_q);
+
+ count = 0;
+ for (i = 0; i < n; i++) {
+ /* prefetch next entry while processing current one */
+ if (i + 1 < n) {
+ pkt_buf = avp_dev_translate_buffer(avp,
+ avp_bufs[i + 1]);
+ rte_prefetch0(pkt_buf);
+ }
+ buf = avp_bufs[i];
+
+ /* Peek into the first buffer to determine the total length */
+ pkt_buf = avp_dev_translate_buffer(avp, buf);
+ buf_len = pkt_buf->pkt_len;
+
+ /* Allocate enough mbufs to receive the entire packet */
+ required = (buf_len + guest_mbuf_size - 1) / guest_mbuf_size;
+ if (rte_pktmbuf_alloc_bulk(avp->pool, mbufs, required)) {
+ rxq->dev_data->rx_mbuf_alloc_failed++;
+ continue;
+ }
+
+ /* Copy the data from the buffers to our mbufs */
+ m = avp_dev_copy_from_buffers(avp, buf, mbufs, required);
+
+ /* finalize mbuf */
+ m->port = port_id;
+
+ if (_avp_mac_filter(avp, m) != 0) {
+ /* silently discard packets not destined to our MAC */
+ rte_pktmbuf_free(m);
+ continue;
+ }
+
+ /* return new mbuf to caller */
+ rx_pkts[count++] = m;
+ rxq->bytes += buf_len;
+ }
+
+ rxq->packets += count;
+
+ /* return the buffers to the free queue */
+ avp_fifo_put(free_q, (void **)&avp_bufs[0], n);
+
+ return count;
+}
+
+static uint16_t
+avp_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct avp_queue *rxq = (struct avp_queue *)rx_queue;
+ struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
+ struct avp_dev *avp = rxq->avp;
+ struct rte_avp_desc *pkt_buf;
+ struct rte_avp_fifo *free_q;
+ struct rte_avp_fifo *rx_q;
+ unsigned int count, avail, n;
+ unsigned int pkt_len;
+ struct rte_mbuf *m;
+ char *pkt_data;
+ unsigned int i;
+
+ rx_q = avp->rx_q[rxq->queue_id];
+ free_q = avp->free_q[rxq->queue_id];
+
+ /* setup next queue to service */
+ rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
+ (rxq->queue_id + 1) : rxq->queue_base;
+
+ /* determine how many slots are available in the free queue */
+ count = avp_fifo_free_count(free_q);
+
+ /* determine how many packets are available in the rx queue */
+ avail = avp_fifo_count(rx_q);
+
+ /* determine how many packets can be received */
+ count = RTE_MIN(count, avail);
+ count = RTE_MIN(count, nb_pkts);
+ count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);
+
+ if (unlikely(count == 0)) {
+ /* no free buffers, or no buffers on the rx queue */
+ return 0;
+ }
+
+ /* retrieve pending packets */
+ n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
+ PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
+ count, rx_q);
+
+ count = 0;
+ for (i = 0; i < n; i++) {
+ /* prefetch next entry while processing current one */
+ if (i < n - 1) {
+ pkt_buf = avp_dev_translate_buffer(avp,
+ avp_bufs[i + 1]);
+ rte_prefetch0(pkt_buf);
+ }
+
+ /* Adjust host pointers for guest addressing */
+ pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+ pkt_len = pkt_buf->pkt_len;
+
+ if (unlikely((pkt_len > avp->guest_mbuf_size) ||
+ (pkt_buf->nb_segs > 1))) {
+ /*
+ * application should be using the scattered receive
+ * function
+ */
+ rxq->errors++;
+ continue;
+ }
+
+ /* allocate a new mbuf for the received packet */
+ m = rte_pktmbuf_alloc(avp->pool);
+ if (unlikely(m == NULL)) {
+ rxq->dev_data->rx_mbuf_alloc_failed++;
+ continue;
+ }
+
+ /* copy data out of the host buffer to our buffer */
+ m->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_memcpy(rte_pktmbuf_mtod(m, void *), pkt_data, pkt_len);
+
+ /* initialize the local mbuf */
+ rte_pktmbuf_data_len(m) = pkt_len;
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+ m->port = avp->port_id;
+
+ if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
+ m->ol_flags = PKT_RX_VLAN_PKT;
+ m->vlan_tci = pkt_buf->vlan_tci;
+ }
+
+ if (_avp_mac_filter(avp, m) != 0) {
+ /* silently discard packets not destined to our MAC */
+ rte_pktmbuf_free(m);
+ continue;
+ }
+
+ /* return new mbuf to caller */
+ rx_pkts[count++] = m;
+ rxq->bytes += pkt_len;
+ }
+
+ rxq->packets += count;
+
+ /* return the buffers to the free queue */
+ avp_fifo_put(free_q, (void **)&avp_bufs[0], n);
+
+ return count;
+}
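+
+/*
+ * Note that the single-segment receive path above drops (and counts as
+ * errors) any host packet that spans multiple buffers or exceeds the
+ * guest mbuf size; avp_dev_rx_queue_setup() switches the device to
+ * avp_recv_scattered_pkts() whenever such packets are possible, so
+ * hitting this case indicates a configuration mismatch.
+ */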
+
+/*
+ * Copy a chained mbuf to a set of host buffers. This function assumes that
+ * there are sufficient destination buffers to contain the entire source
+ * packet.
+ */
+static inline uint16_t
+avp_dev_copy_to_buffers(struct avp_dev *avp,
+ struct rte_mbuf *mbuf,
+ struct rte_avp_desc **buffers,
+ unsigned int count)
+{
+ struct rte_avp_desc *previous_buf = NULL;
+ struct rte_avp_desc *first_buf = NULL;
+ struct rte_avp_desc *pkt_buf;
+ struct rte_avp_desc *buf;
+ size_t total_length;
+ struct rte_mbuf *m;
+ size_t copy_length;
+ size_t src_offset;
+ char *pkt_data;
+ unsigned int i;
+
+ __rte_mbuf_sanity_check(mbuf, 1);
+
+ m = mbuf;
+ src_offset = 0;
+ total_length = rte_pktmbuf_pkt_len(m);
+ for (i = 0; (i < count) && (m != NULL); i++) {
+ /* fill each destination buffer */
+ buf = buffers[i];
+
+ if (i < count - 1) {
+ /* prefetch next entry while processing this one */
+ pkt_buf = avp_dev_translate_buffer(avp, buffers[i + 1]);
+ rte_prefetch0(pkt_buf);
+ }
+
+ /* Adjust pointers for guest addressing */
+ pkt_buf = avp_dev_translate_buffer(avp, buf);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+
+ /* setup the buffer chain */
+ if (previous_buf != NULL)
+ previous_buf->next = buf;
+ else
+ first_buf = pkt_buf;
+
+ previous_buf = pkt_buf;
+
+ do {
+ /*
+ * copy as many source mbuf segments as will fit in the
+ * destination buffer.
+ */
+ copy_length = RTE_MIN((avp->host_mbuf_size -
+ pkt_buf->data_len),
+ (rte_pktmbuf_data_len(m) -
+ src_offset));
+ rte_memcpy(RTE_PTR_ADD(pkt_data, pkt_buf->data_len),
+ RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
+ src_offset),
+ copy_length);
+ pkt_buf->data_len += copy_length;
+ src_offset += copy_length;
+
+ if (likely(src_offset == rte_pktmbuf_data_len(m))) {
+ /* need a new source buffer */
+ m = m->next;
+ src_offset = 0;
+ }
+
+ if (unlikely(pkt_buf->data_len ==
+ avp->host_mbuf_size)) {
+ /* need a new destination buffer */
+ break;
+ }
+
+ } while (m != NULL);
+ }
+
+ first_buf->nb_segs = count;
+ first_buf->pkt_len = total_length;
+
+ if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
+ first_buf->vlan_tci = mbuf->vlan_tci;
+ }
+
+ avp_dev_buffer_sanity_check(avp, buffers[0]);
+
+ return total_length;
+}
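+
+/*
+ * Worked example with assumed sizes: transmitting a 5000 byte mbuf
+ * chain into host buffers with 2048 byte data areas consumes
+ * ceil(5000 / 2048) = 3 buffers (2048 + 2048 + 904); the first
+ * descriptor is finalized with nb_segs = 3 and pkt_len = 5000, which
+ * matches the "required" calculation in avp_xmit_scattered_pkts()
+ * below.
+ */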
+
+static uint16_t
+avp_xmit_scattered_pkts(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_avp_desc *avp_bufs[(AVP_MAX_TX_BURST *
+ RTE_AVP_MAX_MBUF_SEGMENTS)];
+ struct avp_queue *txq = (struct avp_queue *)tx_queue;
+ struct rte_avp_desc *tx_bufs[AVP_MAX_TX_BURST];
+ struct avp_dev *avp = txq->avp;
+ struct rte_avp_fifo *alloc_q;
+ struct rte_avp_fifo *tx_q;
+ unsigned int count, avail, n;
+ unsigned int orig_nb_pkts;
+ struct rte_mbuf *m;
+ unsigned int required;
+ unsigned int segments;
+ unsigned int tx_bytes;
+ unsigned int i;
+
+ orig_nb_pkts = nb_pkts;
+ tx_q = avp->tx_q[txq->queue_id];
+ alloc_q = avp->alloc_q[txq->queue_id];
+
+ /* limit the number of transmitted packets to the max burst size */
+ if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
+ nb_pkts = AVP_MAX_TX_BURST;
+
+ /* determine how many buffers are available to copy into */
+ avail = avp_fifo_count(alloc_q);
+ if (unlikely(avail > (AVP_MAX_TX_BURST *
+ RTE_AVP_MAX_MBUF_SEGMENTS)))
+ avail = AVP_MAX_TX_BURST * RTE_AVP_MAX_MBUF_SEGMENTS;
+
+ /* determine how many slots are available in the transmit queue */
+ count = avp_fifo_free_count(tx_q);
+
+ /* determine how many packets can be sent */
+ nb_pkts = RTE_MIN(count, nb_pkts);
+
+ /* determine how many packets will fit in the available buffers */
+ count = 0;
+ segments = 0;
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ if (likely(i < (unsigned int)nb_pkts - 1)) {
+ /* prefetch next entry while processing this one */
+ rte_prefetch0(tx_pkts[i + 1]);
+ }
+ required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
+ avp->host_mbuf_size;
+
+ if (unlikely((required == 0) ||
+ (required > RTE_AVP_MAX_MBUF_SEGMENTS)))
+ break;
+ else if (unlikely(required + segments > avail))
+ break;
+ segments += required;
+ count++;
+ }
+ nb_pkts = count;
+
+ if (unlikely(nb_pkts == 0)) {
+ /* no available buffers, or no space on the tx queue */
+ txq->errors += orig_nb_pkts;
+ return 0;
+ }
+
+ PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
+ nb_pkts, tx_q);
+
+ /* retrieve sufficient send buffers */
+ n = avp_fifo_get(alloc_q, (void **)&avp_bufs, segments);
+ if (unlikely(n != segments)) {
+ PMD_TX_LOG(DEBUG, "Failed to allocate buffers "
+ "n=%u, segments=%u, orig=%u\n",
+ n, segments, orig_nb_pkts);
+ txq->errors += orig_nb_pkts;
+ return 0;
+ }
+
+ tx_bytes = 0;
+ count = 0;
+ for (i = 0; i < nb_pkts; i++) {
+ /* process each packet to be transmitted */
+ m = tx_pkts[i];
+
+ /* determine how many buffers are required for this packet */
+ required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
+ avp->host_mbuf_size;
+
+ tx_bytes += avp_dev_copy_to_buffers(avp, m,
+ &avp_bufs[count], required);
+ tx_bufs[i] = avp_bufs[count];
+ count += required;
+
+ /* free the original mbuf */
+ rte_pktmbuf_free(m);
+ }
+
+ txq->packets += nb_pkts;
+ txq->bytes += tx_bytes;
+
+#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
+ for (i = 0; i < nb_pkts; i++)
+ avp_dev_buffer_sanity_check(avp, tx_bufs[i]);
+#endif
+
+ /* send the packets */
+ n = avp_fifo_put(tx_q, (void **)&tx_bufs[0], nb_pkts);
+ if (unlikely(n != orig_nb_pkts))
+ txq->errors += (orig_nb_pkts - n);
+
+ return n;
+}
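+
+/*
+ * Note on the two-pass structure above: the first loop only sizes the
+ * burst (how many whole packets fit in the "avail" alloc_q buffers)
+ * without copying, so that avp_fifo_get() can reserve exactly
+ * "segments" buffers in a single call; the second loop performs the
+ * copies. For example, with avail = 8 host buffers and three packets
+ * needing 3, 4 and 3 buffers, only the first two packets (7 buffers)
+ * are sent in this burst.
+ */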
+
+static uint16_t
+avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct avp_queue *txq = (struct avp_queue *)tx_queue;
+ struct rte_avp_desc *avp_bufs[AVP_MAX_TX_BURST];
+ struct avp_dev *avp = txq->avp;
+ struct rte_avp_desc *pkt_buf;
+ struct rte_avp_fifo *alloc_q;
+ struct rte_avp_fifo *tx_q;
+ unsigned int count, avail, n;
+ struct rte_mbuf *m;
+ unsigned int pkt_len;
+ unsigned int tx_bytes;
+ char *pkt_data;
+ unsigned int i;
+
+ tx_q = avp->tx_q[txq->queue_id];
+ alloc_q = avp->alloc_q[txq->queue_id];
+
+ /* limit the number of transmitted packets to the max burst size */
+ if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
+ nb_pkts = AVP_MAX_TX_BURST;
+
+ /* determine how many buffers are available to copy into */
+ avail = avp_fifo_count(alloc_q);
+
+ /* determine how many slots are available in the transmit queue */
+ count = avp_fifo_free_count(tx_q);
+
+ /* determine how many packets can be sent */
+ count = RTE_MIN(count, avail);
+ count = RTE_MIN(count, nb_pkts);
+
+ if (unlikely(count == 0)) {
+ /* no available buffers, or no space on the tx queue */
+ txq->errors += nb_pkts;
+ return 0;
+ }
+
+ PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
+ count, tx_q);
+
+ /* retrieve sufficient send buffers */
+ n = avp_fifo_get(alloc_q, (void **)&avp_bufs, count);
+ if (unlikely(n != count)) {
+ txq->errors++;
+ return 0;
+ }
+
+ tx_bytes = 0;
+ for (i = 0; i < count; i++) {
+ /* prefetch next entry while processing the current one */
+ if (i < count - 1) {
+ pkt_buf = avp_dev_translate_buffer(avp,
+ avp_bufs[i + 1]);
+ rte_prefetch0(pkt_buf);
+ }
+
+ /* process each packet to be transmitted */
+ m = tx_pkts[i];
+
+ /* Adjust pointers for guest addressing */
+ pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+ pkt_len = rte_pktmbuf_pkt_len(m);
+
+ if (unlikely((pkt_len > avp->guest_mbuf_size) ||
+ (pkt_len > avp->host_mbuf_size))) {
+ /*
+ * application should be using the scattered transmit
+ * function; send it truncated to avoid the performance
+ * hit of having to manage returning the already
+ * allocated buffer to the free list. This should not
+ * happen since the application should have set the
+ * max_rx_pkt_len based on its MTU and it should be
+ * policing its own packet sizes.
+ */
+ txq->errors++;
+ pkt_len = RTE_MIN(avp->guest_mbuf_size,
+ avp->host_mbuf_size);
+ }
+
+ /* copy data out of our mbuf and into the AVP buffer */
+ rte_memcpy(pkt_data, rte_pktmbuf_mtod(m, void *), pkt_len);
+ pkt_buf->pkt_len = pkt_len;
+ pkt_buf->data_len = pkt_len;
+ pkt_buf->nb_segs = 1;
+ pkt_buf->next = NULL;
+
+ if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
+ pkt_buf->vlan_tci = m->vlan_tci;
+ }
+
+ tx_bytes += pkt_len;
+
+ /* free the original mbuf */
+ rte_pktmbuf_free(m);
+ }
+
+ txq->packets += count;
+ txq->bytes += tx_bytes;
+
+ /* send the packets */
+ n = avp_fifo_put(tx_q, (void **)&avp_bufs[0], count);
+
+ return n;
+}
+
static void
avp_dev_rx_queue_release(void *rx_queue)
{
}
}
+static void
+avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ unsigned int i;
+
+ for (i = 0; i < avp->num_rx_queues; i++) {
+ struct avp_queue *rxq = avp->dev_data->rx_queues[i];
+
+ if (rxq) {
+ stats->ipackets += rxq->packets;
+ stats->ibytes += rxq->bytes;
+ stats->ierrors += rxq->errors;
+
+ /* only the first RTE_ETHDEV_QUEUE_STAT_CNTRS queues
+ * have per-queue counters
+ */
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_ipackets[i] += rxq->packets;
+ stats->q_ibytes[i] += rxq->bytes;
+ stats->q_errors[i] += rxq->errors;
+ }
+ }
+ }
+
+ for (i = 0; i < avp->num_tx_queues; i++) {
+ struct avp_queue *txq = avp->dev_data->tx_queues[i];
+
+ if (txq) {
+ stats->opackets += txq->packets;
+ stats->obytes += txq->bytes;
+ stats->oerrors += txq->errors;
+
+ if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
+ stats->q_opackets[i] += txq->packets;
+ stats->q_obytes[i] += txq->bytes;
+ stats->q_errors[i] += txq->errors;
+ }
+ }
+ }
+}
+
+static void
+avp_dev_stats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ unsigned int i;
+
+ for (i = 0; i < avp->num_rx_queues; i++) {
+ struct avp_queue *rxq = avp->dev_data->rx_queues[i];
+
+ if (rxq) {
+ rxq->bytes = 0;
+ rxq->packets = 0;
+ rxq->errors = 0;
+ }
+ }
+
+ for (i = 0; i < avp->num_tx_queues; i++) {
+ struct avp_queue *txq = avp->dev_data->tx_queues[i];
+
+ if (txq) {
+ txq->bytes = 0;
+ txq->packets = 0;
+ txq->errors = 0;
+ }
+ }
+}
+
RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);