#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_dev.h>
+#include <rte_net.h>
#include <rte_bus_vmbus.h>
#include <rte_spinlock.h>
#define HN_TXCOPY_THRESHOLD 512
#define HN_RXCOPY_THRESHOLD 256
-#define HN_RXQ_EVENT_DEFAULT 1024
+#define HN_RXQ_EVENT_DEFAULT 2048
struct hn_rxinfo {
uint32_t vlan_info;
rte_free(txq);
}
+/*
+ * Report Tx queue parameters for rte_eth_tx_queue_info_get().
+ * The descriptor count reported is the size of the shared Tx
+ * descriptor pool (per-device, not per-queue).
+ */
+void
+hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
+		     struct rte_eth_txq_info *qinfo)
+{
+	struct hn_data *hv = dev->data->dev_private;
+	/* Fix: a Tx queue must be looked up in tx_queues, not rx_queues */
+	struct hn_tx_queue *txq = dev->data->tx_queues[queue_idx];
+
+	qinfo->conf.tx_free_thresh = txq->free_thresh;
+	qinfo->nb_desc = hv->tx_pool->size;
+}
+
static void
hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id,
unsigned long xactid, const struct hn_nvs_rndis_ack *ack)
m->port = rxq->port_id;
m->pkt_len = dlen;
m->data_len = dlen;
+ m->packet_type = rte_net_get_ptype(m, NULL,
+ RTE_PTYPE_L2_MASK |
+ RTE_PTYPE_L3_MASK |
+ RTE_PTYPE_L4_MASK);
if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) {
m->vlan_tci = info->vlan_info;
if (info->csum_info & (NDIS_RXCSUM_INFO_UDPCS_OK
| NDIS_RXCSUM_INFO_TCPCS_OK))
m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ else if (info->csum_info & (NDIS_RXCSUM_INFO_TCPCS_FAILED
+ | NDIS_RXCSUM_INFO_UDPCS_FAILED))
+ m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}
if (info->hash_info != HN_NDIS_HASH_INFO_INVALID) {
m->hash.rss = info->hash_value;
}
- PMD_RX_LOG(DEBUG, "port %u:%u RX id %" PRIu64 " size %u ol_flags %#" PRIx64,
+ PMD_RX_LOG(DEBUG,
+ "port %u:%u RX id %"PRIu64" size %u type %#x ol_flags %#"PRIx64,
rxq->port_id, rxq->queue_id, rxb->xactid,
- m->pkt_len, m->ol_flags);
+ m->pkt_len, m->packet_type, m->ol_flags);
++rxq->stats.packets;
rxq->stats.bytes += m->pkt_len;
{
struct hn_rx_queue *rxq;
- rxq = rte_zmalloc_socket("HN_RXQ", sizeof(*rxq),
+ rxq = rte_zmalloc_socket("HN_RXQ",
+ sizeof(*rxq) + HN_RXQ_EVENT_DEFAULT,
RTE_CACHE_LINE_SIZE, socket_id);
if (rxq) {
rxq->hv = hv;
rte_spinlock_init(&rxq->ring_lock);
rxq->port_id = hv->port_id;
rxq->queue_id = queue_id;
-
- rxq->event_sz = HN_RXQ_EVENT_DEFAULT;
- rxq->event_buf = rte_malloc_socket("RX_EVENTS",
- rxq->event_sz,
- RTE_CACHE_LINE_SIZE,
- socket_id);
- if (!rxq->event_buf) {
- rte_free(rxq);
- rxq = NULL;
- }
}
return rxq;
}
}
}
+/*
+ * Report Rx queue parameters for rte_eth_rx_queue_info_get().
+ * Descriptor count is the capacity of the staging ring used to
+ * hold received mbufs.
+ */
+void
+hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
+		     struct rte_eth_rxq_info *qinfo)
+{
+	const struct hn_rx_queue *q = dev->data->rx_queues[queue_idx];
+
+	qinfo->mp = q->mb_pool;
+	qinfo->nb_desc = rte_ring_get_capacity(q->rx_ring);
+	qinfo->scattered_rx = 1;	/* this driver always delivers scattered Rx */
+}
+
static void
hn_nvs_handle_notify(const struct vmbus_chanpkt_hdr *pkthdr,
const void *data)
{
struct rte_eth_dev *dev = &rte_eth_devices[hv->port_id];
struct hn_rx_queue *rxq;
+ uint32_t bytes_read = 0;
int ret = 0;
rxq = queue_id == 0 ? hv->primary : dev->data->rx_queues[queue_id];
for (;;) {
const struct vmbus_chanpkt_hdr *pkt;
- uint32_t len = rxq->event_sz;
+ uint32_t len = HN_RXQ_EVENT_DEFAULT;
const void *data;
ret = rte_vmbus_chan_recv_raw(rxq->chan, rxq->event_buf, &len);
if (ret == -EAGAIN)
break; /* ring is empty */
- if (ret == -ENOBUFS) {
- /* expanded buffer needed */
- len = rte_align32pow2(len);
- PMD_DRV_LOG(DEBUG, "expand event buf to %u", len);
-
- rxq->event_buf = rte_realloc(rxq->event_buf,
- len, RTE_CACHE_LINE_SIZE);
- if (rxq->event_buf) {
- rxq->event_sz = len;
- continue;
- }
-
- rte_exit(EXIT_FAILURE, "can not expand event buf!\n");
- break;
- }
-
- if (ret != 0) {
- PMD_DRV_LOG(ERR, "vmbus ring buffer error: %d", ret);
- break;
- }
+ else if (ret == -ENOBUFS)
+ rte_exit(EXIT_FAILURE, "event buffer not big enough (%u < %u)",
+ HN_RXQ_EVENT_DEFAULT, len);
+ else if (ret <= 0)
+ rte_exit(EXIT_FAILURE,
+ "vmbus ring buffer error: %d", ret);
+ bytes_read += ret;
pkt = (const struct vmbus_chanpkt_hdr *)rxq->event_buf;
data = (char *)rxq->event_buf + vmbus_chanpkt_getlen(pkt->hlen);
PMD_DRV_LOG(ERR, "unknown chan pkt %u", pkt->type);
break;
}
+
+ if (rxq->rx_ring && rte_ring_full(rxq->rx_ring))
+ break;
}
- rte_spinlock_unlock(&rxq->ring_lock);
- if (unlikely(ret != -EAGAIN))
- PMD_DRV_LOG(ERR, "channel receive failed: %d", ret);
+ if (bytes_read > 0)
+ rte_vmbus_chan_signal_read(rxq->chan, bytes_read);
+
+ rte_spinlock_unlock(&rxq->ring_lock);
}
static void hn_append_to_chim(struct hn_tx_queue *txq,
pkt = hn_try_txagg(hv, txq, pkt_size);
if (unlikely(!pkt))
- goto fail;
+ break;
hn_encap(pkt, txq->queue_id, m);
hn_append_to_chim(txq, pkt, m);
} else {
txd = hn_new_txd(hv, txq);
if (unlikely(!txd))
- goto fail;
+ break;
}
pkt = txd->rndis_pkt;
if (unlikely(hv->closed))
return 0;
- /* Get all outstanding receive completions */
- hn_process_events(hv, rxq->queue_id);
+ /* If ring is empty then process more */
+ if (rte_ring_count(rxq->rx_ring) < nb_pkts)
+ hn_process_events(hv, rxq->queue_id);
/* Get mbufs off staging ring */
return rte_ring_sc_dequeue_burst(rxq->rx_ring, (void **)rx_pkts,