/* Minimum space required for a packet */
#define HN_PKTSIZE_MIN(align) \
- RTE_ALIGN(ETHER_MIN_LEN + HN_RNDIS_PKT_LEN, align)
+ RTE_ALIGN(RTE_ETHER_MIN_LEN + HN_RNDIS_PKT_LEN, align)
#define DEFAULT_TX_FREE_THRESH 32U
hn_update_packet_stats(struct hn_stats *stats, const struct rte_mbuf *m)
{
uint32_t s = m->pkt_len;
- const struct ether_addr *ea;
+ const struct rte_ether_addr *ea;
if (s == 64) {
stats->size_bins[1]++;
stats->size_bins[0]++;
else if (s < 1519)
stats->size_bins[6]++;
- else if (s >= 1519)
+ else
stats->size_bins[7]++;
}
- ea = rte_pktmbuf_mtod(m, const struct ether_addr *);
- if (is_multicast_ether_addr(ea)) {
- if (is_broadcast_ether_addr(ea))
+ ea = rte_pktmbuf_mtod(m, const struct rte_ether_addr *);
+ if (rte_is_multicast_ether_addr(ea)) {
+ if (rte_is_broadcast_ether_addr(ea))
stats->broadcast++;
else
stats->multicast++;
return 0;
}
+/* Release the per-device transmit buffer mempool, if one exists.
+ * The pointer is reset to NULL afterwards so a repeated uninit (or a
+ * later re-init) cannot double-free.
+ */
+void
+hn_tx_pool_uninit(struct rte_eth_dev *dev)
+{
+ struct hn_data *hv = dev->data->dev_private;
+
+ if (hv->tx_pool) {
+ rte_mempool_free(hv->tx_pool);
+ hv->tx_pool = NULL;
+ }
+}
+
static void hn_reset_txagg(struct hn_tx_queue *txq)
{
txq->agg_szleft = txq->agg_szmax;
struct hn_data *hv = dev->data->dev_private;
struct hn_tx_queue *txq;
uint32_t tx_free_thresh;
+ int err;
PMD_INIT_FUNC_TRACE();
hn_reset_txagg(txq);
- dev->data->tx_queues[queue_idx] = txq;
+ err = hn_vf_tx_queue_setup(dev, queue_idx, nb_desc,
+ socket_id, tx_conf);
+ if (err) {
+ rte_free(txq);
+ return err;
+ }
+ dev->data->tx_queues[queue_idx] = txq;
return 0;
}
rte_free(txq);
}
-void
-hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
- struct rte_eth_txq_info *qinfo)
-{
- struct hn_data *hv = dev->data->dev_private;
- struct hn_tx_queue *txq = dev->data->rx_queues[queue_idx];
-
- qinfo->conf.tx_free_thresh = txq->free_thresh;
- qinfo->nb_desc = hv->tx_pool->size;
-}
-
static void
hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id,
unsigned long xactid, const struct hn_nvs_rndis_ack *ack)
if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) {
m->vlan_tci = info->vlan_info;
m->ol_flags |= PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
+
+ /* NDIS always strips tag, put it back if necessary */
+ if (!hv->vlan_strip && rte_vlan_insert(&m)) {
+ PMD_DRV_LOG(DEBUG, "vlan insert failed");
+ ++rxq->stats.errors;
+ rte_pktmbuf_free(m);
+ return;
+ }
}
if (info->csum_info != HN_NDIS_RXCSUM_INFO_INVALID) {
if (unlikely(data_off + data_len > pkt->len))
goto error;
- if (unlikely(data_len < ETHER_HDR_LEN))
+ if (unlikely(data_len < RTE_ETHER_HDR_LEN))
goto error;
hn_rxpkt(rxq, rxb, data, data_off, data_len, &info);
hn_rx_buf_release(rxb);
}
+/*
+ * Called when NVS inband events are received.
+ * Send up a two part message with port_id and the NVS message
+ * to the pipe to the netvsc-vf-event control thread.
+ * (Presumably the pipe write happens inside hn_nvs_handle_vfassoc()
+ * for the VF-association case -- confirm against that helper.)
+ */
+static void hn_nvs_handle_notify(struct rte_eth_dev *dev,
+ const struct vmbus_chanpkt_hdr *pkt,
+ const void *data)
+{
+ const struct hn_nvs_hdr *hdr = data;
+
+ /* NOTE(review): the version this replaces checked
+ * vmbus_chanpkt_datalen(pkthdr) >= sizeof(*hdr) before reading
+ * hdr->type; no such check is visible here -- confirm the caller
+ * validates the inband packet length before dispatching.
+ */
+ switch (hdr->type) {
+ case NVS_TYPE_TXTBL_NOTE:
+ /* Transmit indirection table has locking problems
+ * in DPDK and therefore not implemented
+ */
+ PMD_DRV_LOG(DEBUG, "host notify of transmit indirection table");
+ break;
+
+ case NVS_TYPE_VFASSOC_NOTE:
+ /* Host announcing VF add/remove; hand off to the VF logic. */
+ hn_nvs_handle_vfassoc(dev, pkt, data);
+ break;
+
+ default:
+ /* Unknown inband event types are logged and ignored. */
+ PMD_DRV_LOG(INFO,
+ "got notify, nvs type %u", hdr->type);
+ }
+}
+
struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
uint16_t queue_id,
unsigned int socket_id)
hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t nb_desc,
unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf __rte_unused,
+ const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
struct hn_data *hv = dev->data->dev_private;
char ring_name[RTE_RING_NAMESIZE];
struct hn_rx_queue *rxq;
unsigned int count;
+ int error = -ENOMEM;
PMD_INIT_FUNC_TRACE();
if (!rxq->rx_ring)
goto fail;
+ error = hn_vf_rx_queue_setup(dev, queue_idx, nb_desc,
+ socket_id, rx_conf, mp);
+ if (error)
+ goto fail;
+
dev->data->rx_queues[queue_idx] = rxq;
return 0;
rte_ring_free(rxq->rx_ring);
rte_free(rxq->event_buf);
rte_free(rxq);
- return -ENOMEM;
+ return error;
}
-void
-hn_dev_rx_queue_release(void *arg)
+static void
+hn_rx_queue_free(struct hn_rx_queue *rxq, bool keep_primary)
{
- struct hn_rx_queue *rxq = arg;
-
- PMD_INIT_FUNC_TRACE();
if (!rxq)
return;
rxq->rx_ring = NULL;
rxq->mb_pool = NULL;
- if (rxq != rxq->hv->primary) {
- rte_free(rxq->event_buf);
- rte_free(rxq);
- }
-}
+ hn_vf_rx_queue_release(rxq->hv, rxq->queue_id);
-int
-hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt)
-{
- struct hn_tx_queue *txq = arg;
+ /* Keep primary queue to allow for control operations */
+ if (keep_primary && rxq == rxq->hv->primary)
+ return;
- return hn_process_events(txq->hv, txq->queue_id, free_cnt);
+ rte_free(rxq->event_buf);
+ rte_free(rxq);
}
void
-hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
- struct rte_eth_rxq_info *qinfo)
+hn_dev_rx_queue_release(void *arg)
{
- struct hn_rx_queue *rxq = dev->data->rx_queues[queue_idx];
+ struct hn_rx_queue *rxq = arg;
- qinfo->mp = rxq->mb_pool;
- qinfo->scattered_rx = 1;
- qinfo->nb_desc = rte_ring_get_capacity(rxq->rx_ring);
+ PMD_INIT_FUNC_TRACE();
+
+ hn_rx_queue_free(rxq, true);
}
-static void
-hn_nvs_handle_notify(const struct vmbus_chanpkt_hdr *pkthdr,
- const void *data)
+int
+hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt)
{
- const struct hn_nvs_hdr *hdr = data;
-
- if (unlikely(vmbus_chanpkt_datalen(pkthdr) < sizeof(*hdr))) {
- PMD_DRV_LOG(ERR, "invalid nvs notify");
- return;
- }
+ struct hn_tx_queue *txq = arg;
- PMD_DRV_LOG(INFO,
- "got notify, nvs type %u", hdr->type);
+ return hn_process_events(txq->hv, txq->queue_id, free_cnt);
}
/*
break;
case VMBUS_CHANPKT_TYPE_INBAND:
- hn_nvs_handle_notify(pkt, data);
+ hn_nvs_handle_notify(dev, pkt, data);
break;
default:
hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct hn_tx_queue *txq = ptxq;
+ uint16_t queue_id = txq->queue_id;
struct hn_data *hv = txq->hv;
+ struct rte_eth_dev *vf_dev;
bool need_sig = false;
uint16_t nb_tx;
int ret;
if (unlikely(hv->closed))
return 0;
+ /* Transmit over VF if present and up */
+ vf_dev = hn_get_vf_dev(hv);
+
+ if (vf_dev && vf_dev->data->dev_started) {
+ void *sub_q = vf_dev->data->tx_queues[queue_id];
+
+ return (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts);
+ }
+
if (rte_mempool_avail_count(hv->tx_pool) <= txq->free_thresh)
hn_process_events(hv, txq->queue_id, 0);
if (unlikely(!pkt))
break;
- hn_encap(pkt, txq->queue_id, m);
+ hn_encap(pkt, queue_id, m);
hn_append_to_chim(txq, pkt, m);
rte_pktmbuf_free(m);
txd->data_size += m->pkt_len;
++txd->packets;
- hn_encap(pkt, txq->queue_id, m);
+ hn_encap(pkt, queue_id, m);
ret = hn_xmit_sg(txq, txd, m, &need_sig);
if (unlikely(ret != 0)) {
return nb_tx;
}
+/* Poll the VF (accelerated path) device on the queue that shadows this
+ * synthetic rx queue.  Mbufs received from the VF are relabeled with the
+ * synthetic device's port id so the application sees one logical port.
+ * Returns the number of packets placed in rx_pkts.
+ */
+static uint16_t
+hn_recv_vf(uint16_t vf_port, const struct hn_rx_queue *rxq,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ uint16_t i, n;
+
+ /* Caller may have filled the burst entirely from the vmbus ring. */
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ n = rte_eth_rx_burst(vf_port, rxq->queue_id, rx_pkts, nb_pkts);
+
+ /* relabel the received mbufs */
+ for (i = 0; i < n; i++)
+ rx_pkts[i]->port = rxq->port_id;
+
+ return n;
+}
+
uint16_t
hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct hn_rx_queue *rxq = prxq;
struct hn_data *hv = rxq->hv;
+ struct rte_eth_dev *vf_dev;
+ uint16_t nb_rcv;
if (unlikely(hv->closed))
return 0;
- /* If ring is empty then process more */
- if (rte_ring_count(rxq->rx_ring) < nb_pkts)
+ /* Receive from VF if present and up */
+ vf_dev = hn_get_vf_dev(hv);
+
+ /* Check for new completions */
+ if (likely(rte_ring_count(rxq->rx_ring) < nb_pkts))
hn_process_events(hv, rxq->queue_id, 0);
- /* Get mbufs off staging ring */
- return rte_ring_sc_dequeue_burst(rxq->rx_ring, (void **)rx_pkts,
- nb_pkts, NULL);
+ /* Always check the vmbus path for multicast and new flows */
+ nb_rcv = rte_ring_sc_dequeue_burst(rxq->rx_ring,
+ (void **)rx_pkts, nb_pkts, NULL);
+
+ /* If VF is available, check that as well */
+ if (vf_dev && vf_dev->data->dev_started)
+ nb_rcv += hn_recv_vf(vf_dev->data->port_id, rxq,
+ rx_pkts + nb_rcv, nb_pkts - nb_rcv);
+
+ return nb_rcv;
+}
+
+/* Free every rx and tx queue of the device and zero the queue counts.
+ * Unlike the per-queue release callback (which passes keep_primary=true),
+ * this passes keep_primary=false and so frees the primary rx queue as
+ * well; it must only be used on final device teardown.
+ */
+void
+hn_dev_free_queues(struct rte_eth_dev *dev)
+{
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct hn_rx_queue *rxq = dev->data->rx_queues[i];
+
+ hn_rx_queue_free(rxq, false);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ hn_dev_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}