X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fnetvsc%2Fhn_rxtx.c;h=c6bf7cc132335438ea44c074a533185149fc912c;hb=b797b049b50656afb709718a6f75751b49cd515e;hp=65f1abae510d815d8a36dca28407d308ff37cac6;hpb=d9fecbe97ba7c1ad0dedf33910d8b4e4e444979a;p=dpdk.git

diff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c
index 65f1abae51..c6bf7cc132 100644
--- a/drivers/net/netvsc/hn_rxtx.c
+++ b/drivers/net/netvsc/hn_rxtx.c
@@ -40,9 +40,6 @@
 	(sizeof(struct vmbus_chanpkt_hdr) + sizeof(struct hn_nvs_rndis))
 
 #define HN_TXD_CACHE_SIZE	32 /* per cpu tx_descriptor pool cache */
-#define HN_TXCOPY_THRESHOLD	512
-
-#define HN_RXCOPY_THRESHOLD	256
 #define HN_RXQ_EVENT_DEFAULT	2048
 
 struct hn_rxinfo {
@@ -252,16 +249,6 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
-	txq = rte_zmalloc_socket("HN_TXQ", sizeof(*txq), RTE_CACHE_LINE_SIZE,
-				 socket_id);
-	if (!txq)
-		return -ENOMEM;
-
-	txq->hv = hv;
-	txq->chan = hv->channels[queue_idx];
-	txq->port_id = dev->data->port_id;
-	txq->queue_id = queue_idx;
-
 	tx_free_thresh = tx_conf->tx_free_thresh;
 	if (tx_free_thresh == 0)
 		tx_free_thresh = RTE_MIN(nb_desc / 4,
@@ -276,6 +263,15 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
+	txq = rte_zmalloc_socket("HN_TXQ", sizeof(*txq), RTE_CACHE_LINE_SIZE,
+				 socket_id);
+	if (!txq)
+		return -ENOMEM;
+
+	txq->hv = hv;
+	txq->chan = hv->channels[queue_idx];
+	txq->port_id = dev->data->port_id;
+	txq->queue_id = queue_idx;
 	txq->free_thresh = tx_free_thresh;
 
 	snprintf(name, sizeof(name),
@@ -284,10 +280,15 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	PMD_INIT_LOG(DEBUG, "TX descriptor pool %s n=%u size=%zu",
 		     name, nb_desc, sizeof(struct hn_txdesc));
 
-	txq->tx_rndis = rte_calloc("hn_txq_rndis", nb_desc,
-				   HN_RNDIS_PKT_ALIGNED, RTE_CACHE_LINE_SIZE);
-	if (txq->tx_rndis == NULL)
+	txq->tx_rndis_mz = rte_memzone_reserve_aligned(name,
+			nb_desc * HN_RNDIS_PKT_ALIGNED, rte_socket_id(),
+			RTE_MEMZONE_IOVA_CONTIG, HN_RNDIS_PKT_ALIGNED);
+	if (!txq->tx_rndis_mz) {
+		err = -rte_errno;
 		goto error;
+	}
+	txq->tx_rndis = txq->tx_rndis_mz->addr;
+	txq->tx_rndis_iova = txq->tx_rndis_mz->iova;
 
 	txq->txdesc_pool = rte_mempool_create(name, nb_desc,
 					      sizeof(struct hn_txdesc),
@@ -316,7 +317,7 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
 error:
 	if (txq->txdesc_pool)
 		rte_mempool_free(txq->txdesc_pool);
-	rte_free(txq->tx_rndis);
+	rte_memzone_free(txq->tx_rndis_mz);
 	rte_free(txq);
 	return err;
 }
@@ -367,7 +368,7 @@ hn_dev_tx_queue_release(void *arg)
 
 	if (txq->txdesc_pool)
 		rte_mempool_free(txq->txdesc_pool);
-	rte_free(txq->tx_rndis);
+	rte_memzone_free(txq->tx_rndis_mz);
 	rte_free(txq);
 }
 
@@ -569,7 +570,7 @@ static void hn_rxpkt(struct hn_rx_queue *rxq, struct hn_rx_bufinfo *rxb,
 	 * For large packets, avoid copy if possible but need to keep
 	 * some space available in receive area for later packets.
 	 */
-	if (dlen >= HN_RXCOPY_THRESHOLD &&
+	if (hv->rx_extmbuf_enable && dlen > hv->rx_copybreak &&
 	    (uint32_t)rte_atomic32_read(&rxq->rxbuf_outstanding) <
 			hv->rxbuf_section_cnt / 2) {
 		struct rte_mbuf_ext_shared_info *shinfo;
@@ -1386,7 +1387,8 @@ static unsigned int hn_get_slots(const struct rte_mbuf *m)
 		unsigned int size = rte_pktmbuf_data_len(m);
 		unsigned int offs = rte_mbuf_data_iova(m) & PAGE_MASK;
 
-		slots += (offs + size + PAGE_SIZE - 1) / PAGE_SIZE;
+		slots += (offs + size + rte_mem_page_size() - 1) /
+				rte_mem_page_size();
 		m = m->next;
 	}
 
@@ -1401,12 +1403,13 @@ static unsigned int hn_fill_sg(struct vmbus_gpa *sg,
 
 	while (m) {
 		rte_iova_t addr = rte_mbuf_data_iova(m);
-		unsigned int page = addr / PAGE_SIZE;
+		unsigned int page = addr / rte_mem_page_size();
 		unsigned int offset = addr & PAGE_MASK;
 		unsigned int len = rte_pktmbuf_data_len(m);
 
 		while (len > 0) {
-			unsigned int bytes = RTE_MIN(len, PAGE_SIZE - offset);
+			unsigned int bytes = RTE_MIN(len,
+					rte_mem_page_size() - offset);
 
 			sg[segs].page = page;
 			sg[segs].ofs = offset;
@@ -1446,14 +1449,10 @@ static int hn_xmit_sg(struct hn_tx_queue *txq,
 		hn_rndis_dump(txd->rndis_pkt);
 
 	/* pass IOVA of rndis header in first segment */
-	addr = rte_malloc_virt2iova(txq->tx_rndis);
-	if (unlikely(addr == RTE_BAD_IOVA)) {
-		PMD_DRV_LOG(ERR, "RNDIS transmit can not get iova");
-		return -EINVAL;
-	}
-	addr = addr + ((char *)txd->rndis_pkt - (char *)txq->tx_rndis);
+	addr = txq->tx_rndis_iova +
+		((char *)txd->rndis_pkt - (char *)txq->tx_rndis);
 
-	sg[0].page = addr / PAGE_SIZE;
+	sg[0].page = addr / rte_mem_page_size();
 	sg[0].ofs = addr & PAGE_MASK;
 	sg[0].len = RNDIS_PACKET_MSG_OFFSET_ABS(hn_rndis_pktlen(txd->rndis_pkt));
 	segs = 1;
@@ -1495,16 +1494,20 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		hn_process_events(hv, txq->queue_id, 0);
 
 	/* Transmit over VF if present and up */
-	rte_rwlock_read_lock(&hv->vf_lock);
-	vf_dev = hn_get_vf_dev(hv);
-	if (vf_dev && vf_dev->data->dev_started) {
-		void *sub_q = vf_dev->data->tx_queues[queue_id];
-
-		nb_tx = (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts);
+	if (hv->vf_ctx.vf_vsc_switched) {
+		rte_rwlock_read_lock(&hv->vf_lock);
+		vf_dev = hn_get_vf_dev(hv);
+		if (hv->vf_ctx.vf_vsc_switched && vf_dev &&
+		    vf_dev->data->dev_started) {
+			void *sub_q = vf_dev->data->tx_queues[queue_id];
+
+			nb_tx = (*vf_dev->tx_pkt_burst)
+					(sub_q, tx_pkts, nb_pkts);
+			rte_rwlock_read_unlock(&hv->vf_lock);
+			return nb_tx;
+		}
 		rte_rwlock_read_unlock(&hv->vf_lock);
-		return nb_tx;
 	}
-	rte_rwlock_read_unlock(&hv->vf_lock);
 
 	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
 		struct rte_mbuf *m = tx_pkts[nb_tx];
@@ -1517,7 +1520,8 @@ hn_xmit_pkts(void *ptxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			break;
 
 		/* For small packets aggregate them in chimney buffer */
-		if (m->pkt_len < HN_TXCOPY_THRESHOLD && pkt_size <= txq->agg_szmax) {
+		if (m->pkt_len <= hv->tx_copybreak &&
+		    pkt_size <= txq->agg_szmax) {
 			/* If this packet will not fit, then flush */
 			if (txq->agg_pktleft == 0 ||
 			    RTE_ALIGN(pkt_size, txq->agg_align) > txq->agg_szleft) {
@@ -1616,13 +1620,17 @@ hn_recv_pkts(void *prxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 					   (void **)rx_pkts, nb_pkts, NULL);
 
 	/* If VF is available, check that as well */
-	rte_rwlock_read_lock(&hv->vf_lock);
-	vf_dev = hn_get_vf_dev(hv);
-	if (vf_dev && vf_dev->data->dev_started)
-		nb_rcv += hn_recv_vf(vf_dev->data->port_id, rxq,
-				     rx_pkts + nb_rcv, nb_pkts - nb_rcv);
+	if (hv->vf_ctx.vf_vsc_switched) {
+		rte_rwlock_read_lock(&hv->vf_lock);
+		vf_dev = hn_get_vf_dev(hv);
+		if (hv->vf_ctx.vf_vsc_switched && vf_dev &&
+		    vf_dev->data->dev_started)
+			nb_rcv += hn_recv_vf(vf_dev->data->port_id, rxq,
+					     rx_pkts + nb_rcv,
+					     nb_pkts - nb_rcv);
 
-	rte_rwlock_read_unlock(&hv->vf_lock);
+		rte_rwlock_read_unlock(&hv->vf_lock);
+	}
 
 	return nb_rcv;
 }