	(sizeof(struct vmbus_chanpkt_hdr) + sizeof(struct hn_nvs_rndis))
#define HN_TXD_CACHE_SIZE	32 /* per cpu tx_descriptor pool cache */
-#define HN_TXCOPY_THRESHOLD	512
-
-#define HN_RXCOPY_THRESHOLD	256
#define HN_RXQ_EVENT_DEFAULT	2048
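The compile-time HN_TXCOPY_THRESHOLD and HN_RXCOPY_THRESHOLD constants go away in favor of per-device hv->tx_copybreak and hv->rx_copybreak values, so the copy/zero-copy cutoff becomes a startup tunable rather than a build-time one. A minimal sketch of how such values could be read from devargs with rte_kvargs; the key names mirror the new fields, but the helper below is illustrative, not the PMD's actual option parsing:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#include <rte_common.h>
#include <rte_kvargs.h>

/* Illustrative only: parse "tx_copybreak" / "rx_copybreak" from a
 * devargs string and store them where the datapath can read them. */
static int
copybreak_handler(const char *key __rte_unused, const char *value,
		  void *opaque)
{
	uint32_t *copybreak = opaque;

	*copybreak = (uint32_t)strtoul(value, NULL, 0);
	return 0;
}

static int
parse_copybreak(const char *args, uint32_t *tx_cb, uint32_t *rx_cb)
{
	static const char * const keys[] = {
		"tx_copybreak", "rx_copybreak", NULL
	};
	struct rte_kvargs *kvlist;
	int ret = 0;

	kvlist = rte_kvargs_parse(args, keys);
	if (kvlist == NULL)
		return -EINVAL;

	ret |= rte_kvargs_process(kvlist, "tx_copybreak",
				  copybreak_handler, tx_cb);
	ret |= rte_kvargs_process(kvlist, "rx_copybreak",
				  copybreak_handler, rx_cb);
	rte_kvargs_free(kvlist);
	return ret;
}

Judging from the comparisons later in the patch (pkt_len <= tx_copybreak on transmit, dlen > rx_copybreak on receive), setting tx_copybreak to 0 would disable the chimney copy entirely, which is convenient when comparing the two transmit paths.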
struct hn_rxinfo {
	 * For large packets, avoid copy if possible but need to keep
	 * some space available in receive area for later packets.
	 */
-	if (dlen >= HN_RXCOPY_THRESHOLD &&
+	if (hv->rx_extmbuf_enable && dlen > hv->rx_copybreak &&
	    (uint32_t)rte_atomic32_read(&rxq->rxbuf_outstanding) <
	    hv->rxbuf_section_cnt / 2) {
		struct rte_mbuf_ext_shared_info *shinfo;
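Taken together, the rewritten condition attaches receive data zero-copy (as an external mbuf) only when three things hold: the rx_extmbuf_enable knob is set, the packet is larger than rx_copybreak, and fewer than half of the rxbuf sections are currently loaned out, so later packets cannot starve the receive area. The predicate in isolation, with a stand-in struct for the hn_data/hn_rx_queue fields it reads:

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for the handful of driver fields the check reads. */
struct rx_policy {
	bool zerocopy_enable;	/* hv->rx_extmbuf_enable */
	uint32_t copybreak;	/* hv->rx_copybreak */
	uint32_t sections_total;	/* hv->rxbuf_section_cnt */
	uint32_t sections_loaned;	/* rxq->rxbuf_outstanding */
};

/* True when the packet should reference the receive area directly
 * instead of being copied into a fresh mbuf. */
static bool
rx_use_zerocopy(const struct rx_policy *p, uint32_t dlen)
{
	return p->zerocopy_enable &&
	       dlen > p->copybreak &&
	       p->sections_loaned < p->sections_total / 2;
}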
		unsigned int size = rte_pktmbuf_data_len(m);
		unsigned int offs = rte_mbuf_data_iova(m) & PAGE_MASK;
-		slots += (offs + size + PAGE_SIZE - 1) / PAGE_SIZE;
+		slots += (offs + size + rte_mem_page_size() - 1) /
+			rte_mem_page_size();
		m = m->next;
	}
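This loop sizes the VMBus gather list: every mbuf segment consumes one slot per page it touches, and since the data may start mid-page the slot count depends on the starting offset as well as the length. The page size is now queried at run time via rte_mem_page_size() instead of assuming a compile-time PAGE_SIZE. The per-segment arithmetic as a standalone helper:

#include <stddef.h>

/* Number of pages spanned by "size" bytes that begin at byte offset
 * "offs" within their first page (round the span up to whole pages). */
static unsigned int
pages_spanned(unsigned int offs, unsigned int size, size_t page_size)
{
	return (offs + size + page_size - 1) / page_size;
}

With 4096-byte pages, pages_spanned(4000, 100, 4096) returns 2: the 100 bytes straddle a page boundary even though they are far smaller than a page.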
	while (m) {
		rte_iova_t addr = rte_mbuf_data_iova(m);
-		unsigned int page = addr / PAGE_SIZE;
+		unsigned int page = addr / rte_mem_page_size();
		unsigned int offset = addr & PAGE_MASK;
		unsigned int len = rte_pktmbuf_data_len(m);
		while (len > 0) {
-			unsigned int bytes = RTE_MIN(len, PAGE_SIZE - offset);
+			unsigned int bytes = RTE_MIN(len,
+					rte_mem_page_size() - offset);
			sg[segs].page = page;
			sg[segs].ofs = offset;
	addr = txq->tx_rndis_iova +
		((char *)txd->rndis_pkt - (char *)txq->tx_rndis);
-	sg[0].page = addr / PAGE_SIZE;
+	sg[0].page = addr / rte_mem_page_size();
	sg[0].ofs = addr & PAGE_MASK;
	sg[0].len = RNDIS_PACKET_MSG_OFFSET_ABS(hn_rndis_pktlen(txd->rndis_pkt));
	segs = 1;
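The loops above cut each mbuf segment's IOVA range at every page boundary into (page, ofs, len) entries, with slot 0 reserved for the RNDIS header kept in the queue's tx_rndis area. The same splitting, self-contained, using a simplified entry type that carries the page/ofs/len triple the driver's gather entries use; like PAGE_MASK, it assumes a power-of-two page size:

#include <stddef.h>
#include <stdint.h>

/* Simplified gather entry; the driver's equivalent carries the same
 * page/ofs/len triple. */
struct sg_ent {
	uint64_t page;	/* page frame number */
	uint32_t ofs;	/* byte offset within the first page */
	uint32_t len;	/* bytes covered by this entry */
};

/* Cut [addr, addr + len) at page boundaries, mirroring the inner
 * while loop above. Returns the number of entries written.
 * page_size must be a power of two. */
static unsigned int
sg_fill_range(struct sg_ent *sg, uint64_t addr, uint32_t len,
	      size_t page_size)
{
	unsigned int segs = 0;
	uint64_t page = addr / page_size;
	uint32_t offset = addr & (page_size - 1);

	while (len > 0) {
		uint32_t bytes = len < page_size - offset ?
				 len : (uint32_t)(page_size - offset);

		sg[segs].page = page;
		sg[segs].ofs = offset;
		sg[segs].len = bytes;
		segs++;

		len -= bytes;
		page++;
		offset = 0;	/* every page after the first starts at 0 */
	}
	return segs;
}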
	hn_process_events(hv, txq->queue_id, 0);
	/* Transmit over VF if present and up */
-	rte_rwlock_read_lock(&hv->vf_lock);
-	vf_dev = hn_get_vf_dev(hv);
-	if (vf_dev && vf_dev->data->dev_started) {
-		void *sub_q = vf_dev->data->tx_queues[queue_id];
-
-		nb_tx = (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts);
+	if (hv->vf_ctx.vf_vsc_switched) {
+		rte_rwlock_read_lock(&hv->vf_lock);
+		vf_dev = hn_get_vf_dev(hv);
+		if (hv->vf_ctx.vf_vsc_switched && vf_dev &&
+		    vf_dev->data->dev_started) {
+			void *sub_q = vf_dev->data->tx_queues[queue_id];
+
+			nb_tx = (*vf_dev->tx_pkt_burst)
+				(sub_q, tx_pkts, nb_pkts);
+			rte_rwlock_read_unlock(&hv->vf_lock);
+			return nb_tx;
+		}
		rte_rwlock_read_unlock(&hv->vf_lock);
-		return nb_tx;
	}
-	rte_rwlock_read_unlock(&hv->vf_lock);
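The transmit path now takes hv->vf_lock only when a VF is actually switched in: hv->vf_ctx.vf_vsc_switched is tested once without the lock as a cheap fast path, then re-tested under the read lock, since a VF hot-remove can clear it between the first check and the lock acquisition. The pattern reduced to its essentials (names are illustrative, not the PMD's):

#include <stdbool.h>

#include <rte_rwlock.h>

struct vf_path {
	bool switched;	/* flipped on VF hot add/remove, under the
			 * writer side of the lock */
	rte_rwlock_t lock;
};

/* Returns true when the caller may use the VF datapath; on success the
 * read lock is held and must be released by the caller when done. */
static bool
vf_path_enter(struct vf_path *vf)
{
	/* Lockless fast path: the common no-VF case skips the lock. */
	if (!vf->switched)
		return false;

	rte_rwlock_read_lock(&vf->lock);
	/* Re-check under the lock: a hot remove may have raced in. */
	if (!vf->switched) {
		rte_rwlock_read_unlock(&vf->lock);
		return false;
	}
	return true;
}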
	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *m = tx_pkts[nb_tx];
			break;
		/* For small packets aggregate them in chimney buffer */
-		if (m->pkt_len < HN_TXCOPY_THRESHOLD && pkt_size <= txq->agg_szmax) {
+		if (m->pkt_len <= hv->tx_copybreak &&
+		    pkt_size <= txq->agg_szmax) {
			/* If this packet will not fit, then flush */
			if (txq->agg_pktleft == 0 ||
			    RTE_ALIGN(pkt_size, txq->agg_align) > txq->agg_szleft) {
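Packets at or below tx_copybreak are copied into the shared chimney send buffer and aggregated; the buffer is flushed first when either the per-buffer packet quota is used up or the next packet, rounded up to the aggregation alignment, no longer fits in the remaining space. The flush test in isolation, with parameter names mirroring the txq fields:

#include <stdbool.h>
#include <stdint.h>

#include <rte_common.h>

/* True when the aggregation buffer must be flushed before "pkt_size"
 * bytes can be appended. */
static bool
agg_must_flush(uint32_t pkt_size, uint32_t agg_pktleft,
	       uint32_t agg_szleft, uint32_t agg_align)
{
	return agg_pktleft == 0 ||
	       RTE_ALIGN(pkt_size, agg_align) > agg_szleft;
}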
					   (void **)rx_pkts, nb_pkts, NULL);
	/* If VF is available, check that as well */
-	rte_rwlock_read_lock(&hv->vf_lock);
-	vf_dev = hn_get_vf_dev(hv);
-	if (vf_dev && vf_dev->data->dev_started)
-		nb_rcv += hn_recv_vf(vf_dev->data->port_id, rxq,
-				     rx_pkts + nb_rcv, nb_pkts - nb_rcv);
+	if (hv->vf_ctx.vf_vsc_switched) {
+		rte_rwlock_read_lock(&hv->vf_lock);
+		vf_dev = hn_get_vf_dev(hv);
+		if (hv->vf_ctx.vf_vsc_switched && vf_dev &&
+		    vf_dev->data->dev_started)
+			nb_rcv += hn_recv_vf(vf_dev->data->port_id, rxq,
+					     rx_pkts + nb_rcv,
+					     nb_pkts - nb_rcv);
-	rte_rwlock_read_unlock(&hv->vf_lock);
+		rte_rwlock_read_unlock(&hv->vf_lock);
+	}
	return nb_rcv;
}
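Receive keeps the same shape as transmit: the synthetic path's ring is drained first, and only then, when a VF is switched in, is the remaining burst space topped up from the VF queue under the same double-checked vf_lock discipline. The merge step on its own; the callbacks are hypothetical stand-ins for the rte_ring dequeue and hn_recv_vf():

#include <stdint.h>

struct rte_mbuf;	/* opaque here; real code includes rte_mbuf.h */

typedef uint16_t (*poll_fn)(void *q, struct rte_mbuf **pkts, uint16_t n);

/* Fill the burst from the synthetic queue first, then top up from the
 * VF queue with whatever space is left. */
static uint16_t
recv_merged(poll_fn poll_synth, void *synth_q,
	    poll_fn poll_vf, void *vf_q,
	    struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	uint16_t nb_rcv = poll_synth(synth_q, rx_pkts, nb_pkts);

	if (poll_vf != NULL && nb_rcv < nb_pkts)
		nb_rcv += poll_vf(vf_q, rx_pkts + nb_rcv,
				  nb_pkts - nb_rcv);

	return nb_rcv;
}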