values save CPU cycles. This parameter is in microseconds.
If the value is too large or too small it will be
ignored by the host. (Default: 50)
+
+#. ``rx_copybreak``:
+
+ The rx_copybreak sets the threshold (in bytes) above which the driver
+ uses an external mbuf to avoid having to copy data. Setting 0 for
+ copybreak causes the driver to always create an external mbuf. Setting
+ a value greater than the MTU prevents it from ever creating an external
+ mbuf, so received data is always copied. The default value is 256 bytes.
+
+#. ``tx_copybreak``:
+
+ The tx_copybreak sets the threshold (in bytes) at or below which the
+ driver aggregates multiple small packets into one request. If
+ tx_copybreak is 0, each packet goes as a separate VMBus request (no
+ copying). If tx_copybreak is set larger than the MTU, all packets
+ smaller than the chunk size of the VMBus send buffer are copied;
+ larger packets always go as a single direct request. The default value
+ is 512 bytes. An example combining these options is shown below.
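+
+As an illustration only (how the string is attached to the VMBus device is
+omitted here and depends on the deployment), the runtime options above can
+be combined into one comma-separated devargs string, e.g.::
+
+   latency=20,rx_copybreak=0,tx_copybreak=1600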
DEV_RX_OFFLOAD_VLAN_STRIP | \
DEV_RX_OFFLOAD_RSS_HASH)
+#define NETVSC_ARG_LATENCY "latency"
+#define NETVSC_ARG_RXBREAK "rx_copybreak"
+#define NETVSC_ARG_TXBREAK "tx_copybreak"
+
struct hn_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
unsigned int offset;
eth_dev->intr_handle = NULL;
}
-/* handle "latency=X" from devargs */
-static int hn_set_latency(const char *key, const char *value, void *opaque)
+static int hn_set_parameter(const char *key, const char *value, void *opaque)
{
struct hn_data *hv = opaque;
char *endp = NULL;
- unsigned long lat;
-
- errno = 0;
- lat = strtoul(value, &endp, 0);
+ unsigned long v;
+ v = strtoul(value, &endp, 0);
if (*value == '\0' || *endp != '\0') {
PMD_DRV_LOG(ERR, "invalid parameter %s=%s", key, value);
return -EINVAL;
}
- PMD_DRV_LOG(DEBUG, "set latency %lu usec", lat);
+ if (!strcmp(key, NETVSC_ARG_LATENCY)) {
+ /* usec to nsec */
+ hv->latency = v * 1000;
+ PMD_DRV_LOG(DEBUG, "set latency %lu usec", v);
+ } else if (!strcmp(key, NETVSC_ARG_RXBREAK)) {
+ hv->rx_copybreak = v;
+ PMD_DRV_LOG(DEBUG, "rx copy break set to %u",
+ hv->rx_copybreak);
+ } else if (!strcmp(key, NETVSC_ARG_TXBREAK)) {
+ hv->tx_copybreak = v;
+ PMD_DRV_LOG(DEBUG, "tx copy break set to %u",
+ hv->tx_copybreak);
+ }
- hv->latency = lat * 1000; /* usec to nsec */
return 0;
}
struct hn_data *hv = dev->data->dev_private;
struct rte_devargs *devargs = dev->device->devargs;
static const char * const valid_keys[] = {
- "latency",
+ NETVSC_ARG_LATENCY,
+ NETVSC_ARG_RXBREAK,
+ NETVSC_ARG_TXBREAK,
NULL
};
struct rte_kvargs *kvlist;
kvlist = rte_kvargs_parse(devargs->args, valid_keys);
if (!kvlist) {
- PMD_DRV_LOG(NOTICE, "invalid parameters");
+ PMD_DRV_LOG(ERR, "invalid parameters");
return -EINVAL;
}
- ret = rte_kvargs_process(kvlist, "latency", hn_set_latency, hv);
- if (ret)
- PMD_DRV_LOG(ERR, "Unable to process latency arg\n");
-
+ ret = rte_kvargs_process(kvlist, NULL, hn_set_parameter, hv);
rte_kvargs_free(kvlist);
+
return ret;
}
hv->chim_res = &vmbus->resource[HV_SEND_BUF_MAP];
hv->port_id = eth_dev->data->port_id;
hv->latency = HN_CHAN_LATENCY_NS;
+ hv->rx_copybreak = HN_RXCOPY_THRESHOLD;
+ hv->tx_copybreak = HN_TXCOPY_THRESHOLD;
hv->max_queues = 1;
+
rte_rwlock_init(&hv->vf_lock);
hv->vf_port = HN_INVALID_PORT;
RTE_PMD_REGISTER_KMOD_DEP(net_netvsc, "* uio_hv_generic");
RTE_LOG_REGISTER(hn_logtype_init, pmd.net.netvsc.init, NOTICE);
RTE_LOG_REGISTER(hn_logtype_driver, pmd.net.netvsc.driver, NOTICE);
+RTE_PMD_REGISTER_PARAM_STRING(net_netvsc,
+ NETVSC_ARG_LATENCY "=<uint32> "
+ NETVSC_ARG_RXBREAK "=<uint32> "
+ NETVSC_ARG_TXBREAK "=<uint32>");
(sizeof(struct vmbus_chanpkt_hdr) + sizeof(struct hn_nvs_rndis))
#define HN_TXD_CACHE_SIZE 32 /* per cpu tx_descriptor pool cache */
-#define HN_TXCOPY_THRESHOLD 512
-
-#define HN_RXCOPY_THRESHOLD 256
#define HN_RXQ_EVENT_DEFAULT 2048
struct hn_rxinfo {
* For large packets, avoid copy if possible but need to keep
* some space available in receive area for later packets.
*/
- if (dlen >= HN_RXCOPY_THRESHOLD &&
+ if (dlen > hv->rx_copybreak &&
(uint32_t)rte_atomic32_read(&rxq->rxbuf_outstanding) <
hv->rxbuf_section_cnt / 2) {
struct rte_mbuf_ext_shared_info *shinfo;
break;
/* For small packets aggregate them in chimney buffer */
- if (m->pkt_len < HN_TXCOPY_THRESHOLD && pkt_size <= txq->agg_szmax) {
+ if (m->pkt_len <= hv->tx_copybreak &&
+ pkt_size <= txq->agg_szmax) {
/* If this packet will not fit, then flush */
if (txq->agg_pktleft == 0 ||
RTE_ALIGN(pkt_size, txq->agg_align) > txq->agg_szleft) {
/* Host monitor interval */
#define HN_CHAN_LATENCY_NS 50000
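+
+/* Default copy-break thresholds in bytes; can be overridden via devargs */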
+#define HN_TXCOPY_THRESHOLD 512
+#define HN_RXCOPY_THRESHOLD 256
+
/* Buffers need to be aligned */
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
struct rte_mem_resource *rxbuf_res; /* UIO resource for Rx */
uint32_t rxbuf_section_cnt; /* # of Rx sections */
+ uint32_t rx_copybreak;	/* Rx copy break threshold (bytes) */
uint16_t max_queues; /* Max available queues */
uint16_t num_queues;
uint64_t rss_offloads;
struct rte_mem_resource *chim_res; /* UIO resource for Tx */
struct rte_bitmap *chim_bmap; /* Send buffer map */
void *chim_bmem;
+ uint32_t tx_copybreak;	/* Tx copy break threshold (bytes) */
uint32_t chim_szmax; /* Max size per buffer */
uint32_t chim_cnt; /* Max packets per buffer */