# Compile burst-oriented Chelsio Terminator (CXGBE) PMD
#
CONFIG_RTE_LIBRTE_CXGBE_PMD=y
-CONFIG_RTE_LIBRTE_CXGBE_TPUT=y
# NXP DPAA Bus
CONFIG_RTE_LIBRTE_DPAA_BUS=n
This controls compilation of both the CXGBE and CXGBEVF PMDs.
-- ``CONFIG_RTE_LIBRTE_CXGBE_TPUT`` (default **y**)
-
- Toggle behavior to prefer Throughput or Latency.
-
Runtime Options
~~~~~~~~~~~~~~~
- ``keep_ovlan`` (default **0**)

  When enabled, the outer VLAN tag is preserved in Q-in-Q packets. Otherwise,
  the outer VLAN tag is stripped in Q-in-Q packets.
+- ``tx_mode_latency`` (default **0**)
+
+  When set to 1, Tx doesn't wait for the max number of packets to get
+  coalesced and sends the packets immediately at the end of the
+  current Tx burst. When set to 0, Tx waits across multiple Tx bursts
+  until the max number of packets has been coalesced. In this case,
+  Tx only sends the coalesced packets to hardware once the max
+  coalesce limit has been reached.
+
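+  For example, to prefer lower latency, the devarg can be passed on the
+  application command line. A sketch (the testpmd invocation and PCI
+  address below are illustrative, not mandated by the driver)::
+
+     testpmd -w 02:00.4,tx_mode_latency=1 -- -i
+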
CXGBE VF Only Runtime Options
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
struct adapter_devargs {
bool keep_ovlan;
bool force_link_up;
+ bool tx_mode_latency;
};
struct adapter {
/* Common PF and VF devargs */
#define CXGBE_DEVARG_CMN_KEEP_OVLAN "keep_ovlan"
+#define CXGBE_DEVARG_CMN_TX_MODE_LATENCY "tx_mode_latency"
/* VF only devargs */
#define CXGBE_DEVARG_VF_FORCE_LINK_UP "force_link_up"
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
- CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> ");
+ CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
+ CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> ");
RTE_INIT(cxgbe_init_log)
{
static int check_devargs_handler(const char *key, const char *value, void *p)
{
if (!strncmp(key, CXGBE_DEVARG_CMN_KEEP_OVLAN, strlen(key)) ||
+ !strncmp(key, CXGBE_DEVARG_CMN_TX_MODE_LATENCY, strlen(key)) ||
!strncmp(key, CXGBE_DEVARG_VF_FORCE_LINK_UP, strlen(key))) {
if (!strncmp(value, "1", 1)) {
bool *dst_val = (bool *)p;
{
cxgbe_get_devargs_int(adap, &adap->devargs.keep_ovlan,
CXGBE_DEVARG_CMN_KEEP_OVLAN, 0);
+ cxgbe_get_devargs_int(adap, &adap->devargs.tx_mode_latency,
+ CXGBE_DEVARG_CMN_TX_MODE_LATENCY, 0);
cxgbe_get_devargs_int(adap, &adap->devargs.force_link_up,
CXGBE_DEVARG_VF_FORCE_LINK_UP, 0);
}
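
For reference, devargs parsing in DPDK PMDs is built on librte_kvargs. The
following is a minimal, self-contained sketch of how a boolean devarg such as
tx_mode_latency could be parsed; the helper names are hypothetical and this is
not the driver's actual cxgbe_get_devargs_int() implementation.

#include <stdbool.h>
#include <string.h>
#include <rte_kvargs.h>

/* Hypothetical handler: treat "1" as true, anything else as false. */
static int
parse_bool_devarg(const char *key, const char *value, void *opaque)
{
	bool *dst = opaque;

	(void)key;
	*dst = (strcmp(value, "1") == 0);
	return 0;
}

/* Hypothetical helper: extract tx_mode_latency from a devargs string
 * such as "tx_mode_latency=1". Default is 0, i.e. prefer throughput.
 */
static bool
get_tx_mode_latency(const char *devargs_str)
{
	const char * const valid_keys[] = { "tx_mode_latency", NULL };
	struct rte_kvargs *kvlist;
	bool latency = false;

	kvlist = rte_kvargs_parse(devargs_str, valid_keys);
	if (kvlist == NULL)
		return latency;

	rte_kvargs_process(kvlist, "tx_mode_latency",
			   parse_bool_devarg, &latency);
	rte_kvargs_free(kvlist);
	return latency;
}
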
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbevf, "* igb_uio | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_cxgbevf,
CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
+ CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
CXGBE_DEVARG_VF_FORCE_LINK_UP "=<0|1> ");
unsigned int max_coal_pkt_num = is_pf4(adap) ? ETH_COALESCE_PKT_NUM :
ETH_COALESCE_VF_PKT_NUM;
-#ifdef RTE_LIBRTE_CXGBE_TPUT
- RTE_SET_USED(nb_pkts);
-#endif
-
if (q->coalesce.type == 0) {
mc = (struct ulp_txpkt *)q->coalesce.ptr;
mc->cmd_dest = htonl(V_ULPTX_CMD(4) | V_ULP_TXPKT_DEST(0) |
sd->coalesce.sgl[idx & 1] = (struct ulptx_sgl *)(cpl + 1);
sd->coalesce.idx = (idx & 1) + 1;
- /* send the coaelsced work request if max reached */
- if (++q->coalesce.idx == max_coal_pkt_num
-#ifndef RTE_LIBRTE_CXGBE_TPUT
- || q->coalesce.idx >= nb_pkts
-#endif
- )
+ /* Send the coalesced work request only if the max is reached. However,
+  * if lower latency is preferred over throughput, then don't wait to
+  * coalesce the next Tx burst and send the packets now.
+  */
+ q->coalesce.idx++;
+ if (q->coalesce.idx == max_coal_pkt_num ||
+ (adap->devargs.tx_mode_latency && q->coalesce.idx >= nb_pkts))
ship_tx_pkt_coalesce_wr(adap, txq);
+
return 0;
}
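
Read in isolation, the new ship condition amounts to the predicate below. This
is an illustrative rewrite only; the helper name and standalone form are
assumptions, and the driver evaluates the expression inline as shown above.

#include <stdbool.h>

/* Hypothetical predicate: decide whether to ship the coalesce work
 * request after the packet at position coal_idx has been queued.
 */
static bool
should_ship_coalesce_wr(unsigned int coal_idx, unsigned int max_coal_pkt_num,
			unsigned int nb_pkts, bool tx_mode_latency)
{
	/* Throughput mode (tx_mode_latency == 0): ship only when the
	 * coalesce buffer is full, even if that means holding packets
	 * across several Tx bursts.
	 */
	if (coal_idx == max_coal_pkt_num)
		return true;

	/* Latency mode (tx_mode_latency == 1): also ship once every
	 * packet of the current burst has been coalesced, instead of
	 * waiting for the next burst to fill the buffer.
	 */
	return tx_mode_latency && coal_idx >= nb_pkts;
}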