char pool_name[RTE_MEMPOOL_NAMESIZE];
char ring_name[RTE_MEMPOOL_NAMESIZE];
- rx_conf_default.start_rx_per_q = (uint8_t)zero_copy;
+ /*
+ * Zero copy defers queue RX/TX start until the guest finishes its
+ * startup and packet buffers from that guest become available.
+ */
+ rx_conf_default.rx_deferred_start = (uint8_t)zero_copy;
rx_conf_default.rx_drop_en = 0;
- tx_conf_default.start_tx_per_q = (uint8_t)zero_copy;
+ tx_conf_default.tx_deferred_start = (uint8_t)zero_copy;
nb_mbuf = num_rx_descriptor
+ num_switching_cores * MBUF_CACHE_SIZE_ZCP
+ num_switching_cores * MAX_PKT_BURST;
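For context, a sketch of the flow this change enables in the zero-copy example, assuming placeholder names port_id, queue_id, nb_desc and mbuf_pool (only rx_conf_default/tx_conf_default come from the example itself): with the deferred-start flags set, rte_eth_dev_start() leaves the queues stopped, and the example starts them explicitly once the guest's buffers are usable.

	/* Queue setup picks up the deferred-start flags set above. */
	rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
			rte_eth_dev_socket_id(port_id),
			&rx_conf_default, mbuf_pool);
	rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
			rte_eth_dev_socket_id(port_id),
			&tx_conf_default);
	rte_eth_dev_start(port_id);	/* deferred queues stay stopped */

	/* ... later, when the guest is up and its mbufs are available ... */
	rte_eth_dev_rx_queue_start(port_id, queue_id);
	rte_eth_dev_tx_queue_start(port_id, queue_id);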
struct rte_eth_thresh rx_thresh; /**< RX ring threshold registers. */
uint16_t rx_free_thresh; /**< Drives the freeing of RX descriptors. */
uint8_t rx_drop_en; /**< Drop packets if no descriptors are available. */
- uint8_t start_rx_per_q; /**< start rx per queue. */
+ uint8_t rx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
};
#define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001 /**< nb_segs=1 for all mbufs */
uint16_t tx_rs_thresh; /**< Drives the setting of RS bit on TXDs. */
uint16_t tx_free_thresh; /**< Drives the freeing of TX buffers. */
uint32_t txq_flags; /**< Set flags for the Tx queue */
- uint8_t start_tx_per_q; /**< start tx per queue. */
+ uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
};
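The flag is per queue, so an application can mix behaviors. A hedged sketch, where port_id, base_rx_conf, nb_desc, socket_id and mb_pool are placeholders, and base_rx_conf is assumed to already hold thresholds valid for the PMD:

	struct rte_eth_rxconf defer_conf = base_rx_conf;

	defer_conf.rx_deferred_start = 1;
	/* Queue 0 starts with rte_eth_dev_start(); queue 1 stays stopped
	 * until rte_eth_dev_rx_queue_start(port_id, 1) is called. */
	rte_eth_rx_queue_setup(port_id, 0, nb_desc, socket_id,
			&base_rx_conf, mb_pool);
	rte_eth_rx_queue_setup(port_id, 1, nb_desc, socket_id,
			&defer_conf, mb_pool);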
/**
extern int rte_eth_dev_socket_id(uint8_t port_id);
/*
- * Start specified RX queue of a port
+ * Allocate mbufs from the mempool, set up their DMA physical addresses
+ * in the descriptor ring, and then start RX for the specified queue of
+ * a port. It is used when the rx_deferred_start flag of the specified
+ * queue is true.
*
* @param port_id
* The port identifier of the Ethernet device
extern int rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id);
/*
- * Start specified TX queue of a port
+ * Start TX for the specified queue of a port. It is used when the
+ * tx_deferred_start flag of the specified queue is true.
*
* @param port_id
* The port identifier of the Ethernet device
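These calls return 0 on success and a negative value on error (a PMD without per-queue start/stop support is expected to reject them, e.g. with -ENOTSUP), so checking the result is worthwhile: a deferred queue carries no traffic until it is started. A fragment, with port_id and queue_id as placeholders:

	int ret;

	ret = rte_eth_dev_rx_queue_start(port_id, queue_id);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "cannot start RX queue %u: %d\n",
			(unsigned int)queue_id, ret);
	/* ... and symmetrically, stop the queue without stopping the port */
	ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);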
txq = dev_data->tx_queues[i];
- /* Don't operate the queue if not configured or
- * if starting only per queue */
+ /* Don't operate the queue if not configured; on a
+ * global start, skip queues with deferred start */
- if (!txq->q_set || (on && txq->start_tx_per_q))
+ if (!txq->q_set || (on && txq->tx_deferred_start))
continue;
if (on)
ret = i40e_dev_tx_queue_start(dev, i);
rxq = dev_data->rx_queues[i];
- /* Don't operate the queue if not configured or
- * if starting only per queue */
+ /* Don't operate the queue if not configured; on a
+ * global start, skip queues with deferred start */
- if (!rxq->q_set || (on && rxq->start_rx_per_q))
+ if (!rxq->q_set || (on && rxq->rx_deferred_start))
continue;
if (on)
ret = i40e_dev_rx_queue_start(dev, i);
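The i40evf and ixgbe hunks below apply the same check, so the driver-side pattern can be summarized once. A condensed sketch with illustrative names (dev_switch_rx_queues, struct pmd_rx_queue and pmd_rx_queue_start/stop are not the actual symbols; the hypothetical queue struct is assumed to carry the q_set and rx_deferred_start fields shown in this patch):

	/* Global start/stop walks all queues; a global start skips any
	 * queue whose start the application deferred. */
	static int
	dev_switch_rx_queues(struct rte_eth_dev *dev, bool on)
	{
		uint16_t i;

		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			struct pmd_rx_queue *rxq = dev->data->rx_queues[i];

			if (!rxq->q_set || (on && rxq->rx_deferred_start))
				continue;
			if (on ? pmd_rx_queue_start(dev, i)
			       : pmd_rx_queue_stop(dev, i))
				return -1;
		}
		return 0;
	}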
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev_data->rx_queues[i];
- if (rxq->start_rx_per_q)
+ if (rxq->rx_deferred_start)
continue;
if (i40evf_dev_rx_queue_start(dev, i) != 0) {
PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev_data->tx_queues[i];
- if (txq->start_tx_per_q)
+ if (txq->tx_deferred_start)
continue;
if (i40evf_dev_tx_queue_start(dev, i) != 0) {
PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
rxq->vsi = vsi;
- rxq->start_rx_per_q = rx_conf->start_rx_per_q;
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
/* Allocate the maximum number of RX ring hardware descriptors. */
ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
txq->port_id = dev->data->port_id;
txq->txq_flags = tx_conf->txq_flags;
txq->vsi = vsi;
- txq->start_tx_per_q = tx_conf->start_tx_per_q;
+ txq->tx_deferred_start = tx_conf->tx_deferred_start;
#ifdef RTE_LIBRTE_XEN_DOM0
txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
uint16_t max_pkt_len; /* Maximum packet length */
uint8_t hs_mode; /* Header Split mode */
bool q_set; /**< indicate if rx queue has been configured */
- bool start_rx_per_q; /**< don't start this queue in dev start */
+ bool rx_deferred_start; /**< don't start this queue in dev start */
};
struct i40e_tx_entry {
uint16_t tx_next_dd;
uint16_t tx_next_rs;
bool q_set; /**< indicate if tx queue has been configured */
- bool start_tx_per_q; /**< don't start this queue in dev start */
+ bool tx_deferred_start; /**< don't start this queue in dev start */
};
int i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
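Per the reworded API comment above, starting a deferred RX queue must populate the ring before enabling it. A hypothetical outline (alloc_rx_queue_mbufs and hw_enable_rx_queue are placeholders, not the real i40e helpers):

	int
	pmd_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
	{
		struct pmd_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];

		/* Allocate one mbuf per descriptor and write its DMA
		 * (physical) address into the hardware descriptor ring. */
		if (alloc_rx_queue_mbufs(rxq) != 0)
			return -ENOMEM;
		/* Program the ring registers and enable the queue. */
		hw_enable_rx_queue(dev, rx_queue_id);
		return 0;
	}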
txq->port_id = dev->data->port_id;
txq->txq_flags = tx_conf->txq_flags;
txq->ops = &def_txq_ops;
- txq->start_tx_per_q = tx_conf->start_tx_per_q;
+ txq->tx_deferred_start = tx_conf->tx_deferred_start;
/*
* Modification to set VFTDT for virtual function if vf is detected
rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
0 : ETHER_CRC_LEN);
rxq->drop_en = rx_conf->rx_drop_en;
- rxq->start_rx_per_q = rx_conf->start_rx_per_q;
+ rxq->rx_deferred_start = rx_conf->rx_deferred_start;
/*
* Allocate RX ring hardware descriptors. A memzone large enough to
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
- if (!txq->start_tx_per_q)
+ if (!txq->tx_deferred_start)
ixgbe_dev_tx_queue_start(dev, i);
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
- if (!rxq->start_rx_per_q)
+ if (!rxq->rx_deferred_start)
ixgbe_dev_rx_queue_start(dev, i);
}
uint8_t port_id; /**< Device port identifier. */
uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
- uint8_t start_rx_per_q;
+ uint8_t rx_deferred_start; /**< Do not start this queue in dev start. */
#ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
/** need to alloc dummy mbuf, for wraparound when scanning hw ring */
struct rte_mbuf fake_mbuf;
/** Hardware context0 history. */
struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
struct ixgbe_txq_ops *ops; /**< txq ops */
- uint8_t start_tx_per_q;
+ uint8_t tx_deferred_start; /**< Do not start this queue in dev start. */
};
struct ixgbe_txq_ops {