dev->data->dev_conf.rxmode.max_rx_pkt_len);
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+ if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
"be larger than %u and smaller than %u,"
"as jumbo frame is enabled",
- (uint32_t)ETHER_MAX_LEN,
+ (uint32_t)RTE_ETHER_MAX_LEN,
(uint32_t)ICE_FRAME_SIZE_MAX);
return -EINVAL;
}
} else {
- if (rxq->max_pkt_len < ETHER_MIN_LEN ||
- rxq->max_pkt_len > ETHER_MAX_LEN) {
+ if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
+ rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, "
"as jumbo frame is disabled",
- (uint32_t)ETHER_MIN_LEN,
- (uint32_t)ETHER_MAX_LEN);
+ (uint32_t)RTE_ETHER_MIN_LEN,
+ (uint32_t)RTE_ETHER_MAX_LEN);
return -EINVAL;
}
}
RTE_PKTMBUF_HEADROOM);
/* Check if scattered RX needs to be used. */
- if ((rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size)
+ if (rxq->max_pkt_len > buf_size)
dev->data->scattered_rx = 1;
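/* Scattered Rx chains a frame across multiple mbufs when a single
 * data buffer cannot hold it; the simplified check assumes
 * max_pkt_len already accounts for any L2 overhead such as VLAN tags.
 */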
rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
/* Init the Tx tail register */
ICE_PCI_REG_WRITE(txq->qtx_tail, 0);
- err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, 1, &txq_elem,
- sizeof(txq_elem), NULL);
+ /* FIXME: we assume TC is always 0 here */
+ err = ice_ena_vsi_txq(hw->port_info, vsi->idx, 0, tx_queue_id, 1,
+ &txq_elem, sizeof(txq_elem), NULL);
if (err) {
PMD_DRV_LOG(ERR, "Failed to add lan txq");
return -EIO;
{
struct ice_tx_queue *txq;
struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct ice_vsi *vsi = pf->main_vsi;
enum ice_status status;
uint16_t q_ids[1];
uint32_t q_teids[1];
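+ /* One software queue handle per queue passed to ice_dis_vsi_txq() */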
+ uint16_t q_handle = tx_queue_id;
if (tx_queue_id >= dev->data->nb_tx_queues) {
PMD_DRV_LOG(ERR, "TX queue %u is out of range %u",
q_ids[0] = txq->reg_idx;
q_teids[0] = txq->q_teid;
- status = ice_dis_vsi_txq(hw->port_info, 1, q_ids, q_teids,
- ICE_NO_RESET, 0, NULL);
+ /* FIXME: we assume TC is always 0 here */
+ status = ice_dis_vsi_txq(hw->port_info, vsi->idx, 0, 1, &q_handle,
+ q_ids, q_teids, ICE_NO_RESET, 0, NULL);
if (status != ICE_SUCCESS) {
PMD_DRV_LOG(DEBUG, "Failed to disable Lan Tx queue");
return -EINVAL;
rxq->reg_idx = vsi->base_queue + queue_idx;
rxq->port_id = dev->data->port_id;
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
- rxq->crc_len = ETHER_CRC_LEN;
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
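/* A non-zero crc_len is subtracted from the packet length on the Rx
 * path so the reported payload size excludes the kept CRC.
 */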
* - tx_rs_thresh must be a divisor of the ring size.
* - tx_free_thresh must be greater than 0.
* - tx_free_thresh must be less than the size of the ring minus 3.
+ * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
*
* One descriptor in the TX ring is used as a sentinel to avoid a H/W
* race condition, hence the maximum threshold constraints. When set
* to zero use default values.
*/
- tx_rs_thresh = (uint16_t)(tx_conf->tx_rs_thresh ?
- tx_conf->tx_rs_thresh :
- ICE_DEFAULT_TX_RSBIT_THRESH);
tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
tx_conf->tx_free_thresh :
ICE_DEFAULT_TX_FREE_THRESH);
+ /* Force tx_rs_thresh to adapt to an aggressive tx_free_thresh. */
+ tx_rs_thresh =
+ (ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
+ nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
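+ /* Example, assuming the ice_rxtx.h defaults of 32 for both
+ * thresholds: with nb_desc = 64 and tx_free_thresh = 48, the default
+ * RS threshold would overflow the ring, so it is clamped to
+ * 64 - 48 = 16.
+ */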
+ if (tx_conf->tx_rs_thresh)
+ tx_rs_thresh = tx_conf->tx_rs_thresh;
+ if (tx_rs_thresh + tx_free_thresh > nb_desc) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
+ "exceed nb_desc. (tx_rs_thresh=%u "
+ "tx_free_thresh=%u nb_desc=%u port = %d queue=%d)",
+ (unsigned int)tx_rs_thresh,
+ (unsigned int)tx_free_thresh,
+ (unsigned int)nb_desc,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return -EINVAL;
+ }
if (tx_rs_thresh >= (nb_desc - 2)) {
PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
"number of TX descriptors minus 2. "
txq->q_set = TRUE;
dev->data->tx_queues[queue_idx] = txq;
txq->tx_rel_mbufs = _ice_tx_queue_release_mbufs;
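+ /* Record at setup time whether this queue qualifies for the simple
+ * Tx path (see ice_set_tx_function_flag below).
+ */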
+ ice_set_tx_function_flag(dev, txq);
return 0;
}
*/
rxm->next = NULL;
if (unlikely(rxq->crc_len > 0)) {
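/* The stripped CRC may span mbuf segments: when the last segment
 * holds nothing but (part of) the CRC, free it and trim the
 * remainder from the previous segment.
 */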
- first_seg->pkt_len -= ETHER_CRC_LEN;
- if (rx_packet_len <= ETHER_CRC_LEN) {
+ first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
+ if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
rte_pktmbuf_free_seg(rxm);
first_seg->nb_segs--;
last_seg->data_len =
(uint16_t)(last_seg->data_len -
- (ETHER_CRC_LEN - rx_packet_len));
+ (RTE_ETHER_CRC_LEN - rx_packet_len));
last_seg->next = NULL;
} else
rxm->data_len = (uint16_t)(rx_packet_len -
- ETHER_CRC_LEN);
+ RTE_ETHER_CRC_LEN);
}
first_seg->port = rxq->port_id;
#ifdef RTE_ARCH_X86
if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
- dev->rx_pkt_burst == ice_recv_pkts_vec_avx2)
+ dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
+ dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
return ptypes;
#endif
switch (ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_TCP_CKSUM:
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
- *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
+ *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
break;
case PKT_TX_SCTP_CKSUM:
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
- *td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
+ *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
break;
case PKT_TX_UDP_CKSUM:
*td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
- *td_offset |= (sizeof(struct udp_hdr) >> 2) <<
+ *td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
ICE_TX_DESC_LEN_L4_LEN_S;
break;
default:
if (dev->data->scattered_rx) {
PMD_DRV_LOG(DEBUG,
- "Using Vector Scattered Rx (port %d).",
+ "Using %sVector Scattered Rx (port %d).",
+ use_avx2 ? "avx2 " : "",
dev->data->port_id);
- dev->rx_pkt_burst = ice_recv_scattered_pkts_vec;
+ dev->rx_pkt_burst = use_avx2 ?
+ ice_recv_scattered_pkts_vec_avx2 :
+ ice_recv_scattered_pkts_vec;
} else {
PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
use_avx2 ? "avx2 " : "",
}
}
+void __attribute__((cold))
+ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
+{
+ struct ice_adapter *ad =
+ ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ /* Use a simple Tx queue if possible (only fast free is allowed) */
+ ad->tx_simple_allowed =
+ (txq->offloads ==
+ (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+ txq->tx_rs_thresh >= ICE_TX_MAX_BURST);
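+ /* The equality above holds only when no offload other than
+ * DEV_TX_OFFLOAD_MBUF_FAST_FREE is set; the RS threshold must also
+ * cover a full burst of ICE_TX_MAX_BURST packets.
+ */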
+
+ if (ad->tx_simple_allowed)
+ PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
+ txq->queue_id);
+ else
+ PMD_INIT_LOG(DEBUG,
+ "Simple Tx can NOT be enabled on Tx queue %u.",
+ txq->queue_id);
+}
+
/*********************************************************************
*
* TX prep functions
#ifdef RTE_ARCH_X86
struct ice_tx_queue *txq;
int i;
+ bool use_avx2 = false;
if (!ice_tx_vec_dev_check(dev)) {
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
(void)ice_txq_vec_setup(txq);
}
- PMD_DRV_LOG(DEBUG, "Using Vector Tx (port %d).",
+
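+ /* AVX512F-capable CPUs implement AVX2 as well, so either flag is
+ * sufficient to select the AVX2 Tx path.
+ */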
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
+ use_avx2 = true;
+
+ PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
+ use_avx2 ? "avx2 " : "",
dev->data->port_id);
- dev->tx_pkt_burst = ice_xmit_pkts_vec;
+ dev->tx_pkt_burst = use_avx2 ?
+ ice_xmit_pkts_vec_avx2 :
+ ice_xmit_pkts_vec;
dev->tx_pkt_prepare = NULL;
return;