PKT_TX_OUTER_IP_CKSUM)
#define I40E_TX_OFFLOAD_MASK ( \
+ PKT_TX_OUTER_IPV4 | \
+ PKT_TX_OUTER_IPV6 | \
+ PKT_TX_IPV4 | \
+ PKT_TX_IPV6 | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_OUTER_IP_CKSUM | \
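The added PKT_TX_OUTER_IPV4/IPV6 and PKT_TX_IPV4/IPV6 bits widen the set of ol_flags the driver accepts. A minimal, self-contained sketch of how such a supported-offload mask is used to reject packets; the flag names and values below are illustrative stand-ins for the real PKT_TX_* bits, not the DPDK definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the PKT_TX_* bits; values are arbitrary. */
#define TX_IP_CKSUM	(1ULL << 0)
#define TX_OUTER_IPV4	(1ULL << 1)
#define TX_MACSEC	(1ULL << 2)	/* an offload this PMD rejects */

#define TX_OFFLOAD_MASK	(TX_IP_CKSUM | TX_OUTER_IPV4)

/* Mirror of the prepare-path test: any bit outside the supported mask
 * marks the packet invalid. */
static int
check_offloads(uint64_t ol_flags)
{
	if (ol_flags & ~TX_OFFLOAD_MASK)
		return -1;
	return 0;
}

int
main(void)
{
	printf("ipv4+cksum: %d\n", check_offloads(TX_OUTER_IPV4 | TX_IP_CKSUM));
	printf("macsec:     %d\n", check_offloads(TX_MACSEC));
	return 0;
}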
#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
(1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) {
- mb->ol_flags |= PKT_RX_QINQ_STRIPPED;
+ mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
+ PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
mb->vlan_tci_outer = mb->vlan_tci;
mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
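When both tags are stripped, the inner-VLAN flags must be set alongside the QinQ ones so applications know that both TCI fields are valid. A simplified model of the handling above, with struct mbuf as a cut-down stand-in for struct rte_mbuf:

#include <stdint.h>
#include <stdio.h>

#define RX_VLAN			(1ULL << 0)
#define RX_VLAN_STRIPPED	(1ULL << 1)
#define RX_QINQ			(1ULL << 2)
#define RX_QINQ_STRIPPED	(1ULL << 3)

/* Cut-down stand-in for struct rte_mbuf. */
struct mbuf {
	uint64_t ol_flags;
	uint16_t vlan_tci;		/* inner tag */
	uint16_t vlan_tci_outer;	/* outer tag */
};

static void
strip_qinq(struct mbuf *mb, uint16_t l2tag2_2)
{
	/* QINQ_STRIPPED alone is not enough: applications also test the
	 * VLAN/QINQ bits to learn that both TCI fields are valid. */
	mb->ol_flags |= RX_QINQ_STRIPPED | RX_QINQ |
			RX_VLAN_STRIPPED | RX_VLAN;
	mb->vlan_tci_outer = mb->vlan_tci;	/* first tag becomes outer */
	mb->vlan_tci = l2tag2_2;		/* second tag becomes inner */
}

int
main(void)
{
	struct mbuf mb = { .vlan_tci = 100 };

	strip_qinq(&mb, 200);
	printf("outer=%u inner=%u\n", (unsigned)mb.vlan_tci_outer,
	       (unsigned)mb.vlan_tci);
	return 0;
}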
switch (ol_flags & PKT_TX_L4_MASK) {
case PKT_TX_TCP_CKSUM:
*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
- *td_offset |= (sizeof(struct tcp_hdr) >> 2) <<
+ *td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
case PKT_TX_SCTP_CKSUM:
*td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP;
- *td_offset |= (sizeof(struct sctp_hdr) >> 2) <<
+ *td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
break;
case PKT_TX_UDP_CKSUM:
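The td_offset field encodes each header length in 32-bit words (hence the >> 2) shifted into its slot in the TX descriptor, which is why the rte_tcp_hdr/rte_sctp_hdr renames above are safe: sizeof is unchanged. A small sketch of the encoding; the shift value below is a stand-in for the hardware-defined I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT. */
#define L4_LEN_SHIFT	14

/* Minimal 20-byte model of a TCP header, matching
 * sizeof(struct rte_tcp_hdr). */
struct tcp_hdr_model { uint8_t bytes[20]; };

int
main(void)
{
	uint32_t td_offset = 0;

	/* 20 bytes -> 5 dwords, packed into the descriptor offset field */
	td_offset |= (uint32_t)(sizeof(struct tcp_hdr_model) >> 2)
			<< L4_LEN_SHIFT;
	printf("td_offset=0x%x (l4 dwords=%u)\n", (unsigned)td_offset,
	       (unsigned)((td_offset >> L4_LEN_SHIFT) & 0xf));
	return 0;
}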
*/
rxm->next = NULL;
if (unlikely(rxq->crc_len > 0)) {
- first_seg->pkt_len -= ETHER_CRC_LEN;
- if (rx_packet_len <= ETHER_CRC_LEN) {
+ first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
+ if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
rte_pktmbuf_free_seg(rxm);
first_seg->nb_segs--;
last_seg->data_len =
(uint16_t)(last_seg->data_len -
- (ETHER_CRC_LEN - rx_packet_len));
+ (RTE_ETHER_CRC_LEN - rx_packet_len));
last_seg->next = NULL;
} else
rxm->data_len = (uint16_t)(rx_packet_len -
- ETHER_CRC_LEN);
+ RTE_ETHER_CRC_LEN);
}
first_seg->port = rxq->port_id;
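The 4-byte Ethernet CRC can end up split across the last two segments of a chain, which is why the code above may free the final segment and trim the remainder from the previous one. A self-contained sketch of that logic, with struct seg standing in for chained rte_mbuf segments:

#include <stdint.h>
#include <stdio.h>

#define CRC_LEN	4	/* RTE_ETHER_CRC_LEN */

/* Stand-in for a chained rte_mbuf segment. */
struct seg {
	uint16_t data_len;
	struct seg *next;
};

/* Trim the CRC from the tail of a chain; returns 1 when the final
 * segment held only (part of) the CRC and must be freed by the caller,
 * which also decrements nb_segs. */
static int
strip_crc(struct seg *last_seg, struct seg *rxm, uint16_t rx_packet_len,
	  uint32_t *pkt_len)
{
	*pkt_len -= CRC_LEN;
	if (rx_packet_len <= CRC_LEN) {
		/* leftover CRC bytes live in the previous segment */
		last_seg->data_len -= (uint16_t)(CRC_LEN - rx_packet_len);
		last_seg->next = NULL;
		return 1;
	}
	rxm->data_len = (uint16_t)(rx_packet_len - CRC_LEN);
	return 0;
}

int
main(void)
{
	struct seg last = { .data_len = 60 }, tail = { .data_len = 2 };
	uint32_t pkt_len = 62;
	int dropped;

	last.next = &tail;
	dropped = strip_crc(&last, &tail, 2, &pkt_len);
	printf("dropped=%d last.data_len=%u pkt_len=%u\n", dropped,
	       (unsigned)last.data_len, (unsigned)pkt_len);
	return 0;
}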
/* Check for m->nb_segs to not exceed the limits. */
if (!(ol_flags & PKT_TX_TCP_SEG)) {
- if (m->nb_segs > I40E_TX_MAX_MTU_SEG) {
+ if (m->nb_segs > I40E_TX_MAX_MTU_SEG ||
+ m->pkt_len > I40E_FRAME_SIZE_MAX) {
rte_errno = -EINVAL;
return i;
}
} else if (m->nb_segs > I40E_TX_MAX_SEG ||
m->tso_segsz < I40E_MIN_TSO_MSS ||
- m->tso_segsz > I40E_MAX_TSO_MSS) {
+ m->tso_segsz > I40E_MAX_TSO_MSS ||
+ m->pkt_len > I40E_TSO_FRAME_SIZE_MAX) {
/* An MSS outside the range (256B - 9674B) is considered
* malicious
*/
}
/* check the size of packet */
- if (m->pkt_len > I40E_FRAME_SIZE_MAX ||
- m->pkt_len < I40E_TX_MIN_PKT_LEN) {
+ if (m->pkt_len < I40E_TX_MIN_PKT_LEN) {
rte_errno = -EINVAL;
return i;
}
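Taken together, the prepare-path checks bound non-TSO packets by segment count and total frame size, and TSO packets by a larger segment limit, the MSS window, and a separate TSO frame cap; the minimum-length check applies to both. A sketch of the combined validation under locally defined limits that mirror the i40e constants approximately, not authoritatively:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Local limits mirroring the i40e constants; illustrative only. */
#define TX_MAX_MTU_SEG		8
#define FRAME_SIZE_MAX		9728
#define TSO_FRAME_SIZE_MAX	262144
#define TX_MAX_SEG		255
#define MIN_TSO_MSS		256
#define MAX_TSO_MSS		9674
#define TX_MIN_PKT_LEN		17

static int
check_pkt(int tso, uint16_t nb_segs, uint32_t pkt_len, uint16_t tso_segsz)
{
	if (!tso) {
		if (nb_segs > TX_MAX_MTU_SEG || pkt_len > FRAME_SIZE_MAX)
			return -EINVAL;
	} else if (nb_segs > TX_MAX_SEG || tso_segsz < MIN_TSO_MSS ||
		   tso_segsz > MAX_TSO_MSS ||
		   pkt_len > TSO_FRAME_SIZE_MAX) {
		/* MSS outside 256B..9674B is considered malicious */
		return -EINVAL;
	}
	if (pkt_len < TX_MIN_PKT_LEN)
		return -EINVAL;
	return 0;
}

int
main(void)
{
	printf("jumbo+1 non-TSO: %d\n",
	       check_pkt(0, 1, FRAME_SIZE_MAX + 1, 0));
	printf("1MB TSO:         %d\n", check_pkt(1, 16, 1 << 20, 1460));
	return 0;
}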
i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct i40e_rx_queue *rxq;
- int err = -1;
+ int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- err = i40e_alloc_rx_queue_mbufs(rxq);
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
- return err;
- }
+ rxq = dev->data->rx_queues[rx_queue_id];
- rte_wmb();
+ err = i40e_alloc_rx_queue_mbufs(rxq);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+ return err;
+ }
- /* Init the RX tail regieter. */
- I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+ rte_wmb();
- err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+ /* Init the RX tail register. */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
- rx_queue_id);
+ err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+ rx_queue_id);
- i40e_rx_queue_release_mbufs(rxq);
- i40e_reset_rx_queue(rxq);
- } else
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ return err;
}
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
int
int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
-
- /*
- * rx_queue_id is queue id application refers to, while
- * rxq->reg_idx is the real queue index.
- */
- err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+ rxq = dev->data->rx_queues[rx_queue_id];
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
- rx_queue_id);
- return err;
- }
- i40e_rx_queue_release_mbufs(rxq);
- i40e_reset_rx_queue(rxq);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ /*
+ * rx_queue_id is the queue id the application refers to, while
+ * rxq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+ rx_queue_id);
+ return err;
}
+ i40e_rx_queue_release_mbufs(rxq);
+ i40e_reset_rx_queue(rxq);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
int
i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
- int err = -1;
+ int err;
struct i40e_tx_queue *txq;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
-
- /*
- * tx_queue_id is queue id application refers to, while
- * rxq->reg_idx is the real queue index.
- */
- err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
- if (err)
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
- tx_queue_id);
- else
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ txq = dev->data->tx_queues[tx_queue_id];
+
+ /*
+ * tx_queue_id is the queue id the application refers to, while
+ * txq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+ tx_queue_id);
+ return err;
}
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
int
int err;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (tx_queue_id < dev->data->nb_tx_queues) {
- txq = dev->data->tx_queues[tx_queue_id];
-
- /*
- * tx_queue_id is queue id application refers to, while
- * txq->reg_idx is the real queue index.
- */
- err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
+ txq = dev->data->tx_queues[tx_queue_id];
- if (err) {
- PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of",
- tx_queue_id);
- return err;
- }
-
- i40e_tx_queue_release_mbufs(txq);
- i40e_reset_tx_queue(txq);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ /*
+ * tx_queue_id is the queue id the application refers to, while
+ * txq->reg_idx is the real queue index.
+ */
+ err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+ tx_queue_id);
+ return err;
}
+ i40e_tx_queue_release_mbufs(txq);
+ i40e_reset_tx_queue(txq);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
return 0;
}
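All four start/stop ops above drop their queue-id bounds checks because the ethdev layer validates the id before dispatching to the PMD, which is what allows the bodies to be de-indented into early-return form. A brief usage sketch against the public API; it assumes an initialized EAL and a configured port with at least one RX queue:

#include <stdio.h>

#include <rte_ethdev.h>

/* Restart one RX queue through the ethdev layer, which range-checks
 * queue_id before invoking the PMD's rx_queue_stop/rx_queue_start ops. */
static int
restart_rx_queue(uint16_t port_id, uint16_t queue_id)
{
	int ret;

	ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);
	if (ret != 0) {
		printf("stop failed: %d\n", ret);
		return ret;
	}
	return rte_eth_dev_rx_queue_start(port_id, queue_id);
}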
(uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
RTE_PKTMBUF_HEADROOM);
int use_scattered_rx =
- ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size);
+ (rxq->max_pkt_len > buf_size);
if (i40e_rx_queue_init(rxq) != I40E_SUCCESS) {
PMD_DRV_LOG(ERR,
ad->rx_bulk_alloc_allowed = false;
i40e_set_rx_function(dev);
return 0;
+ } else if (ad->rx_vec_allowed && !rte_is_power_of_2(rxq->nb_rx_desc)) {
+ PMD_DRV_LOG(ERR, "Vector mode is allowed, but descriptor"
+ " number %d of queue %d isn't a power of 2",
+ rxq->nb_rx_desc, rxq->queue_id);
+ return -EINVAL;
}
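The vector RX path indexes the ring with a power-of-2 mask, hence the new rte_is_power_of_2() guard. A self-contained sketch of the check; DPDK's helper reduces to the same bit trick:

#include <stdint.h>
#include <stdio.h>

/* rte_is_power_of_2() reduces to the same n & (n - 1) test. */
static int
is_power_of_2(uint32_t n)
{
	return n && !(n & (n - 1));
}

int
main(void)
{
	uint16_t nb_rx_desc[] = { 512, 1024, 768 };
	unsigned int i;

	for (i = 0; i < sizeof(nb_rx_desc) / sizeof(nb_rx_desc[0]); i++)
		printf("%u -> %s\n", (unsigned)nb_rx_desc[i],
		       is_power_of_2(nb_rx_desc[i]) ? "ok" : "rejected");
	return 0;
}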
/* check bulk alloc conflict */
rxq->queue_id = queue_idx;
rxq->reg_idx = reg_idx;
rxq->port_id = dev->data->port_id;
- if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
- rxq->crc_len = ETHER_CRC_LEN;
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ rxq->crc_len = RTE_ETHER_CRC_LEN;
else
rxq->crc_len = 0;
rxq->drop_en = rx_conf->rx_drop_en;
* - tx_rs_thresh must be a divisor of the ring size.
* - tx_free_thresh must be greater than 0.
* - tx_free_thresh must be less than the size of the ring minus 3.
+ * - tx_free_thresh + tx_rs_thresh must not exceed nb_desc.
*
* One descriptor in the TX ring is used as a sentinel to avoid a H/W
* race condition, hence the maximum threshold constraints. When set
* to zero use default values.
*/
- tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh) ?
- tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH);
tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
+ /* force tx_rs_thresh to adapt to an aggressive tx_free_thresh */
+ tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
+ nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
+ if (tx_conf->tx_rs_thresh > 0)
+ tx_rs_thresh = tx_conf->tx_rs_thresh;
+ if (tx_rs_thresh + tx_free_thresh > nb_desc) {
+ PMD_INIT_LOG(ERR, "tx_rs_thresh + tx_free_thresh must not "
+ "exceed nb_desc. (tx_rs_thresh=%u "
+ "tx_free_thresh=%u nb_desc=%u port=%d queue=%d)",
+ (unsigned int)tx_rs_thresh,
+ (unsigned int)tx_free_thresh,
+ (unsigned int)nb_desc,
+ (int)dev->data->port_id,
+ (int)queue_idx);
+ return I40E_ERR_PARAM;
+ }
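The new constraint closes a hole where an aggressive tx_free_thresh plus the default tx_rs_thresh could overrun the ring. A sketch of the derivation and validation above; the defaults mirror DEFAULT_TX_RS_THRESH/DEFAULT_TX_FREE_THRESH but are defined locally for illustration:

#include <stdint.h>
#include <stdio.h>

#define DEF_TX_RS_THRESH	32
#define DEF_TX_FREE_THRESH	32

static int
pick_thresholds(uint16_t nb_desc, uint16_t conf_rs, uint16_t conf_free,
		uint16_t *rs, uint16_t *free_thresh)
{
	*free_thresh = conf_free ? conf_free : DEF_TX_FREE_THRESH;
	/* clamp the default so rs + free never exceeds the ring size */
	*rs = (DEF_TX_RS_THRESH + *free_thresh > nb_desc) ?
			nb_desc - *free_thresh : DEF_TX_RS_THRESH;
	if (conf_rs > 0)
		*rs = conf_rs;	/* an explicit value is taken as-is... */
	if (*rs + *free_thresh > nb_desc)
		return -1;	/* ...but still validated here */
	return 0;
}

int
main(void)
{
	uint16_t rs, free_thresh;

	/* small ring with an aggressive free threshold */
	if (pick_thresholds(64, 0, 48, &rs, &free_thresh) == 0)
		printf("rs=%u free=%u\n", (unsigned)rs,
		       (unsigned)free_thresh);
	return 0;
}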
if (tx_rs_thresh >= (nb_desc - 2)) {
PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than the "
"number of TX descriptors minus 2. "
struct rte_eth_dev *dev;
uint16_t i;
- dev = &rte_eth_devices[txq->port_id];
-
if (!txq || !txq->sw_ring) {
- PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
+ PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
return;
}
+ dev = &rte_eth_devices[txq->port_id];
+
/**
* vPMD tx will not set sw_ring's mbuf to NULL after free,
* so need to free remains more carefully.
len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
+ if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
"be larger than %u and smaller than %u,"
"as jumbo frame is enabled",
- (uint32_t)ETHER_MAX_LEN,
+ (uint32_t)RTE_ETHER_MAX_LEN,
(uint32_t)I40E_FRAME_SIZE_MAX);
return I40E_ERR_CONFIG;
}
} else {
- if (rxq->max_pkt_len < ETHER_MIN_LEN ||
- rxq->max_pkt_len > ETHER_MAX_LEN) {
+ if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
+ rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
"larger than %u and smaller than %u, "
"as jumbo frame is disabled",
- (uint32_t)ETHER_MIN_LEN,
- (uint32_t)ETHER_MAX_LEN);
+ (uint32_t)RTE_ETHER_MIN_LEN,
+ (uint32_t)RTE_ETHER_MAX_LEN);
return I40E_ERR_CONFIG;
}
}
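With jumbo frames enabled, max_pkt_len must fall in (RTE_ETHER_MAX_LEN, I40E_FRAME_SIZE_MAX]; otherwise it must fall in [RTE_ETHER_MIN_LEN, RTE_ETHER_MAX_LEN]. A self-contained sketch of the window check, with the standard Ethernet constants defined locally:

#include <stdint.h>
#include <stdio.h>

/* Standard Ethernet sizes, defined locally for a self-contained sketch. */
#define ETHER_MIN_LEN_C		64
#define ETHER_MAX_LEN_C		1518
#define FRAME_SIZE_MAX_C	9728	/* I40E_FRAME_SIZE_MAX */

static int
check_max_pkt_len(uint32_t len, int jumbo)
{
	if (jumbo)
		return (len > ETHER_MAX_LEN_C && len <= FRAME_SIZE_MAX_C) ?
				0 : -1;
	return (len >= ETHER_MIN_LEN_C && len <= ETHER_MAX_LEN_C) ? 0 : -1;
}

int
main(void)
{
	printf("9000 jumbo:    %d\n", check_max_pkt_len(9000, 1));
	printf("9000 no-jumbo: %d\n", check_max_pkt_len(9000, 0));
	return 0;
}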
RTE_PKTMBUF_HEADROOM);
/* Check if scattered RX needs to be used. */
- if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
+ if (rxq->max_pkt_len > buf_size)
dev_data->scattered_rx = 1;
- }
/* Init the RX tail register. */
I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
dev->data->rx_queues[i] = NULL;
}
- dev->data->nb_rx_queues = 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
if (!dev->data->tx_queues[i])
continue;
i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
dev->data->tx_queues[i] = NULL;
}
- dev->data->nb_tx_queues = 0;
}
#define I40E_FDIR_NUM_TX_DESC I40E_MIN_RING_DESC
qinfo->conf.offloads = txq->offloads;
}
+static eth_rx_burst_t
+i40e_get_latest_rx_vec(bool scatter)
+{
+#ifdef RTE_ARCH_X86
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ return scatter ? i40e_recv_scattered_pkts_vec_avx2 :
+ i40e_recv_pkts_vec_avx2;
+#endif
+ return scatter ? i40e_recv_scattered_pkts_vec :
+ i40e_recv_pkts_vec;
+}
+
+static eth_rx_burst_t
+i40e_get_recommend_rx_vec(bool scatter)
+{
+#ifdef RTE_ARCH_X86
+ /*
+ * since AVX frequency can be different to base frequency, limit
+ * use of AVX2 version to later platforms, not all those that could
+ * theoretically run it.
+ */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ return scatter ? i40e_recv_scattered_pkts_vec_avx2 :
+ i40e_recv_pkts_vec_avx2;
+#endif
+ return scatter ? i40e_recv_scattered_pkts_vec :
+ i40e_recv_pkts_vec;
+}
+
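Factoring the selection into i40e_get_latest_rx_vec()/i40e_get_recommend_rx_vec() separates "newest ISA the CPU supports" from "newest ISA worth using": the recommended variant gates the AVX2 code on AVX512F as a proxy for parts with a small AVX frequency penalty. A self-contained model of the pattern; cpu_has() is a stand-in for rte_cpu_get_flag_enabled():

#include <stdio.h>

typedef unsigned short (*rx_burst_t)(void *rxq, void **pkts,
				     unsigned short n);

static unsigned short
rx_scalar(void *q, void **p, unsigned short n)
{
	(void)q; (void)p;
	return n;
}

static unsigned short
rx_avx2(void *q, void **p, unsigned short n)
{
	(void)q; (void)p;
	return n;
}

/* Stand-in for rte_cpu_get_flag_enabled(); always reports "absent". */
static int
cpu_has(const char *flag)
{
	(void)flag;
	return 0;
}

static rx_burst_t
get_recommend_rx(void)
{
	/* gate the AVX2 path on AVX512F so it is only picked on parts
	 * where the AVX frequency penalty is known to be small */
	if (cpu_has("avx512f"))
		return rx_avx2;
	return rx_scalar;
}

int
main(void)
{
	rx_burst_t burst = get_recommend_rx();

	printf("%s selected\n", burst == rx_avx2 ? "avx2" : "scalar");
	return 0;
}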
void __attribute__((cold))
i40e_set_rx_function(struct rte_eth_dev *dev)
{
}
}
- if (dev->data->scattered_rx) {
- /* Set the non-LRO scattered callback: there are Vector and
- * single allocation versions.
- */
- if (ad->rx_vec_allowed) {
- PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
- "callback (port=%d).",
- dev->data->port_id);
-
- dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
-#ifdef RTE_ARCH_X86
- /*
- * since AVX frequency can be different to base
- * frequency, limit use of AVX2 version to later
- * plaforms, not all those that could theoretically
- * run it.
- */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
- dev->rx_pkt_burst =
- i40e_recv_scattered_pkts_vec_avx2;
-#endif
- } else {
- PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
- "allocation callback (port=%d).",
- dev->data->port_id);
- dev->rx_pkt_burst = i40e_recv_scattered_pkts;
- }
- /* If parameters allow we are going to choose between the following
- * callbacks:
- * - Vector
- * - Bulk Allocation
- * - Single buffer allocation (the simplest one)
- */
- } else if (ad->rx_vec_allowed) {
- PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
- "burst size no less than %d (port=%d).",
- RTE_I40E_DESCS_PER_LOOP,
- dev->data->port_id);
-
- dev->rx_pkt_burst = i40e_recv_pkts_vec;
-#ifdef RTE_ARCH_X86
- /*
- * since AVX frequency can be different to base
- * frequency, limit use of AVX2 version to later
- * plaforms, not all those that could theoretically
- * run it.
- */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
- dev->rx_pkt_burst = i40e_recv_pkts_vec_avx2;
-#endif
- } else if (ad->rx_bulk_alloc_allowed) {
+ if (ad->rx_vec_allowed) {
+ /* Vec Rx path */
+ PMD_INIT_LOG(DEBUG, "Vector Rx path will be used on port=%d.",
+ dev->data->port_id);
+ if (ad->use_latest_vec)
+ dev->rx_pkt_burst =
+ i40e_get_latest_rx_vec(dev->data->scattered_rx);
+ else
+ dev->rx_pkt_burst =
+ i40e_get_recommend_rx_vec(dev->data->scattered_rx);
+ } else if (!dev->data->scattered_rx && ad->rx_bulk_alloc_allowed) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
"satisfied. Rx Burst Bulk Alloc function "
"will be used on port=%d.",
dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
} else {
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
- "satisfied, or Scattered Rx is requested "
- "(port=%d).",
+ /* Simple Rx Path. */
+ PMD_INIT_LOG(DEBUG, "Simple Rx path will be used on port=%d.",
dev->data->port_id);
-
- dev->rx_pkt_burst = i40e_recv_pkts;
+ dev->rx_pkt_burst = dev->data->scattered_rx ?
+ i40e_recv_scattered_pkts :
+ i40e_recv_pkts;
}
/* Propagate information about RX function choice through all queues. */
txq->queue_id);
}
+static eth_tx_burst_t
+i40e_get_latest_tx_vec(void)
+{
+#ifdef RTE_ARCH_X86
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ return i40e_xmit_pkts_vec_avx2;
+#endif
+ return i40e_xmit_pkts_vec;
+}
+
+static eth_tx_burst_t
+i40e_get_recommend_tx_vec(void)
+{
+#ifdef RTE_ARCH_X86
+ /*
+ * since AVX frequency can be different to base frequency, limit
+ * use of AVX2 version to later platforms, not all those that could
+ * theoretically run it.
+ */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ return i40e_xmit_pkts_vec_avx2;
+#endif
+ return i40e_xmit_pkts_vec;
+}
+
void __attribute__((cold))
i40e_set_tx_function(struct rte_eth_dev *dev)
{
if (ad->tx_simple_allowed) {
if (ad->tx_vec_allowed) {
PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
- dev->tx_pkt_burst = i40e_xmit_pkts_vec;
-#ifdef RTE_ARCH_X86
- /*
- * since AVX frequency can be different to base
- * frequency, limit use of AVX2 version to later
- * plaforms, not all those that could theoretically
- * run it.
- */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
- dev->tx_pkt_burst = i40e_xmit_pkts_vec_avx2;
-#endif
+ if (ad->use_latest_vec)
+ dev->tx_pkt_burst =
+ i40e_get_latest_tx_vec();
+ else
+ dev->tx_pkt_burst =
+ i40e_get_recommend_tx_vec();
} else {
PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
dev->tx_pkt_burst = i40e_xmit_pkts_simple;
}
}
-/* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
-int __attribute__((weak))
+/* Stubs needed for linkage when CONFIG_RTE_LIBRTE_I40E_INC_VECTOR is set to 'n' */
+__rte_weak int
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
{
return -1;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_recv_pkts_vec(
void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_recv_scattered_pkts_vec(
void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
uint16_t __rte_unused nb_pkts)
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
uint16_t __rte_unused nb_pkts)
return 0;
}
-int __attribute__((weak))
+__rte_weak int
i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
{
return -1;
}
-int __attribute__((weak))
+__rte_weak int
i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
return -1;
}
-void __attribute__((weak))
+__rte_weak void
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
{
return;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
struct rte_mbuf __rte_unused **tx_pkts,
uint16_t __rte_unused nb_pkts)
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_xmit_pkts_vec_avx2(void __rte_unused * tx_queue,
struct rte_mbuf __rte_unused **tx_pkts,
uint16_t __rte_unused nb_pkts)
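The __rte_weak stubs exist so the scalar build links without the vector objects: a weak definition satisfies the linker and is silently overridden by the strong one when the vector file is compiled in. A minimal demonstration of the pattern; compile this file alone and the stub's -1 is returned:

#include <stdio.h>

/* Stand-in for the __rte_weak macro, i.e. __attribute__((weak)). */
#define WEAK __attribute__((weak))

/* Weak fallback: overridden by a strong definition when the vector
 * object file is linked in. */
WEAK int
rxq_vec_setup(void *rxq)
{
	(void)rxq;
	return -1;	/* vector path unavailable: caller falls back */
}

int
main(void)
{
	printf("rxq_vec_setup -> %d\n", rxq_vec_setup(NULL));
	return 0;
}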