.remove = qat_pci_remove
};
-__attribute__((weak)) int
+__rte_weak int
qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
{
return 0;
}
-__attribute__((weak)) int
+__rte_weak int
qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
{
return 0;
}
-__attribute__((weak)) int
+__rte_weak int
qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
{
return 0;
}
-__attribute__((weak)) int
+__rte_weak int
qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
{
return 0;
}
-__attribute__((weak)) int
+__rte_weak int
qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused)
{
return 0;
}
-__attribute__((weak)) int
+__rte_weak int
qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused)
{
return 0;
}

return resp_counter;
}
-__attribute__((weak)) int
+__rte_weak int
qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
{
return 0;
}

return RTE_ETH_TX_DESC_FULL;
}
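For context on why these stubs cost nothing when the real implementation is present: a weak definition satisfies the linker only when no other definition of the symbol exists, and is silently discarded when a strong one is linked in. A minimal sketch of the mechanism, with hypothetical file and function names not taken from this patch:

/* stub.c -- always compiled; the weak fallback. */
__attribute__((__weak__)) int
feature_dev_create(void)
{
	return 0;	/* no-op when the feature is not built in */
}

/* feature.c -- compiled only when the feature is enabled; this strong
 * definition overrides the weak stub at link time. */
int
feature_dev_create(void)
{
	return 1;	/* real initialization would go here */
}

/* main.c */
#include <stdio.h>

int feature_dev_create(void);

int
main(void)
{
	/* Prints 1 when feature.c is linked in, 0 otherwise. */
	printf("%d\n", feature_dev_create());
	return 0;
}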
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
avf_recv_pkts_vec(__rte_unused void *rx_queue,
__rte_unused struct rte_mbuf **rx_pkts,
__rte_unused uint16_t nb_pkts)
{
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
avf_recv_scattered_pkts_vec(__rte_unused void *rx_queue,
__rte_unused struct rte_mbuf **rx_pkts,
__rte_unused uint16_t nb_pkts)
{
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
avf_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
__rte_unused struct rte_mbuf **tx_pkts,
__rte_unused uint16_t nb_pkts)
{
return 0;
}
-int __attribute__((weak))
+__rte_weak int
avf_rxq_vec_setup(__rte_unused struct avf_rx_queue *rxq)
{
return -1;
}
-int __attribute__((weak))
+__rte_weak int
avf_txq_vec_setup(__rte_unused struct avf_tx_queue *txq)
{
return -1;
}
/* Stubs needed for linkage when vPMD is disabled */
-int __attribute__((weak))
+__rte_weak int
fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
{
return -1;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
fm10k_recv_pkts_vec(
__rte_unused void *rx_queue,
__rte_unused struct rte_mbuf **rx_pkts,
__rte_unused uint16_t nb_pkts)
{
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
fm10k_recv_scattered_pkts_vec(
__rte_unused void *rx_queue,
__rte_unused struct rte_mbuf **rx_pkts,
__rte_unused uint16_t nb_pkts)
{
return 0;
}
-int __attribute__((weak))
+__rte_weak int
fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
{
return -1;
}
-void __attribute__((weak))
+__rte_weak void
fm10k_rx_queue_release_mbufs_vec(
__rte_unused struct fm10k_rx_queue *rxq)
{
return;
}
-void __attribute__((weak))
+__rte_weak void
fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
{
return;
}
-int __attribute__((weak))
+__rte_weak int
fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
{
return -1;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
__rte_unused struct rte_mbuf **tx_pkts,
__rte_unused uint16_t nb_pkts)
{
return 0;
}
/* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
-int __attribute__((weak))
+__rte_weak int
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
{
return -1;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_recv_pkts_vec(
void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
uint16_t __rte_unused nb_pkts)
{
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_recv_scattered_pkts_vec(
void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
uint16_t __rte_unused nb_pkts)
{
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
uint16_t __rte_unused nb_pkts)
{
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
uint16_t __rte_unused nb_pkts)
{
return 0;
}
-int __attribute__((weak))
+__rte_weak int
i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
{
return -1;
}
-int __attribute__((weak))
+__rte_weak int
i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
return -1;
}
-void __attribute__((weak))
+__rte_weak void
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
{
return;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
struct rte_mbuf __rte_unused **tx_pkts,
uint16_t __rte_unused nb_pkts)
{
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
i40e_xmit_pkts_vec_avx2(void __rte_unused * tx_queue,
struct rte_mbuf __rte_unused **tx_pkts,
uint16_t __rte_unused nb_pkts)
{
return 0;
}
/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
-int __attribute__((weak))
+__rte_weak int
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
{
return -1;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
ixgbe_recv_pkts_vec(
void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
uint16_t __rte_unused nb_pkts)
{
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
ixgbe_recv_scattered_pkts_vec(
void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
uint16_t __rte_unused nb_pkts)
{
return 0;
}
-int __attribute__((weak))
+__rte_weak int
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
{
return -1;
}

* (e.g. mlx5_rxtx_vec_sse.c for x86).
*/
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused,
struct rte_mbuf **pkts __rte_unused,
uint16_t pkts_n __rte_unused)
{
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
mlx5_tx_burst_vec(void *dpdk_txq __rte_unused,
struct rte_mbuf **pkts __rte_unused,
uint16_t pkts_n __rte_unused)
{
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
struct rte_mbuf **pkts __rte_unused,
uint16_t pkts_n __rte_unused)
{
return 0;
}
-int __attribute__((weak))
+__rte_weak int
mlx5_check_raw_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
{
return -ENOTSUP;
}
-int __attribute__((weak))
+__rte_weak int
mlx5_check_vec_tx_support(struct rte_eth_dev *dev __rte_unused)
{
return -ENOTSUP;
}
-int __attribute__((weak))
+__rte_weak int
mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
{
return -ENOTSUP;
}
-int __attribute__((weak))
+__rte_weak int
mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
{
return -ENOTSUP;
}
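These mlx5 stubs return -ENOTSUP so the device setup code can probe for vector support at runtime and fall back to the scalar burst functions. A rough sketch of that selection pattern, simplified and not the driver's actual selector (the mlx5 symbols are internal to the driver, declared in mlx5_rxtx.h):

#include <rte_ethdev.h>

/* Sketch only: if the weak stub above is the only definition linked in,
 * the check returns -ENOTSUP and the scalar path is selected. */
static eth_rx_burst_t
select_rx_burst(struct rte_eth_dev *dev)
{
	if (mlx5_check_vec_rx_support(dev) > 0)
		return mlx5_rx_burst_vec;	/* vector sources compiled in */
	return mlx5_rx_burst;			/* scalar fallback */
}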
/* Stub for linkage when arch specific implementation is not available */
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
virtio_recv_pkts_vec(void *rx_queue __rte_unused,
struct rte_mbuf **rx_pkts __rte_unused,
uint16_t nb_pkts __rte_unused)
{
return 0;
}

/*
 * If the compiler doesn't support AVX2 instructions,
 * the dummy version is used instead for the AVX2 classify method.
 */
-int __attribute__ ((weak))
+__rte_weak int
rte_acl_classify_avx2(__rte_unused const struct rte_acl_ctx *ctx,
__rte_unused const uint8_t **data,
__rte_unused uint32_t *results,
__rte_unused uint32_t num,
__rte_unused uint32_t categories)
{
return -ENOTSUP;
}
-int __attribute__ ((weak))
+__rte_weak int
rte_acl_classify_sse(__rte_unused const struct rte_acl_ctx *ctx,
__rte_unused const uint8_t **data,
__rte_unused uint32_t *results,
__rte_unused uint32_t num,
__rte_unused uint32_t categories)
{
return -ENOTSUP;
}
-int __attribute__ ((weak))
+__rte_weak int
rte_acl_classify_neon(__rte_unused const struct rte_acl_ctx *ctx,
__rte_unused const uint8_t **data,
__rte_unused uint32_t *results,
__rte_unused uint32_t num,
__rte_unused uint32_t categories)
{
return -ENOTSUP;
}
-int __attribute__ ((weak))
+__rte_weak int
rte_acl_classify_altivec(__rte_unused const struct rte_acl_ctx *ctx,
__rte_unused const uint8_t **data,
__rte_unused uint32_t *results,
__rte_unused uint32_t num,
__rte_unused uint32_t categories)
{
return -ENOTSUP;
}

return bpf;
}
-__rte_experimental __attribute__ ((weak)) struct rte_bpf *
+__rte_experimental __rte_weak struct rte_bpf *
rte_bpf_elf_load(const struct rte_bpf_prm *prm, const char *fname,
const char *sname)
{
/******* Macro to mark functions and fields scheduled for removal *****/
#define __rte_deprecated __attribute__((__deprecated__))
+/**
+ * Mark a function or variable as a weak reference.
+ */
+#define __rte_weak __attribute__((__weak__))
+
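With the macro in place, a driver stub reduces to the following; the symbol name here is illustrative, mirroring the stubs converted above:

#include <rte_common.h>
#include <rte_mbuf.h>

/* Weak no-op stub: any strong definition of the same symbol, e.g. in an
 * arch-specific vector file, overrides it at link time. */
__rte_weak uint16_t
example_recv_pkts_vec(void *rx_queue __rte_unused,
		      struct rte_mbuf **rx_pkts __rte_unused,
		      uint16_t nb_pkts __rte_unused)
{
	return 0;
}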
/*********** Macros to eliminate unused variable warnings ********/
/**