#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_version.h>
-#include <rte_eal_memconfig.h>
#include <rte_net.h>
#include "ena_ethdev.h"
#define DRV_MODULE_VER_MAJOR 2
#define DRV_MODULE_VER_MINOR 0
-#define DRV_MODULE_VER_SUBMINOR 0
+#define DRV_MODULE_VER_SUBMINOR 1
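+/* Tx and Rx rings are interleaved in the device's IO queue index
+ * space: even indices address Tx queues, odd indices Rx queues.
+ */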
#define ENA_IO_TXQ_IDX(q) (2 * (q))
#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
static int ena_queue_start_all(struct rte_eth_dev *dev,
enum ena_ring_type ring_type);
static void ena_stats_restart(struct rte_eth_dev *dev);
-static void ena_infos_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
+static int ena_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
static int ena_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
uint16_t reta_size);
.reta_query = ena_rss_reta_query,
};
-#define NUMA_NO_NODE SOCKET_ID_ANY
-
-static inline int ena_cpu_to_node(int cpu)
-{
- struct rte_config *config = rte_eal_get_configuration();
- struct rte_fbarray *arr = &config->mem_config->memzones;
- const struct rte_memzone *mz;
-
- if (unlikely(cpu >= RTE_MAX_MEMZONE))
- return NUMA_NO_NODE;
-
- mz = rte_fbarray_get(arr, cpu);
-
- return mz->socket_id;
-}
-
static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
struct ena_com_rx_ctx *ena_rx_ctx)
{
}
/* check if L4 checksum is needed */
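+ /* PKT_TX_L4_MASK is a two-bit field (PKT_TX_UDP_CKSUM is bit-wise
+  * PKT_TX_TCP_CKSUM | PKT_TX_SCTP_CKSUM), so the requested checksum
+  * type has to be matched by comparing for equality; a plain AND
+  * against PKT_TX_TCP_CKSUM also fires for UDP packets.
+  */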
- if ((mbuf->ol_flags & PKT_TX_TCP_CKSUM) &&
+ if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
(queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
ena_tx_ctx->l4_csum_enable = true;
- } else if ((mbuf->ol_flags & PKT_TX_UDP_CKSUM) &&
- (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+ } else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
+ PKT_TX_UDP_CKSUM) &&
+ (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
ena_tx_ctx->l4_csum_enable = true;
} else {
}
ctx.qid = ena_qid;
ctx.msix_vector = -1; /* interrupts not used */
- ctx.numa_node = ena_cpu_to_node(ring->id);
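+ /* Use the NUMA socket recorded at queue setup time; the removed
+  * ena_cpu_to_node() wrongly treated the queue id as a memzone index.
+  */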
+ ctx.numa_node = ring->numa_socket_id;
rc = ena_com_create_io_queue(ena_dev, &ctx);
if (rc) {
static int ena_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
- __rte_unused unsigned int socket_id,
+ unsigned int socket_id,
const struct rte_eth_txconf *tx_conf)
{
struct ena_ring *txq = NULL;
txq->next_to_clean = 0;
txq->next_to_use = 0;
txq->ring_size = nb_desc;
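+ /* Keep the caller's socket hint so the IO queue can later be
+  * created on the same NUMA node.
+  */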
+ txq->numa_socket_id = socket_id;
txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info",
sizeof(struct ena_tx_buffer) *
static int ena_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
- __rte_unused unsigned int socket_id,
+ unsigned int socket_id,
__rte_unused const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
rxq->next_to_clean = 0;
rxq->next_to_use = 0;
rxq->ring_size = nb_desc;
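+ /* As on the Tx path, remember the socket for IO queue creation */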
+ rxq->numa_socket_id = socket_id;
rxq->mb_pool = mp;
rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
}
}
-static void ena_infos_get(struct rte_eth_dev *dev,
+static int ena_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct ena_adapter *adapter;
adapter->max_tx_sgl_size);
dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
adapter->max_tx_sgl_size);
+
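+ /* dev_infos_get callbacks now report a status code; 0 is success */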
+ return 0;
}
static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx);
if (unlikely(mbuf_head->ol_flags &
- (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD)))
+ (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) {
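+ /* Account bad-checksum packets as ierrors on the Rx path, replacing
+  * the bogus Rx-flag check that used to live on the Tx path.
+  */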
+ rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
++rx_ring->rx_stats.bad_csum;
+ }
mbuf_head->hash.rss = ena_rx_ctx.hash;
/* Set Tx offload flags, if applicable */
ena_tx_mbuf_prepare(mbuf, &ena_tx_ctx, tx_ring->offloads);
- if (unlikely(mbuf->ol_flags &
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD)))
- rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);
-
rte_prefetch0(tx_pkts[(sent_idx + 4) & ring_mask]);
/* Process first segment taking into