X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fena%2Fena_ethdev.c;h=b5e6db6245bf3ce4d43944cba268b05eb5ed88dc;hb=68d3524267d9136b66a239604b6c6e15a263340d;hp=702289b80302b5836bb58c6de5db827ed1d9a75c;hpb=cb6696d22023efad238709239792ec66b0920017;p=dpdk.git

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 702289b803..b5e6db6245 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -37,6 +37,9 @@
 #include
 #include
 #include
+#include
+#include
+#include

 #include "ena_ethdev.h"
 #include "ena_logs.h"
@@ -49,6 +52,10 @@
 #include
 #include

+#define DRV_MODULE_VER_MAJOR	1
+#define DRV_MODULE_VER_MINOR	0
+#define DRV_MODULE_VER_SUBMINOR	0
+
 #define ENA_IO_TXQ_IDX(q)	(2 * (q))
 #define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)
 /*reverse version of ENA_IO_RXQ_IDX*/
@@ -72,6 +79,89 @@
 #define ENA_RX_RSS_TABLE_LOG_SIZE	7
 #define ENA_RX_RSS_TABLE_SIZE	(1 << ENA_RX_RSS_TABLE_LOG_SIZE)
 #define ENA_HASH_KEY_SIZE	40
+#define ENA_ETH_SS_STATS	0xFF
+#define ETH_GSTRING_LEN	32
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+enum ethtool_stringset {
+	ETH_SS_TEST = 0,
+	ETH_SS_STATS,
+};
+
+struct ena_stats {
+	char name[ETH_GSTRING_LEN];
+	int stat_offset;
+};
+
+#define ENA_STAT_ENA_COM_ENTRY(stat) { \
+	.name = #stat, \
+	.stat_offset = offsetof(struct ena_com_stats_admin, stat) \
+}
+
+#define ENA_STAT_ENTRY(stat, stat_type) { \
+	.name = #stat, \
+	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
+}
+
+#define ENA_STAT_RX_ENTRY(stat) \
+	ENA_STAT_ENTRY(stat, rx)
+
+#define ENA_STAT_TX_ENTRY(stat) \
+	ENA_STAT_ENTRY(stat, tx)
+
+#define ENA_STAT_GLOBAL_ENTRY(stat) \
+	ENA_STAT_ENTRY(stat, dev)
+
+static const struct ena_stats ena_stats_global_strings[] = {
+	ENA_STAT_GLOBAL_ENTRY(tx_timeout),
+	ENA_STAT_GLOBAL_ENTRY(io_suspend),
+	ENA_STAT_GLOBAL_ENTRY(io_resume),
+	ENA_STAT_GLOBAL_ENTRY(wd_expired),
+	ENA_STAT_GLOBAL_ENTRY(interface_up),
+	ENA_STAT_GLOBAL_ENTRY(interface_down),
+	ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
+};
+
+static const struct ena_stats ena_stats_tx_strings[] = {
+	ENA_STAT_TX_ENTRY(cnt),
+	ENA_STAT_TX_ENTRY(bytes),
+	ENA_STAT_TX_ENTRY(queue_stop),
+	ENA_STAT_TX_ENTRY(queue_wakeup),
+	ENA_STAT_TX_ENTRY(dma_mapping_err),
+	ENA_STAT_TX_ENTRY(linearize),
+	ENA_STAT_TX_ENTRY(linearize_failed),
+	ENA_STAT_TX_ENTRY(tx_poll),
+	ENA_STAT_TX_ENTRY(doorbells),
+	ENA_STAT_TX_ENTRY(prepare_ctx_err),
+	ENA_STAT_TX_ENTRY(missing_tx_comp),
+	ENA_STAT_TX_ENTRY(bad_req_id),
+};
+
+static const struct ena_stats ena_stats_rx_strings[] = {
+	ENA_STAT_RX_ENTRY(cnt),
+	ENA_STAT_RX_ENTRY(bytes),
+	ENA_STAT_RX_ENTRY(refil_partial),
+	ENA_STAT_RX_ENTRY(bad_csum),
+	ENA_STAT_RX_ENTRY(page_alloc_fail),
+	ENA_STAT_RX_ENTRY(skb_alloc_fail),
+	ENA_STAT_RX_ENTRY(dma_mapping_err),
+	ENA_STAT_RX_ENTRY(bad_desc_num),
+	ENA_STAT_RX_ENTRY(small_copy_len_pkt),
+};
+
+static const struct ena_stats ena_stats_ena_com_strings[] = {
+	ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
+	ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
+	ENA_STAT_ENA_COM_ENTRY(completed_cmd),
+	ENA_STAT_ENA_COM_ENTRY(out_of_space),
+	ENA_STAT_ENA_COM_ENTRY(no_completion),
+};
+
+#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
+#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
+#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
+#define ENA_STATS_ARRAY_ENA_COM	ARRAY_SIZE(ena_stats_ena_com_strings)

 /** Vendor ID used by Amazon devices */
 #define PCI_VENDOR_ID_AMAZON	0x1D0F
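The tables added above follow the Linux ethtool convention: each entry pairs a printable name with the field's byte offset inside one of the stats structs, so a single generic loop can dump any of the four groups. A minimal standalone sketch of that pattern (struct ena_stats_tx is trimmed to three of the driver's counters for brevity):

    #include <inttypes.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ena_stats_tx {
        uint64_t cnt;
        uint64_t bytes;
        uint64_t doorbells;
    };

    struct ena_stats {
        char name[32];
        int stat_offset;
    };

    #define ENA_STAT_TX_ENTRY(stat) \
        { .name = #stat, .stat_offset = offsetof(struct ena_stats_tx, stat) }

    static const struct ena_stats tx_strings[] = {
        ENA_STAT_TX_ENTRY(cnt),
        ENA_STAT_TX_ENTRY(bytes),
        ENA_STAT_TX_ENTRY(doorbells),
    };

    /* One loop serves every table: the stored offset locates the counter. */
    static void dump_tx_stats(const struct ena_stats_tx *s)
    {
        size_t i;

        for (i = 0; i < sizeof(tx_strings) / sizeof(tx_strings[0]); i++) {
            const uint64_t *v = (const uint64_t *)
                ((const char *)s + tx_strings[i].stat_offset);
            printf("tx_%s: %" PRIu64 "\n", tx_strings[i].name, *v);
        }
    }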
@@ -79,12 +169,18 @@
 #define PCI_DEVICE_ID_ENA_VF	0xEC20
 #define PCI_DEVICE_ID_ENA_LLQ_VF	0xEC21

-static struct rte_pci_id pci_id_ena_map[] = {
-#define RTE_PCI_DEV_ID_DECL_ENA(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#define ENA_TX_OFFLOAD_MASK	(\
+	PKT_TX_L4_MASK |	\
+	PKT_TX_IP_CKSUM |	\
+	PKT_TX_TCP_SEG)

-	RTE_PCI_DEV_ID_DECL_ENA(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF)
-	RTE_PCI_DEV_ID_DECL_ENA(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF)
-	{.device_id = 0},
+#define ENA_TX_OFFLOAD_NOTSUP_MASK	\
+	(PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
+
+static const struct rte_pci_id pci_id_ena_map[] = {
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) },
+	{ .device_id = 0 },
 };

 static int ena_device_init(struct ena_com_dev *ena_dev,
@@ -92,6 +188,8 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
 static int ena_dev_configure(struct rte_eth_dev *dev);
 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts);
+static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+				  uint16_t nb_pkts);
 static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			      uint16_t nb_desc, unsigned int socket_id,
 			      const struct rte_eth_txconf *tx_conf);
@@ -127,8 +225,9 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
 static int ena_rss_reta_query(struct rte_eth_dev *dev,
 			      struct rte_eth_rss_reta_entry64 *reta_conf,
 			      uint16_t reta_size);
+static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);

-static struct eth_dev_ops ena_dev_ops = {
+static const struct eth_dev_ops ena_dev_ops = {
 	.dev_configure = ena_dev_configure,
 	.dev_infos_get = ena_infos_get,
 	.rx_queue_setup = ena_rx_queue_setup,
@@ -144,6 +243,18 @@ static struct eth_dev_ops ena_dev_ops = {
 	.reta_query = ena_rss_reta_query,
 };

+#define NUMA_NO_NODE	SOCKET_ID_ANY
+
+static inline int ena_cpu_to_node(int cpu)
+{
+	struct rte_config *config = rte_eal_get_configuration();
+
+	if (likely(cpu < RTE_MAX_MEMZONE))
+		return config->mem_config->memzone[cpu].socket_id;
+
+	return NUMA_NO_NODE;
+}
+
 static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
 				       struct ena_com_rx_ctx *ena_rx_ctx)
 {
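ENA_TX_OFFLOAD_NOTSUP_MASK above is derived by XOR rather than listed by hand: because ENA_TX_OFFLOAD_MASK is a subset of PKT_TX_OFFLOAD_MASK, the XOR leaves exactly the unsupported bits, and the Tx prepare path added later in this patch rejects a packet with a single AND. The same arithmetic in isolation, with made-up flag values standing in for DPDK's real PKT_TX_* bits:

    #include <assert.h>
    #include <stdint.h>

    #define F_IP_CKSUM  (1ULL << 0)
    #define F_TCP_SEG   (1ULL << 1)
    #define F_VLAN      (1ULL << 2)

    #define ALL_MASK    (F_IP_CKSUM | F_TCP_SEG | F_VLAN)
    #define SUPPORTED   (F_IP_CKSUM | F_TCP_SEG)
    #define NOTSUP      (ALL_MASK ^ SUPPORTED)  /* == F_VLAN */

    int main(void)
    {
        uint64_t ok_flags  = F_IP_CKSUM | F_TCP_SEG;
        uint64_t bad_flags = F_IP_CKSUM | F_VLAN;

        assert((ok_flags  & NOTSUP) == 0); /* would be accepted */
        assert((bad_flags & NOTSUP) != 0); /* would fail with ENOTSUP */
        return 0;
    }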
@@ -226,6 +337,100 @@
 	}
 }

+static void ena_config_host_info(struct ena_com_dev *ena_dev)
+{
+	struct ena_admin_host_info *host_info;
+	int rc;
+
+	/* Allocate only the host info */
+	rc = ena_com_allocate_host_info(ena_dev);
+	if (rc) {
+		RTE_LOG(ERR, PMD, "Cannot allocate host info\n");
+		return;
+	}
+
+	host_info = ena_dev->host_attr.host_info;
+
+	host_info->os_type = ENA_ADMIN_OS_DPDK;
+	host_info->kernel_ver = RTE_VERSION;
+	snprintf((char *)host_info->kernel_ver_str,
+		 sizeof(host_info->kernel_ver_str),
+		 "%s", rte_version());
+	host_info->os_dist = RTE_VERSION;
+	snprintf((char *)host_info->os_dist_str,
+		 sizeof(host_info->os_dist_str),
+		 "%s", rte_version());
+	host_info->driver_version =
+		(DRV_MODULE_VER_MAJOR) |
+		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
+		(DRV_MODULE_VER_SUBMINOR <<
+			ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
+
+	rc = ena_com_set_host_attributes(ena_dev);
+	if (rc) {
+		RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+		if (rc != -EPERM)
+			goto err;
+	}
+
+	return;
+
+err:
+	ena_com_delete_host_info(ena_dev);
+}
+
+static int
+ena_get_sset_count(struct rte_eth_dev *dev, int sset)
+{
+	if (sset != ETH_SS_STATS)
+		return -EOPNOTSUPP;
+
+	/* Workaround for clang:
+	 * touch internal structures to prevent
+	 * compiler error
+	 */
+	ENA_TOUCH(ena_stats_global_strings);
+	ENA_TOUCH(ena_stats_tx_strings);
+	ENA_TOUCH(ena_stats_rx_strings);
+	ENA_TOUCH(ena_stats_ena_com_strings);
+
+	return dev->data->nb_tx_queues *
+		(ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) +
+		ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
+}
+
+static void ena_config_debug_area(struct ena_adapter *adapter)
+{
+	u32 debug_area_size;
+	int rc, ss_count;
+
+	ss_count = ena_get_sset_count(adapter->rte_dev, ETH_SS_STATS);
+	if (ss_count <= 0) {
+		RTE_LOG(ERR, PMD, "SS count is not positive\n");
+		return;
+	}
+
+	/* allocate 32 bytes for each string and 64bit for the value */
+	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
+
+	rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
+	if (rc) {
+		RTE_LOG(ERR, PMD, "Cannot allocate debug area\n");
+		return;
+	}
+
+	rc = ena_com_set_host_attributes(&adapter->ena_dev);
+	if (rc) {
+		RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+		if (rc != -EPERM)
+			goto err;
+	}
+
+	return;
+err:
+	ena_com_delete_debug_area(&adapter->ena_dev);
+}
+
 static void ena_close(struct rte_eth_dev *dev)
 {
 	struct ena_adapter *adapter =
@@ -477,8 +682,7 @@ static void ena_rx_queue_release_bufs(struct ena_ring *ring)
 		if (m)
 			__rte_mbuf_raw_free(m);

-		ring->next_to_clean =
-			ENA_CIRC_INC(ring->next_to_clean, 1, ring->ring_size);
+		ring->next_to_clean++;
 	}
 }

@@ -493,8 +697,7 @@ static void ena_tx_queue_release_bufs(struct ena_ring *ring)
 		if (tx_buf->mbuf)
 			rte_pktmbuf_free(tx_buf->mbuf);

-		ring->next_to_clean =
-			ENA_CIRC_INC(ring->next_to_clean, 1, ring->ring_size);
+		ring->next_to_clean++;
 	}
 }

@@ -538,7 +741,7 @@ static int ena_queue_restart_all(struct rte_eth_dev *dev,

 			if (rc) {
 				PMD_INIT_LOG(ERR,
-					     "failed to restart queue %d type(%d)\n",
+					     "failed to restart queue %d type(%d)",
 					     i, ring_type);
 				return -1;
 			}
@@ -564,7 +767,7 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
 	uint32_t max_frame_len = ena_get_mtu_conf(adapter);

 	if (max_frame_len > adapter->max_mtu) {
-		PMD_INIT_LOG(ERR, "Unsupported MTU of %d\n", max_frame_len);
+		PMD_INIT_LOG(ERR, "Unsupported MTU of %d", max_frame_len);
 		return -1;
 	}

@@ -591,7 +794,7 @@ ena_calc_queue_size(struct ena_com_dev *ena_dev,
 		queue_size = rte_align32pow2(queue_size >> 1);

 	if (queue_size == 0) {
-		PMD_INIT_LOG(ERR, "Invalid queue size\n");
+		PMD_INIT_LOG(ERR, "Invalid queue size");
 		return -EFAULT;
 	}

@@ -727,9 +930,9 @@ static int ena_queue_restart(struct ena_ring *ring)
 	if (ring->type == ENA_RING_TYPE_TX)
 		return 0;

-	rc = ena_populate_rx_queue(ring, ring->ring_size - 1);
-	if ((unsigned int)rc != ring->ring_size - 1) {
-		PMD_INIT_LOG(ERR, "Failed to populate rx ring !\n");
+	rc = ena_populate_rx_queue(ring, ring->ring_size);
+	if ((unsigned int)rc != ring->ring_size) {
+		PMD_INIT_LOG(ERR, "Failed to populate rx ring !");
 		return (-1);
 	}

@@ -742,6 +945,10 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 			      __rte_unused unsigned int socket_id,
 			      __rte_unused const struct rte_eth_txconf *tx_conf)
 {
+	struct ena_com_create_io_ctx ctx =
+		/* policy set to _HOST just to satisfy icc compiler */
+		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
+		  ENA_COM_IO_QUEUE_DIRECTION_TX, 0, 0, 0, 0 };
 	struct ena_ring *txq = NULL;
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
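ena_config_host_info() packs the three DRV_MODULE_VER_* components into the single driver_version word reported over the admin queue. A self-checking sketch of that packing; the shift widths of 8 and 16 bits are an assumption modeled on typical ena_admin definitions, not something this diff shows:

    #include <assert.h>
    #include <stdint.h>

    /* assumed layout: major in bits 0-7, minor in 8-15, subminor in 16-23 */
    #define MINOR_SHIFT     8
    #define SUB_MINOR_SHIFT 16

    static uint32_t pack_driver_version(uint32_t major, uint32_t minor,
                                        uint32_t subminor)
    {
        return major | (minor << MINOR_SHIFT) | (subminor << SUB_MINOR_SHIFT);
    }

    int main(void)
    {
        /* this patch's 1.0.0 packs to 0x000001 */
        assert(pack_driver_version(1, 0, 0) == 0x000001);
        /* a hypothetical 1.2.3 would pack to 0x030201 */
        assert(pack_driver_version(1, 2, 3) == 0x030201);
        return 0;
    }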
@@ -759,6 +966,13 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		return -1;
 	}

+	if (!rte_is_power_of_2(nb_desc)) {
+		RTE_LOG(ERR, PMD,
+			"Unsupported size of TX queue: %d is not a power of 2.",
+			nb_desc);
+		return -EINVAL;
+	}
+
 	if (nb_desc > adapter->tx_ring_size) {
 		RTE_LOG(ERR, PMD,
 			"Unsupported size of TX queue (max size: %d)\n",
@@ -767,11 +981,15 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 	}

 	ena_qid = ENA_IO_TXQ_IDX(queue_idx);
-	rc = ena_com_create_io_queue(ena_dev, ena_qid,
-				     ENA_COM_IO_QUEUE_DIRECTION_TX,
-				     ena_dev->tx_mem_queue_type,
-				     -1 /* admin interrupts is not used */,
-				     nb_desc);
+
+	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
+	ctx.qid = ena_qid;
+	ctx.msix_vector = -1; /* admin interrupts not used */
+	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
+	ctx.queue_size = adapter->tx_ring_size;
+	ctx.numa_node = ena_cpu_to_node(queue_idx);
+
+	rc = ena_com_create_io_queue(ena_dev, &ctx);
 	if (rc) {
 		RTE_LOG(ERR, PMD,
 			"failed to create io TX queue #%d (qid:%d) rc: %d\n",
@@ -780,6 +998,17 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
 	txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];

+	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
+				     &txq->ena_com_io_sq,
+				     &txq->ena_com_io_cq);
+	if (rc) {
+		RTE_LOG(ERR, PMD,
+			"Failed to get TX queue handlers. TX queue num %d rc: %d\n",
+			queue_idx, rc);
+		ena_com_destroy_io_queue(ena_dev, ena_qid);
+		goto err;
+	}
+
 	txq->port_id = dev->data->port_id;
 	txq->next_to_clean = 0;
 	txq->next_to_use = 0;
@@ -808,7 +1037,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 	/* Store pointer to this queue in upper layer */
 	txq->configured = 1;
 	dev->data->tx_queues[queue_idx] = txq;
-
+err:
 	return rc;
 }

@@ -819,6 +1048,10 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 			      __rte_unused const struct rte_eth_rxconf *rx_conf,
 			      struct rte_mempool *mp)
 {
+	struct ena_com_create_io_ctx ctx =
+		/* policy set to _HOST just to satisfy icc compiler */
+		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
+		  ENA_COM_IO_QUEUE_DIRECTION_RX, 0, 0, 0, 0 };
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
 	struct ena_ring *rxq = NULL;
@@ -834,6 +1067,13 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return -1;
 	}

+	if (!rte_is_power_of_2(nb_desc)) {
+		RTE_LOG(ERR, PMD,
+			"Unsupported size of RX queue: %d is not a power of 2.",
+			nb_desc);
+		return -EINVAL;
+	}
+
 	if (nb_desc > adapter->rx_ring_size) {
 		RTE_LOG(ERR, PMD,
 			"Unsupported size of RX queue (max size: %d)\n",
@@ -842,11 +1082,15 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 	}

 	ena_qid = ENA_IO_RXQ_IDX(queue_idx);
-	rc = ena_com_create_io_queue(ena_dev, ena_qid,
-				     ENA_COM_IO_QUEUE_DIRECTION_RX,
-				     ENA_ADMIN_PLACEMENT_POLICY_HOST,
-				     -1 /* admin interrupts not used */,
-				     nb_desc);
+
+	ctx.qid = ena_qid;
+	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
+	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+	ctx.msix_vector = -1; /* admin interrupts not used */
+	ctx.queue_size = adapter->rx_ring_size;
+	ctx.numa_node = ena_cpu_to_node(queue_idx);
+
+	rc = ena_com_create_io_queue(ena_dev, &ctx);
 	if (rc)
 		RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n",
 			queue_idx, rc);
@@ -854,6 +1098,16 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
 	rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];

+	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
+				     &rxq->ena_com_io_sq,
+				     &rxq->ena_com_io_cq);
+	if (rc) {
+		RTE_LOG(ERR, PMD,
+			"Failed to get RX queue handlers. RX queue num %d rc: %d\n",
+			queue_idx, rc);
+		ena_com_destroy_io_queue(ena_dev, ena_qid);
+	}
+
 	rxq->port_id = dev->data->port_id;
 	rxq->next_to_clean = 0;
 	rxq->next_to_use = 0;
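Both setup paths now insist on power-of-two ring sizes. That restriction is what lets the rest of the patch drop ENA_CIRC_INC/ENA_CIRC_COUNT and run next_to_use/next_to_clean as free-running uint16_t counters that are masked only at array access: unsigned wraparound keeps their difference equal to the in-flight count even across the 16-bit overflow. A standalone demonstration:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint16_t ring_size = 8;   /* must be a power of two */
        const uint16_t ring_mask = ring_size - 1;

        uint16_t next_to_use   = 65534; /* about to wrap */
        uint16_t next_to_clean = 65532;

        /* the in-flight count survives the 16-bit wrap */
        assert((uint16_t)(next_to_use - next_to_clean) == 2);

        next_to_use++;                  /* 65535 */
        next_to_use++;                  /* wraps to 0 */
        assert((uint16_t)(next_to_use - next_to_clean) == 4);

        /* the actual slot is always recovered by masking */
        assert((next_to_use & ring_mask) == 0);
        return 0;
    }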
@@ -879,23 +1133,25 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 {
 	unsigned int i;
 	int rc;
-	unsigned int ring_size = rxq->ring_size;
-	unsigned int ring_mask = ring_size - 1;
-	int next_to_use = rxq->next_to_use & ring_mask;
+	uint16_t ring_size = rxq->ring_size;
+	uint16_t ring_mask = ring_size - 1;
+	uint16_t next_to_use = rxq->next_to_use;
+	uint16_t in_use;
 	struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];

 	if (unlikely(!count))
 		return 0;

-	ena_assert_msg((((ENA_CIRC_COUNT(rxq->next_to_use, rxq->next_to_clean,
-					 rxq->ring_size)) +
-			 count) < rxq->ring_size), "bad ring state");
+	in_use = rxq->next_to_use - rxq->next_to_clean;
+	ena_assert_msg(((in_use + count) <= ring_size), "bad ring state");

-	count = RTE_MIN(count, ring_size - next_to_use);
+	count = RTE_MIN(count,
+			(uint16_t)(ring_size - (next_to_use & ring_mask)));

 	/* get resources for incoming packets */
 	rc = rte_mempool_get_bulk(rxq->mb_pool,
-				  (void **)(&mbufs[next_to_use]), count);
+				  (void **)(&mbufs[next_to_use & ring_mask]),
+				  count);
 	if (unlikely(rc < 0)) {
 		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
 		PMD_RX_LOG(DEBUG, "there are no enough free buffers");
@@ -903,7 +1159,8 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 	}

 	for (i = 0; i < count; i++) {
-		struct rte_mbuf *mbuf = mbufs[next_to_use];
+		uint16_t next_to_use_masked = next_to_use & ring_mask;
+		struct rte_mbuf *mbuf = mbufs[next_to_use_masked];
 		struct ena_com_buf ebuf;

 		rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
@@ -912,18 +1169,22 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 		ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
 		/* pass resource to device */
 		rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
-						&ebuf, next_to_use);
+						&ebuf, next_to_use_masked);
 		if (unlikely(rc)) {
 			RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
 			break;
 		}
-		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, ring_size);
+		next_to_use++;
 	}

-	rte_wmb();
-	rxq->next_to_use = next_to_use;
-	/* let HW know that it can fill buffers with data */
-	ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
+	/* When we submitted free resources to device... */
+	if (i > 0) {
+		/* ...let HW know that it can fill buffers with data */
+		rte_wmb();
+		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
+
+		rxq->next_to_use = next_to_use;
+	}

 	return i;
 }
@@ -932,6 +1193,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
 			   struct ena_com_dev_get_features_ctx *get_feat_ctx)
 {
 	int rc;
+	bool readless_supported;

 	/* Initialize mmio registers */
 	rc = ena_com_mmio_reg_read_request_init(ena_dev);
@@ -940,6 +1202,14 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
 		return rc;
 	}

+	/* The PCIe configuration space revision id indicates if mmio reg
+	 * read is disabled.
+	 */
+	readless_supported =
+		!(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id
+		  & ENA_MMIO_DISABLE_REG_READ);
+	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
+
 	/* reset device */
 	rc = ena_com_dev_reset(ena_dev);
 	if (rc) {
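ena_populate_rx_queue() now rings the doorbell only when at least one descriptor was actually posted: the rte_wmb() orders the descriptor writes ahead of the doorbell write, and an empty batch skips both the barrier and the MMIO access. The pattern reduced to a skeleton, with fill_one() and ring_doorbell() as hypothetical stand-ins for the ena_com calls:

    #include <rte_atomic.h>    /* rte_wmb() */

    int fill_one(void);        /* posts one descriptor, 0 on success */
    void ring_doorbell(void);  /* MMIO write picked up by the NIC */

    static unsigned int refill(unsigned int count)
    {
        unsigned int i;

        for (i = 0; i < count; i++)
            if (fill_one() != 0)
                break;         /* stop at the first failure */

        if (i > 0) {
            rte_wmb();         /* descriptors visible before doorbell */
            ring_doorbell();
        }

        return i;              /* tell the caller how many were posted */
    }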
@@ -970,6 +1240,8 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
 	 */
 	ena_com_set_admin_polling_mode(ena_dev, true);

+	ena_config_host_info(ena_dev);
+
 	/* Get Device Attributes and features */
 	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
 	if (rc) {
@@ -1006,16 +1278,17 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	eth_dev->dev_ops = &ena_dev_ops;
 	eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
 	eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
+	eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
 	adapter->rte_eth_dev_data = eth_dev->data;
 	adapter->rte_dev = eth_dev;

 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;

-	pci_dev = eth_dev->pci_dev;
+	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
 	adapter->pdev = pci_dev;

-	PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n",
+	PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
 		     pci_dev->addr.domain,
 		     pci_dev->addr.bus,
 		     pci_dev->addr.devid,
@@ -1032,7 +1305,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	else if (adapter->regs)
 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
 	else
-		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
+		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)",
 			     ENA_REGS_BAR);

 	ena_dev->reg_bar = adapter->regs;
@@ -1046,7 +1319,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	/* device specific initialization routine */
 	rc = ena_device_init(ena_dev, &get_feat_ctx);
 	if (rc) {
-		PMD_INIT_LOG(CRIT, "Failed to init ENA device\n");
+		PMD_INIT_LOG(CRIT, "Failed to init ENA device");
 		return -1;
 	}

@@ -1054,7 +1327,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 		if (get_feat_ctx.max_queues.max_llq_num == 0) {
 			PMD_INIT_LOG(ERR,
 				     "Trying to use LLQ but llq_num is 0.\n"
-				     "Fall back into regular queues.\n");
+				     "Fall back into regular queues.");
 			ena_dev->tx_mem_queue_type =
 				ENA_ADMIN_PLACEMENT_POLICY_HOST;
 			adapter->num_queues =
@@ -1077,9 +1350,15 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	/* prepare ring structures */
 	ena_init_rings(adapter);

+	ena_config_debug_area(adapter);
+
 	/* Set max MTU for this device */
 	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;

+	/* set device support for TSO */
+	adapter->tso4_supported = get_feat_ctx.offload.tx &
+				  ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
+
 	/* Copy MAC address and point DPDK to it */
 	eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr;
 	ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr,
@@ -1106,7 +1385,7 @@ static int ena_dev_configure(struct rte_eth_dev *dev)

 	if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
 	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
-		PMD_INIT_LOG(ERR, "Illegal adapter state: %d\n",
+		PMD_INIT_LOG(ERR, "Illegal adapter state: %d",
 			     adapter->state);
 		return -1;
 	}
@@ -1168,6 +1447,8 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 	ena_dev = &adapter->ena_dev;
 	ena_assert_msg(ena_dev != NULL, "Uninitialized device");

+	dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+
 	dev_info->speed_capa =
 			ETH_LINK_SPEED_1G   |
 			ETH_LINK_SPEED_2_5G |
@@ -1196,7 +1477,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 			DEV_TX_OFFLOAD_UDP_CKSUM |
 			DEV_TX_OFFLOAD_TCP_CKSUM;

-	if (feat.offload.tx &
+	if (feat.offload.rx_supported &
 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
 		rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
 			   DEV_RX_OFFLOAD_UDP_CKSUM |
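Because eth_dev->tx_pkt_prepare was wired to eth_ena_prep_pkts() above, applications can have offload metadata vetted before transmission. A usage sketch (port, queue and the burst array are hypothetical; in the DPDK release this patch targets, rte_eth_tx_prepare() returns the count of packets that passed and sets rte_errno for the first one that did not):

    #include <rte_ethdev.h>
    #include <rte_errno.h>
    #include <rte_mbuf.h>

    static uint16_t
    send_burst(uint8_t port, uint16_t queue, struct rte_mbuf **pkts, uint16_t n)
    {
        /* invokes eth_ena_prep_pkts() on an ENA port */
        uint16_t ready = rte_eth_tx_prepare(port, queue, pkts, n);

        if (ready != n)
            /* pkts[ready] was rejected, e.g. with ENOTSUP; transmit the
             * ones that passed and let the caller deal with the rest. */
            return rte_eth_tx_burst(port, queue, pkts, ready);

        return rte_eth_tx_burst(port, queue, pkts, n);
    }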
@@ -1222,7 +1503,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	unsigned int ring_size = rx_ring->ring_size;
 	unsigned int ring_mask = ring_size - 1;
 	uint16_t next_to_clean = rx_ring->next_to_clean;
-	int desc_in_use = 0;
+	uint16_t desc_in_use = 0;
 	unsigned int recv_idx = 0;
 	struct rte_mbuf *mbuf = NULL;
 	struct rte_mbuf *mbuf_head = NULL;
@@ -1240,8 +1521,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		return 0;
 	}

-	desc_in_use = ENA_CIRC_COUNT(rx_ring->next_to_use,
-				     next_to_clean, ring_size);
+	desc_in_use = rx_ring->next_to_use - next_to_clean;
 	if (unlikely(nb_pkts > desc_in_use))
 		nb_pkts = desc_in_use;

@@ -1282,8 +1562,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			mbuf_prev = mbuf;
 			segments++;
-			next_to_clean =
-				ENA_RX_RING_IDX_NEXT(next_to_clean, ring_size);
+			next_to_clean++;
 		}

 		/* fill mbuf attributes if any */
@@ -1296,19 +1575,80 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	}

 	/* Burst refill to save doorbells, memory barriers, const interval */
-	if (ring_size - desc_in_use - 1 > ENA_RING_DESCS_RATIO(ring_size))
-		ena_populate_rx_queue(rx_ring, ring_size - desc_in_use - 1);
+	if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size))
+		ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);

-	rx_ring->next_to_clean = next_to_clean & ring_mask;
+	rx_ring->next_to_clean = next_to_clean;

 	return recv_idx;
 }

+static uint16_t
+eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+		  uint16_t nb_pkts)
+{
+	int32_t ret;
+	uint32_t i;
+	struct rte_mbuf *m;
+	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
+	struct ipv4_hdr *ip_hdr;
+	uint64_t ol_flags;
+	uint16_t frag_field;
+
+	/* ENA needs partial checksum for TSO packets only, skip early */
+	if (!tx_ring->adapter->tso4_supported)
+		return nb_pkts;
+
+	for (i = 0; i != nb_pkts; i++) {
+		m = tx_pkts[i];
+		ol_flags = m->ol_flags;
+
+		if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
+		    (ol_flags & PKT_TX_L4_MASK) == PKT_TX_SCTP_CKSUM) {
+			rte_errno = -ENOTSUP;
+			return i;
+		}
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+		ret = rte_validate_tx_offload(m);
+		if (ret != 0) {
+			rte_errno = ret;
+			return i;
+		}
+#endif
+
+		if (!(m->ol_flags & PKT_TX_IPV4))
+			continue;
+
+		ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+						 m->l2_len);
+		frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
+		if (frag_field & IPV4_HDR_DF_FLAG)
+			continue;
+
+		/* In case we are supposed to TSO and have DF not set (DF=0)
+		 * hardware must be provided with partial checksum, otherwise
+		 * it will take care of necessary calculations.
+		 */
+
+		ret = rte_net_intel_cksum_flags_prepare(m,
+			ol_flags & ~PKT_TX_TCP_SEG);
+		if (ret != 0) {
+			rte_errno = ret;
+			return i;
+		}
+	}
+
+	return i;
+}
+
 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts)
 {
 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
-	unsigned int next_to_use = tx_ring->next_to_use;
+	uint16_t next_to_use = tx_ring->next_to_use;
+	uint16_t next_to_clean = tx_ring->next_to_clean;
 	struct rte_mbuf *mbuf;
 	unsigned int ring_size = tx_ring->ring_size;
 	unsigned int ring_mask = ring_size - 1;
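The DF test in eth_ena_prep_pkts() reads fragment_offset in network byte order and checks bit 14; DPDK's IPV4_HDR_DF_FLAG is (1 << 14), i.e. 0x4000. The check in isolation, with ntohs() standing in for rte_be_to_cpu_16():

    #include <arpa/inet.h>  /* htons, ntohs */
    #include <assert.h>
    #include <stdint.h>

    #define DF_FLAG 0x4000  /* same value as IPV4_HDR_DF_FLAG */

    int main(void)
    {
        uint16_t frag_be = htons(DF_FLAG);    /* DF set, offset 0 */
        assert(ntohs(frag_be) & DF_FLAG);     /* skip the cksum fixup */

        frag_be = htons(0x0000);              /* DF clear */
        assert(!(ntohs(frag_be) & DF_FLAG));  /* needs partial cksum */
        return 0;
    }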
@@ -1316,7 +1656,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	struct ena_tx_buffer *tx_info;
 	struct ena_com_buf *ebuf;
 	uint16_t rc, req_id, total_tx_descs = 0;
-	int sent_idx = 0;
+	uint16_t sent_idx = 0, empty_tx_reqs;
 	int nb_hw_desc;

 	/* Check adapter state */
@@ -1326,10 +1666,14 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		return 0;
 	}

+	empty_tx_reqs = ring_size - (next_to_use - next_to_clean);
+	if (nb_pkts > empty_tx_reqs)
+		nb_pkts = empty_tx_reqs;
+
 	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
 		mbuf = tx_pkts[sent_idx];

-		req_id = tx_ring->empty_tx_reqs[next_to_use];
+		req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask];
 		tx_info = &tx_ring->tx_buffer_info[req_id];
 		tx_info->mbuf = mbuf;
 		tx_info->num_of_bufs = 0;
@@ -1392,12 +1736,17 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,

 		tx_info->tx_descs = nb_hw_desc;

-		next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, ring_size);
+		next_to_use++;
 	}

-	/* Let HW do it's best :-) */
-	rte_wmb();
-	ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+	/* If there are ready packets to be xmitted... */
+	if (sent_idx > 0) {
+		/* ...let HW do its best :-) */
+		rte_wmb();
+		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+
+		tx_ring->next_to_use = next_to_use;
+	}

 	/* Clear complete packets */
 	while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {
@@ -1410,45 +1759,34 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		rte_pktmbuf_free(mbuf);

 		/* Put back descriptor to the ring for reuse */
-		tx_ring->empty_tx_reqs[tx_ring->next_to_clean] = req_id;
-		tx_ring->next_to_clean =
-			ENA_TX_RING_IDX_NEXT(tx_ring->next_to_clean,
-					     tx_ring->ring_size);
+		tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
+		next_to_clean++;

 		/* If too many descs to clean, leave it for another run */
 		if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))
 			break;
 	}

-	/* acknowledge completion of sent packets */
-	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
-	tx_ring->next_to_use = next_to_use;
+	if (total_tx_descs > 0) {
+		/* acknowledge completion of sent packets */
+		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
+		tx_ring->next_to_clean = next_to_clean;
+	}
+
 	return sent_idx;
 }

 static struct eth_driver rte_ena_pmd = {
-	{
-		.name = "rte_ena_pmd",
+	.pci_drv = {
 		.id_table = pci_id_ena_map,
 		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+		.probe = rte_eth_dev_pci_probe,
+		.remove = rte_eth_dev_pci_remove,
 	},
 	.eth_dev_init = eth_ena_dev_init,
 	.dev_private_size = sizeof(struct ena_adapter),
 };

-static int
-rte_ena_pmd_init(const char *name __rte_unused,
-		 const char *params __rte_unused)
-{
-	rte_eth_driver_register(&rte_ena_pmd);
-	return 0;
-};
-
-struct rte_driver ena_pmd_drv = {
-	.name = "ena_driver",
-	.type = PMD_PDEV,
-	.init = rte_ena_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(ena_pmd_drv, ena);
-DRIVER_REGISTER_PCI_TABLE(ena, pci_id_ena_map);
+RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio");