diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index f5e812d507..14f776b5ad 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -4,12 +4,6 @@
  */
 
 #include
-#include
-#include
-#include
-#include
-#include
-#include
 #include
 #include
 #include
@@ -27,24 +21,15 @@
 #include
 
 #define DRV_MODULE_VER_MAJOR	2
-#define DRV_MODULE_VER_MINOR	3
+#define DRV_MODULE_VER_MINOR	4
 #define DRV_MODULE_VER_SUBMINOR	0
 
-#define ENA_IO_TXQ_IDX(q)	(2 * (q))
-#define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)
-/*reverse version of ENA_IO_RXQ_IDX*/
-#define ENA_IO_RXQ_IDX_REV(q)	((q - 1) / 2)
-
 #define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
-#define TEST_BIT(val, bit_shift) (val & (1UL << bit_shift))
 
 #define GET_L4_HDR_LEN(mbuf)					\
 	((rte_pktmbuf_mtod_offset(mbuf,	struct rte_tcp_hdr *,	\
 		mbuf->l3_len + mbuf->l2_len)->data_off) >> 4)
 
-#define ENA_RX_RSS_TABLE_LOG_SIZE	7
-#define ENA_RX_RSS_TABLE_SIZE	(1 << ENA_RX_RSS_TABLE_LOG_SIZE)
-#define ENA_HASH_KEY_SIZE	40
 #define ETH_GSTRING_LEN	32
 
 #define ARRAY_SIZE(x) RTE_DIM(x)
@@ -213,22 +198,16 @@
 static void ena_rx_queue_release_bufs(struct ena_ring *ring);
 static void ena_tx_queue_release_bufs(struct ena_ring *ring);
 static int ena_link_update(struct rte_eth_dev *dev, int wait_to_complete);
-static int ena_create_io_queue(struct ena_ring *ring);
+static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring);
 static void ena_queue_stop(struct ena_ring *ring);
 static void ena_queue_stop_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
-static int ena_queue_start(struct ena_ring *ring);
+static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring);
 static int ena_queue_start_all(struct rte_eth_dev *dev,
			       enum ena_ring_type ring_type);
 static void ena_stats_restart(struct rte_eth_dev *dev);
 static int ena_infos_get(struct rte_eth_dev *dev,
			 struct rte_eth_dev_info *dev_info);
-static int ena_rss_reta_update(struct rte_eth_dev *dev,
-			       struct rte_eth_rss_reta_entry64 *reta_conf,
-			       uint16_t reta_size);
-static int ena_rss_reta_query(struct rte_eth_dev *dev,
-			      struct rte_eth_rss_reta_entry64 *reta_conf,
-			      uint16_t reta_size);
 static void ena_interrupt_handler_rte(void *cb_arg);
 static void ena_timer_wd_callback(struct rte_timer *timer, void *arg);
 static void ena_destroy_device(struct rte_eth_dev *eth_dev);
@@ -249,6 +228,11 @@ static int ena_process_bool_devarg(const char *key,
 static int ena_parse_devargs(struct ena_adapter *adapter,
			     struct rte_devargs *devargs);
 static int ena_copy_eni_stats(struct ena_adapter *adapter);
+static int ena_setup_rx_intr(struct rte_eth_dev *dev);
+static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev,
+				    uint16_t queue_id);
+static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
+				     uint16_t queue_id);
 
 static const struct eth_dev_ops ena_dev_ops = {
 	.dev_configure = ena_dev_configure,
@@ -269,27 +253,15 @@ static const struct eth_dev_ops ena_dev_ops = {
 	.dev_reset = ena_dev_reset,
 	.reta_update = ena_rss_reta_update,
 	.reta_query = ena_rss_reta_query,
+	.rx_queue_intr_enable = ena_rx_queue_intr_enable,
+	.rx_queue_intr_disable = ena_rx_queue_intr_disable,
+	.rss_hash_update = ena_rss_hash_update,
+	.rss_hash_conf_get = ena_rss_hash_conf_get,
 };
 
-void ena_rss_key_fill(void *key, size_t size)
-{
-	static bool key_generated;
-	static uint8_t default_key[ENA_HASH_KEY_SIZE];
-	size_t i;
-
-	RTE_ASSERT(size <= ENA_HASH_KEY_SIZE);
-
-	if (!key_generated) {
-		for (i = 0; i < ENA_HASH_KEY_SIZE; ++i)
-			default_key[i] = rte_rand() & 0xff;
-		key_generated = true;
-	}
-
-	rte_memcpy(key, default_key, size);
-}
-
 static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
-				       struct ena_com_rx_ctx *ena_rx_ctx)
+				       struct ena_com_rx_ctx *ena_rx_ctx,
+				       bool fill_hash)
 {
 	uint64_t ol_flags = 0;
 	uint32_t packet_type = 0;
@@ -317,7 +289,8 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
 	else
 		ol_flags |= PKT_RX_L4_CKSUM_GOOD;
 
-	if (likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) {
+	if (fill_hash &&
+	    likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) {
 		ol_flags |= PKT_RX_RSS_HASH;
 		mbuf->hash.rss = ena_rx_ctx->hash;
 	}
@@ -399,9 +372,9 @@ static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
 	}
 
 	if (tx_info)
-		PMD_DRV_LOG(ERR, "tx_info doesn't have valid mbuf\n");
+		PMD_TX_LOG(ERR, "tx_info doesn't have valid mbuf\n");
 	else
-		PMD_DRV_LOG(ERR, "Invalid req_id: %hu\n", req_id);
+		PMD_TX_LOG(ERR, "Invalid req_id: %hu\n", req_id);
 
 	/* Trigger device reset */
 	++tx_ring->tx_stats.bad_req_id;
@@ -439,7 +412,8 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
 	host_info->num_cpus = rte_lcore_count();
 
 	host_info->driver_supported_features =
-		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK;
+		ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
+		ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
 
 	rc = ena_com_set_host_attributes(ena_dev);
 	if (rc) {
@@ -549,151 +523,6 @@ ena_dev_reset(struct rte_eth_dev *dev)
 	return rc;
 }
 
-static int ena_rss_reta_update(struct rte_eth_dev *dev,
-			       struct rte_eth_rss_reta_entry64 *reta_conf,
-			       uint16_t reta_size)
-{
-	struct ena_adapter *adapter = dev->data->dev_private;
-	struct ena_com_dev *ena_dev = &adapter->ena_dev;
-	int rc, i;
-	u16 entry_value;
-	int conf_idx;
-	int idx;
-
-	if ((reta_size == 0) || (reta_conf == NULL))
-		return -EINVAL;
-
-	if (reta_size > ENA_RX_RSS_TABLE_SIZE) {
-		PMD_DRV_LOG(WARNING,
-			"Requested indirection table size (%d) is bigger than supported: %d\n",
-			reta_size, ENA_RX_RSS_TABLE_SIZE);
-		return -EINVAL;
-	}
-
-	for (i = 0 ; i < reta_size ; i++) {
-		/* each reta_conf is for 64 entries.
-		 * to support 128 we use 2 conf of 64
-		 */
-		conf_idx = i / RTE_RETA_GROUP_SIZE;
-		idx = i % RTE_RETA_GROUP_SIZE;
-		if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
-			entry_value =
-				ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);
-
-			rc = ena_com_indirect_table_fill_entry(ena_dev,
-							       i,
-							       entry_value);
-			if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
-				PMD_DRV_LOG(ERR,
-					"Cannot fill indirect table\n");
-				return rc;
-			}
-		}
-	}
-
-	rte_spinlock_lock(&adapter->admin_lock);
-	rc = ena_com_indirect_table_set(ena_dev);
-	rte_spinlock_unlock(&adapter->admin_lock);
-	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
-		PMD_DRV_LOG(ERR, "Cannot flush the indirect table\n");
-		return rc;
-	}
-
-	PMD_DRV_LOG(DEBUG, "RSS configured %d entries for port %d\n",
-		reta_size, dev->data->port_id);
-
-	return 0;
-}
-
-/* Query redirection table. */
-static int ena_rss_reta_query(struct rte_eth_dev *dev,
-			      struct rte_eth_rss_reta_entry64 *reta_conf,
-			      uint16_t reta_size)
-{
-	struct ena_adapter *adapter = dev->data->dev_private;
-	struct ena_com_dev *ena_dev = &adapter->ena_dev;
-	int rc;
-	int i;
-	u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
-	int reta_conf_idx;
-	int reta_idx;
-
-	if (reta_size == 0 || reta_conf == NULL ||
-	    (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
-		return -EINVAL;
-
-	rte_spinlock_lock(&adapter->admin_lock);
-	rc = ena_com_indirect_table_get(ena_dev, indirect_table);
-	rte_spinlock_unlock(&adapter->admin_lock);
-	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
-		PMD_DRV_LOG(ERR, "Cannot get indirection table\n");
-		return -ENOTSUP;
-	}
-
-	for (i = 0 ; i < reta_size ; i++) {
-		reta_conf_idx = i / RTE_RETA_GROUP_SIZE;
-		reta_idx = i % RTE_RETA_GROUP_SIZE;
-		if (TEST_BIT(reta_conf[reta_conf_idx].mask, reta_idx))
-			reta_conf[reta_conf_idx].reta[reta_idx] =
-				ENA_IO_RXQ_IDX_REV(indirect_table[i]);
-	}
-
-	return 0;
-}
-
-static int ena_rss_init_default(struct ena_adapter *adapter)
-{
-	struct ena_com_dev *ena_dev = &adapter->ena_dev;
-	uint16_t nb_rx_queues = adapter->edev_data->nb_rx_queues;
-	int rc, i;
-	u32 val;
-
-	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
-	if (unlikely(rc)) {
-		PMD_DRV_LOG(ERR, "Cannot init indirection table\n");
-		goto err_rss_init;
-	}
-
-	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
-		val = i % nb_rx_queues;
-		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
-						       ENA_IO_RXQ_IDX(val));
-		if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
-			PMD_DRV_LOG(ERR, "Cannot fill indirection table\n");
-			goto err_fill_indir;
-		}
-	}
-
-	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
-					ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
-	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
-		PMD_DRV_LOG(INFO, "Cannot fill hash function\n");
-		goto err_fill_indir;
-	}
-
-	rc = ena_com_set_default_hash_ctrl(ena_dev);
-	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
-		PMD_DRV_LOG(INFO, "Cannot fill hash control\n");
-		goto err_fill_indir;
-	}
-
-	rc = ena_com_indirect_table_set(ena_dev);
-	if (unlikely(rc && (rc != ENA_COM_UNSUPPORTED))) {
-		PMD_DRV_LOG(ERR, "Cannot flush indirection table\n");
-		goto err_fill_indir;
-	}
-	PMD_DRV_LOG(DEBUG, "RSS configured for port %d\n",
-		adapter->edev_data->port_id);
-
-	return 0;
-
-err_fill_indir:
-	ena_com_rss_destroy(ena_dev);
-err_rss_init:
-
-	return rc;
-}
-
 static void ena_rx_queue_release_all(struct rte_eth_dev *dev)
 {
 	struct ena_ring **queues = (struct ena_ring **)dev->data->rx_queues;
@@ -829,7 +658,7 @@ static int ena_queue_start_all(struct rte_eth_dev *dev,
 				"Inconsistent state of Tx queues\n");
 		}
 
-		rc = ena_queue_start(&queues[i]);
+		rc = ena_queue_start(dev, &queues[i]);
 
 		if (rc) {
 			PMD_INIT_LOG(ERR,
@@ -1074,6 +903,10 @@ static int ena_start(struct rte_eth_dev *dev)
 	if (rc)
 		return rc;
 
+	rc = ena_setup_rx_intr(dev);
+	if (rc)
+		return rc;
+
 	rc = ena_queue_start_all(dev, ENA_RING_TYPE_RX);
 	if (rc)
 		return rc;
@@ -1082,9 +915,8 @@ static int ena_start(struct rte_eth_dev *dev)
 	if (rc)
 		goto err_start_tx;
 
-	if (adapter->edev_data->dev_conf.rxmode.mq_mode &
-	    ETH_MQ_RX_RSS_FLAG && adapter->edev_data->nb_rx_queues > 0) {
-		rc = ena_rss_init_default(adapter);
+	if (adapter->edev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+		rc = ena_rss_configure(adapter);
 		if (rc)
 			goto err_rss_init;
 	}
@@ -1114,6 +946,8 @@ static int ena_stop(struct rte_eth_dev *dev)
 {
 	struct ena_adapter *adapter = dev->data->dev_private;
 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	int rc;
 
 	/* Cannot free memory in secondary process */
@@ -1132,6 +966,16 @@ static int ena_stop(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(ERR, "Device reset failed, rc: %d\n", rc);
 	}
 
+	rte_intr_disable(intr_handle);
+
+	rte_intr_efd_disable(intr_handle);
+	if (intr_handle->intr_vec != NULL) {
+		rte_free(intr_handle->intr_vec);
+		intr_handle->intr_vec = NULL;
+	}
+
+	rte_intr_enable(intr_handle);
+
 	++adapter->dev_stats.dev_stop;
 	adapter->state = ENA_ADAPTER_STATE_STOPPED;
 	dev->data->dev_started = 0;
@@ -1139,10 +983,12 @@ static int ena_stop(struct rte_eth_dev *dev)
 	return 0;
 }
 
-static int ena_create_io_queue(struct ena_ring *ring)
+static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring)
 {
-	struct ena_adapter *adapter;
-	struct ena_com_dev *ena_dev;
+	struct ena_adapter *adapter = ring->adapter;
+	struct ena_com_dev *ena_dev = &adapter->ena_dev;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct ena_com_create_io_ctx ctx =
 		/* policy set to _HOST just to satisfy icc compiler */
 		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
@@ -1151,9 +997,7 @@ static int ena_create_io_queue(struct ena_ring *ring)
 	unsigned int i;
 	int rc;
 
-	adapter = ring->adapter;
-	ena_dev = &adapter->ena_dev;
-
+	ctx.msix_vector = -1;
 	if (ring->type == ENA_RING_TYPE_TX) {
 		ena_qid = ENA_IO_TXQ_IDX(ring->id);
 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
@@ -1163,12 +1007,13 @@ static int ena_create_io_queue(struct ena_ring *ring)
 	} else {
 		ena_qid = ENA_IO_RXQ_IDX(ring->id);
 		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
+		if (rte_intr_dp_is_en(intr_handle))
+			ctx.msix_vector = intr_handle->intr_vec[ring->id];
 		for (i = 0; i < ring->ring_size; i++)
 			ring->empty_rx_reqs[i] = i;
 	}
 	ctx.queue_size = ring->ring_size;
 	ctx.qid = ena_qid;
-	ctx.msix_vector = -1; /* interrupts not used */
 	ctx.numa_node = ring->numa_socket_id;
 
 	rc = ena_com_create_io_queue(ena_dev, &ctx);
@@ -1193,6 +1038,10 @@ static int ena_create_io_queue(struct ena_ring *ring)
 	if (ring->type == ENA_RING_TYPE_TX)
 		ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node);
 
+	/* Start with Rx interrupts being masked. */
+	if (ring->type == ENA_RING_TYPE_RX && rte_intr_dp_is_en(intr_handle))
+		ena_rx_queue_intr_disable(dev, ring->id);
+
 	return 0;
 }
 
@@ -1229,14 +1078,14 @@ static void ena_queue_stop_all(struct rte_eth_dev *dev,
 		ena_queue_stop(&queues[i]);
 }
 
-static int ena_queue_start(struct ena_ring *ring)
+static int ena_queue_start(struct rte_eth_dev *dev, struct ena_ring *ring)
 {
 	int rc, bufs_num;
 
 	ena_assert_msg(ring->configured == 1,
		       "Trying to start unconfigured queue\n");
-	rc = ena_create_io_queue(ring);
+	rc = ena_create_io_queue(dev, ring);
 	if (rc) {
 		PMD_INIT_LOG(ERR, "Failed to create IO queue\n");
 		return rc;
 	}
@@ -1357,7 +1206,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
			      uint16_t queue_idx,
			      uint16_t nb_desc,
			      unsigned int socket_id,
-			      __rte_unused const struct rte_eth_rxconf *rx_conf,
+			      const struct rte_eth_rxconf *rx_conf,
			      struct rte_mempool *mp)
 {
 	struct ena_adapter *adapter = dev->data->dev_private;
@@ -1441,6 +1290,8 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 	for (i = 0; i < nb_desc; i++)
 		rxq->empty_rx_reqs[i] = i;
 
+	rxq->offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
 	/* Store pointer to this queue in upper layer */
 	rxq->configured = 1;
 	dev->data->rx_queues[queue_idx] = rxq;
@@ -1461,7 +1312,7 @@ static int ena_add_single_rx_desc(struct ena_com_io_sq *io_sq,
 	/* pass resource to device */
 	rc = ena_com_add_single_rx_desc(io_sq, &ebuf, id);
 	if (unlikely(rc != 0))
-		PMD_DRV_LOG(WARNING, "Failed adding Rx desc\n");
+		PMD_RX_LOG(WARNING, "Failed adding Rx desc\n");
 
 	return rc;
 }
@@ -1471,16 +1322,21 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 	unsigned int i;
 	int rc;
 	uint16_t next_to_use = rxq->next_to_use;
-	uint16_t in_use, req_id;
+	uint16_t req_id;
+#ifdef RTE_ETHDEV_DEBUG_RX
+	uint16_t in_use;
+#endif
 	struct rte_mbuf **mbufs = rxq->rx_refill_buffer;
 
 	if (unlikely(!count))
 		return 0;
 
+#ifdef RTE_ETHDEV_DEBUG_RX
 	in_use = rxq->ring_size - 1 -
 		ena_com_free_q_entries(rxq->ena_com_io_sq);
-	ena_assert_msg(((in_use + count) < rxq->ring_size),
-		"bad ring state\n");
+	if (unlikely((in_use + count) >= rxq->ring_size))
+		PMD_RX_LOG(ERR, "Bad Rx ring state\n");
+#endif
 
 	/* get resources for incoming packets */
 	rc = rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, count);
@@ -1510,7 +1366,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 	}
 
 	if (unlikely(i < count)) {
-		PMD_DRV_LOG(WARNING,
+		PMD_RX_LOG(WARNING,
			"Refilled Rx queue[%d] with only %d/%d buffers\n",
			rxq->id, i, count);
 		rte_pktmbuf_free_bulk(&mbufs[i], count - i);
@@ -1899,6 +1755,9 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	adapter->offloads.rx_csum_supported =
 		(get_feat_ctx.offload.rx_supported &
 		ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK) != 0;
+	adapter->offloads.rss_hash_supported =
+		(get_feat_ctx.offload.rx_supported &
+		ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_HASH_MASK) != 0;
 
 	/* Copy MAC address and point DPDK to it */
 	eth_dev->data->mac_addrs = (struct rte_ether_addr *)adapter->mac_addr;
@@ -1906,6 +1765,12 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
			get_feat_ctx.dev_attr.mac_addr,
			(struct rte_ether_addr *)adapter->mac_addr);
 
+	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
+	if (unlikely(rc != 0)) {
+		PMD_DRV_LOG(ERR, "Failed to initialize RSS in ENA device\n");
+		goto err_delete_debug_area;
+	}
+
 	adapter->drv_stats = rte_zmalloc("adapter stats",
					 sizeof(*adapter->drv_stats),
					 RTE_CACHE_LINE_SIZE);
@@ -1913,7 +1778,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 		PMD_DRV_LOG(ERR,
			"Failed to allocate memory for adapter statistics\n");
 		rc = -ENOMEM;
-		goto err_delete_debug_area;
+		goto err_rss_destroy;
 	}
 
 	rte_spinlock_init(&adapter->admin_lock);
@@ -1934,6 +1799,8 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 
 	return 0;
 
+err_rss_destroy:
+	ena_com_rss_destroy(ena_dev);
 err_delete_debug_area:
 	ena_com_delete_debug_area(ena_dev);
 
@@ -1958,6 +1825,8 @@ static void ena_destroy_device(struct rte_eth_dev *eth_dev)
 	if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
 		ena_close(eth_dev);
 
+	ena_com_rss_destroy(ena_dev);
+
 	ena_com_delete_debug_area(ena_dev);
 	ena_com_delete_host_info(ena_dev);
 
@@ -2064,13 +1933,14 @@ static int ena_infos_get(struct rte_eth_dev *dev,
 
 	/* Inform framework about available features */
 	dev_info->rx_offload_capa = rx_feat;
-	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
+	if (adapter->offloads.rss_hash_supported)
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
 	dev_info->rx_queue_offload_capa = rx_feat;
 	dev_info->tx_offload_capa = tx_feat;
 	dev_info->tx_queue_offload_capa = tx_feat;
 
-	dev_info->flow_type_rss_offloads = ETH_RSS_IP | ETH_RSS_TCP |
-					   ETH_RSS_UDP;
+	dev_info->flow_type_rss_offloads = ENA_ALL_RSS_HF;
+	dev_info->hash_key_size = ENA_HASH_KEY_SIZE;
 
 	dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
 	dev_info->max_rx_pktlen = adapter->max_mtu;
@@ -2217,13 +2087,18 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint16_t completed;
 	struct ena_com_rx_ctx ena_rx_ctx;
 	int i, rc = 0;
+	bool fill_hash;
 
+#ifdef RTE_ETHDEV_DEBUG_RX
 	/* Check adapter state */
 	if (unlikely(rx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
-		PMD_DRV_LOG(ALERT,
+		PMD_RX_LOG(ALERT,
			"Trying to receive pkts while device is NOT running\n");
 		return 0;
 	}
+#endif
+
+	fill_hash = rx_ring->offloads & DEV_RX_OFFLOAD_RSS_HASH;
 
 	descs_in_use = rx_ring->ring_size -
		ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
@@ -2239,7 +2114,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				    rx_ring->ena_com_io_sq,
				    &ena_rx_ctx);
 		if (unlikely(rc)) {
-			PMD_DRV_LOG(ERR,
+			PMD_RX_LOG(ERR,
				"Failed to get the packet from the device, rc: %d\n",
				rc);
 			if (rc == ENA_COM_NO_SPACE) {
@@ -2271,7 +2146,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		}
 
 		/* fill mbuf attributes if any */
-		ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx);
+		ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx, fill_hash);
 
 		if (unlikely(mbuf->ol_flags &
				(PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) {
@@ -2416,13 +2291,13 @@ static int ena_check_space_and_linearize_mbuf(struct ena_ring *tx_ring,
	 * be needed so we reduce the segments number from num_segments to 1
	 */
 	if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, 3)) {
-		PMD_DRV_LOG(DEBUG, "Not enough space in the Tx queue\n");
+		PMD_TX_LOG(DEBUG, "Not enough space in the Tx queue\n");
 		return ENA_COM_NO_MEM;
 	}
 	++tx_ring->tx_stats.linearize;
 	rc = rte_pktmbuf_linearize(mbuf);
 	if (unlikely(rc)) {
-		PMD_DRV_LOG(WARNING, "Mbuf linearize failed\n");
+		PMD_TX_LOG(WARNING, "Mbuf linearize failed\n");
 		rte_atomic64_inc(&tx_ring->adapter->drv_stats->ierrors);
 		++tx_ring->tx_stats.linearize_failed;
 		return rc;
@@ -2436,7 +2311,7 @@ checkspace:
	 */
 	if (!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
			num_segments + 2)) {
-		PMD_DRV_LOG(DEBUG, "Not enough space in the Tx queue\n");
+		PMD_TX_LOG(DEBUG, "Not enough space in the Tx queue\n");
 		return ENA_COM_NO_MEM;
 	}
 
@@ -2551,7 +2426,7 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
 
 	if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq,
						&ena_tx_ctx))) {
-		PMD_DRV_LOG(DEBUG,
+		PMD_TX_LOG(DEBUG,
			"LLQ Tx max burst size of queue %d achieved, writing doorbell to send burst\n",
			tx_ring->id);
 		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
@@ -2563,7 +2438,11 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
 	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
		&nb_hw_desc);
 	if (unlikely(rc)) {
+		PMD_DRV_LOG(ERR, "Failed to prepare Tx buffers, rc: %d\n", rc);
 		++tx_ring->tx_stats.prepare_ctx_err;
+		tx_ring->adapter->reset_reason =
+			ENA_REGS_RESET_DRIVER_INVALID_STATE;
+		tx_ring->adapter->trigger_reset = true;
 		return rc;
 	}
 
@@ -2628,12 +2507,14 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
 	uint16_t sent_idx = 0;
 
+#ifdef RTE_ETHDEV_DEBUG_TX
 	/* Check adapter state */
 	if (unlikely(tx_ring->adapter->state != ENA_ADAPTER_STATE_RUNNING)) {
-		PMD_DRV_LOG(ALERT,
+		PMD_TX_LOG(ALERT,
			"Trying to xmit pkts while device is NOT running\n");
 		return 0;
 	}
+#endif
 
 	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
 		if (ena_xmit_mbuf(tx_ring, tx_pkts[sent_idx]))
@@ -2931,6 +2812,100 @@ static int ena_parse_devargs(struct ena_adapter *adapter,
 	return rc;
 }
 
+static int ena_setup_rx_intr(struct rte_eth_dev *dev)
+{
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	int rc;
+	uint16_t vectors_nb, i;
+	bool rx_intr_requested = dev->data->dev_conf.intr_conf.rxq;
+
+	if (!rx_intr_requested)
+		return 0;
+
+	if (!rte_intr_cap_multiple(intr_handle)) {
+		PMD_DRV_LOG(ERR,
+			"Rx interrupt requested, but it isn't supported by the PCI driver\n");
+		return -ENOTSUP;
+	}
+
+	/* Disable interrupt mapping before the configuration starts. */
+	rte_intr_disable(intr_handle);
+
+	/* Verify if there are enough vectors available. */
+	vectors_nb = dev->data->nb_rx_queues;
+	if (vectors_nb > RTE_MAX_RXTX_INTR_VEC_ID) {
+		PMD_DRV_LOG(ERR,
+			"Too many Rx interrupts requested, maximum number: %d\n",
+			RTE_MAX_RXTX_INTR_VEC_ID);
+		rc = -ENOTSUP;
+		goto enable_intr;
+	}
+
+	intr_handle->intr_vec = rte_zmalloc("intr_vec",
+		dev->data->nb_rx_queues * sizeof(*intr_handle->intr_vec), 0);
+	if (intr_handle->intr_vec == NULL) {
+		PMD_DRV_LOG(ERR,
+			"Failed to allocate interrupt vector for %d queues\n",
+			dev->data->nb_rx_queues);
+		rc = -ENOMEM;
+		goto enable_intr;
+	}
+
+	rc = rte_intr_efd_enable(intr_handle, vectors_nb);
+	if (rc != 0)
+		goto free_intr_vec;
+
+	if (!rte_intr_allow_others(intr_handle)) {
+		PMD_DRV_LOG(ERR,
+			"Not enough interrupts available to use both ENA Admin and Rx interrupts\n");
+		goto disable_intr_efd;
+	}
+
+	for (i = 0; i < vectors_nb; ++i)
+		intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
+
+	rte_intr_enable(intr_handle);
+	return 0;
+
+disable_intr_efd:
+	rte_intr_efd_disable(intr_handle);
+free_intr_vec:
+	rte_free(intr_handle->intr_vec);
+	intr_handle->intr_vec = NULL;
+enable_intr:
+	rte_intr_enable(intr_handle);
+	return rc;
+}
+
+static void ena_rx_queue_intr_set(struct rte_eth_dev *dev,
+				  uint16_t queue_id,
+				  bool unmask)
+{
+	struct ena_adapter *adapter = dev->data->dev_private;
+	struct ena_ring *rxq = &adapter->rx_ring[queue_id];
+	struct ena_eth_io_intr_reg intr_reg;
+
+	ena_com_update_intr_reg(&intr_reg, 0, 0, unmask);
+	ena_com_unmask_intr(rxq->ena_com_io_cq, &intr_reg);
+}
+
+static int ena_rx_queue_intr_enable(struct rte_eth_dev *dev,
+				    uint16_t queue_id)
+{
+	ena_rx_queue_intr_set(dev, queue_id, true);
+
+	return 0;
+}
+
+static int ena_rx_queue_intr_disable(struct rte_eth_dev *dev,
+				     uint16_t queue_id)
+{
+	ena_rx_queue_intr_set(dev, queue_id, false);
+
+	return 0;
+}
+
 /*********************************************************************
  * PMD configuration
  *********************************************************************/
@@ -2960,18 +2935,13 @@ RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(net_ena, ENA_DEVARG_LARGE_LLQ_HDR "=<0|1>");
 RTE_LOG_REGISTER_SUFFIX(ena_logtype_init, init, NOTICE);
 RTE_LOG_REGISTER_SUFFIX(ena_logtype_driver, driver, NOTICE);
-#ifdef RTE_LIBRTE_ENA_DEBUG_RX
-RTE_LOG_REGISTER_SUFFIX(ena_logtype_rx, rx, NOTICE);
-#endif
-#ifdef RTE_LIBRTE_ENA_DEBUG_TX
-RTE_LOG_REGISTER_SUFFIX(ena_logtype_tx, tx, NOTICE);
-#endif
-#ifdef RTE_LIBRTE_ENA_DEBUG_TX_FREE
-RTE_LOG_REGISTER_SUFFIX(ena_logtype_tx_free, tx_free, NOTICE);
+#ifdef RTE_ETHDEV_DEBUG_RX
+RTE_LOG_REGISTER_SUFFIX(ena_logtype_rx, rx, DEBUG);
 #endif
-#ifdef RTE_LIBRTE_ENA_COM_DEBUG
-RTE_LOG_REGISTER_SUFFIX(ena_logtype_com, com, NOTICE);
+#ifdef RTE_ETHDEV_DEBUG_TX
+RTE_LOG_REGISTER_SUFFIX(ena_logtype_tx, tx, DEBUG);
 #endif
+RTE_LOG_REGISTER_SUFFIX(ena_logtype_com, com, WARNING);
 
 /******************************************************************************
 ******************************** AENQ Handlers *******************************