X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fena%2Fena_ethdev.c;h=191153a8fe6a5621c4dbb27cd48e6c8936279b9e;hb=e5df9f33db00eb9d322abaefff30da74fd0e625d;hp=dc253c384cb487a375e8a35f275388f7510b2147;hpb=5efb9fc7436a82ffc555556f3e1732562a1c1b8b;p=dpdk.git

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index dc253c384c..191153a8fe 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -85,6 +85,9 @@
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 
+#define ENA_MAX_RING_DESC	ENA_DEFAULT_RING_SIZE
+#define ENA_MIN_RING_DESC	128
+
 enum ethtool_stringset {
 	ETH_SS_TEST = 0,
 	ETH_SS_STATS,
@@ -204,7 +207,8 @@ static const struct rte_pci_id pci_id_ena_map[] = {
 static struct ena_aenq_handlers aenq_handlers;
 
 static int ena_device_init(struct ena_com_dev *ena_dev,
-			   struct ena_com_dev_get_features_ctx *get_feat_ctx);
+			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
+			   bool *wd_state);
 static int ena_dev_configure(struct rte_eth_dev *dev);
 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts);
@@ -235,6 +239,8 @@ static void ena_rx_queue_release_bufs(struct ena_ring *ring);
 static void ena_tx_queue_release_bufs(struct ena_ring *ring);
 static int ena_link_update(struct rte_eth_dev *dev,
 			   int wait_to_complete);
+static int ena_create_io_queue(struct ena_ring *ring);
+static void ena_free_io_queues_all(struct ena_adapter *adapter);
 static int ena_queue_restart(struct ena_ring *ring);
 static int ena_queue_restart_all(struct rte_eth_dev *dev,
 				 enum ena_ring_type ring_type);
@@ -367,6 +373,40 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 	}
 }
 
+static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
+{
+	if (likely(req_id < rx_ring->ring_size))
+		return 0;
+
+	RTE_LOG(ERR, PMD, "Invalid rx req_id: %hu\n", req_id);
+
+	rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+	rx_ring->adapter->trigger_reset = true;
+
+	return -EFAULT;
+}
+
+static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
+{
+	struct ena_tx_buffer *tx_info = NULL;
+
+	if (likely(req_id < tx_ring->ring_size)) {
+		tx_info = &tx_ring->tx_buffer_info[req_id];
+		if (likely(tx_info->mbuf))
+			return 0;
+	}
+
+	if (tx_info)
+		RTE_LOG(ERR, PMD, "tx_info doesn't have valid mbuf\n");
+	else
+		RTE_LOG(ERR, PMD, "Invalid req_id: %hu\n", req_id);
+
+	/* Trigger device reset */
+	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
+	tx_ring->adapter->trigger_reset = true;
+	return -EFAULT;
+}
+
 static void ena_config_host_info(struct ena_com_dev *ena_dev)
 {
 	struct ena_admin_host_info *host_info;
@@ -398,9 +438,12 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
 
 	rc = ena_com_set_host_attributes(ena_dev);
 	if (rc) {
-		RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
-		if (rc != -ENA_COM_UNSUPPORTED)
-			goto err;
+		if (rc == -ENA_COM_UNSUPPORTED)
+			RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+		else
+			RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+
+		goto err;
 	}
 
 	return;
@@ -451,9 +494,12 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
 
 	rc = ena_com_set_host_attributes(&adapter->ena_dev);
 	if (rc) {
-		RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
-		if (rc != -ENA_COM_UNSUPPORTED)
-			goto err;
+		if (rc == -ENA_COM_UNSUPPORTED)
+			RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+		else
+			RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+
+		goto err;
 	}
 
 	return;
@@ -466,7 +512,8 @@ static void ena_close(struct rte_eth_dev *dev)
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
 
-	ena_stop(dev);
+	if (adapter->state == ENA_ADAPTER_STATE_RUNNING)
+		ena_stop(dev);
 	adapter->state = ENA_ADAPTER_STATE_CLOSED;
 
 	ena_rx_queue_release_all(dev);
@@ -485,6 +532,7 @@ ena_dev_reset(struct rte_eth_dev *dev)
 	struct ena_adapter *adapter;
 	int nb_queues;
 	int rc, i;
+	bool wd_state;
 
 	adapter = (struct ena_adapter *)(dev->data->dev_private);
 	ena_dev = &adapter->ena_dev;
@@ -495,7 +543,9 @@ ena_dev_reset(struct rte_eth_dev *dev)
 
 	ena_com_set_admin_running_state(ena_dev, false);
 
-	ena_com_dev_reset(ena_dev, adapter->reset_reason);
+	rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
+	if (rc)
+		RTE_LOG(ERR, PMD, "Device reset failed\n");
 
 	for (i = 0; i < nb_queues; i++)
 		mb_pool_rx[i] = adapter->rx_ring[i].mb_pool;
@@ -510,11 +560,12 @@ ena_dev_reset(struct rte_eth_dev *dev)
 	ena_com_admin_destroy(ena_dev);
 	ena_com_mmio_reg_read_request_destroy(ena_dev);
 
-	rc = ena_device_init(ena_dev, &get_feat_ctx);
+	rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
 	if (rc) {
 		PMD_INIT_LOG(CRIT, "Cannot initialize device\n");
 		return rc;
 	}
+	adapter->wd_state = wd_state;
 
 	rte_intr_enable(intr_handle);
 	ena_com_set_admin_polling_mode(ena_dev, false);
@@ -539,7 +590,7 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
-	int ret, i;
+	int rc, i;
 	u16 entry_value;
 	int conf_idx;
 	int idx;
@@ -551,8 +602,7 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
 		RTE_LOG(WARNING, PMD,
 			"indirection table %d is bigger than supported (%d)\n",
 			reta_size, ENA_RX_RSS_TABLE_SIZE);
-		ret = -EINVAL;
-		goto err;
+		return -EINVAL;
 	}
 
 	for (i = 0 ; i < reta_size ; i++) {
@@ -564,29 +614,28 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
 		if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
 			entry_value =
 				ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);
-			ret = ena_com_indirect_table_fill_entry(ena_dev,
-								i,
-								entry_value);
-			if (unlikely(ret && (ret != ENA_COM_UNSUPPORTED))) {
+
+			rc = ena_com_indirect_table_fill_entry(ena_dev,
+							       i,
+							       entry_value);
+			if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
 				RTE_LOG(ERR, PMD,
 					"Cannot fill indirect table\n");
-				ret = -ENOTSUP;
-				goto err;
+				return rc;
 			}
 		}
 	}
 
-	ret = ena_com_indirect_table_set(ena_dev);
-	if (unlikely(ret && (ret != ENA_COM_UNSUPPORTED))) {
+	rc = ena_com_indirect_table_set(ena_dev);
+	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
 		RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
-		ret = -ENOTSUP;
-		goto err;
+		return rc;
 	}
 
 	RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n",
 		__func__, reta_size, adapter->rte_dev->data->port_id);
-err:
-	return ret;
+
+	return 0;
 }
 
 /* Query redirection table. */
@@ -597,7 +646,7 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
-	int ret;
+	int rc;
 	int i;
 	u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
 	int reta_conf_idx;
@@ -607,11 +656,10 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 	    (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
 		return -EINVAL;
 
-	ret = ena_com_indirect_table_get(ena_dev, indirect_table);
-	if (unlikely(ret && (ret != ENA_COM_UNSUPPORTED))) {
+	rc = ena_com_indirect_table_get(ena_dev, indirect_table);
+	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
 		RTE_LOG(ERR, PMD, "cannot get indirect table\n");
-		ret = -ENOTSUP;
-		goto err;
+		return -ENOTSUP;
 	}
 
 	for (i = 0 ; i < reta_size ; i++) {
@@ -621,8 +669,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 		reta_conf[reta_conf_idx].reta[reta_idx] =
 			ENA_IO_RXQ_IDX_REV(indirect_table[i]);
 	}
-err:
-	return ret;
+
+	return 0;
 }
 
 static int ena_rss_init_default(struct ena_adapter *adapter)
@@ -701,26 +749,21 @@ static void ena_tx_queue_release_all(struct rte_eth_dev *dev)
 static void ena_rx_queue_release(void *queue)
 {
 	struct ena_ring *ring = (struct ena_ring *)queue;
-	struct ena_adapter *adapter = ring->adapter;
-	int ena_qid;
 
 	ena_assert_msg(ring->configured,
 		       "API violation - releasing not configured queue");
 	ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
 		       "API violation");
 
-	/* Destroy HW queue */
-	ena_qid = ENA_IO_RXQ_IDX(ring->id);
-	ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid);
-
-	/* Free all bufs */
-	ena_rx_queue_release_bufs(ring);
-
 	/* Free ring resources */
 	if (ring->rx_buffer_info)
 		rte_free(ring->rx_buffer_info);
 	ring->rx_buffer_info = NULL;
 
+	if (ring->empty_rx_reqs)
+		rte_free(ring->empty_rx_reqs);
+	ring->empty_rx_reqs = NULL;
+
 	ring->configured = 0;
 
 	RTE_LOG(NOTICE, PMD, "RX Queue %d:%d released\n",
@@ -730,18 +773,12 @@ static void ena_rx_queue_release(void *queue)
 static void ena_tx_queue_release(void *queue)
 {
 	struct ena_ring *ring = (struct ena_ring *)queue;
-	struct ena_adapter *adapter = ring->adapter;
-	int ena_qid;
 
 	ena_assert_msg(ring->configured,
 		       "API violation. Releasing not configured queue");
 	ena_assert_msg(ring->adapter->state != ENA_ADAPTER_STATE_RUNNING,
 		       "API violation");
 
-	/* Destroy HW queue */
-	ena_qid = ENA_IO_TXQ_IDX(ring->id);
-	ena_com_destroy_io_queue(&adapter->ena_dev, ena_qid);
-
 	/* Free all bufs */
 	ena_tx_queue_release_bufs(ring);
 
@@ -799,7 +836,7 @@ static int ena_link_update(struct rte_eth_dev *dev,
 	adapter = (struct ena_adapter *)(dev->data->dev_private);
 
 	link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
-	link->link_speed = ETH_SPEED_NUM_10G;
+	link->link_speed = ETH_SPEED_NUM_NONE;
 	link->link_duplex = ETH_LINK_FULL_DUPLEX;
 
 	return 0;
@@ -840,7 +877,7 @@ static int ena_queue_restart_all(struct rte_eth_dev *dev,
 				PMD_INIT_LOG(ERR,
 					     "failed to restart queue %d type(%d)",
 					     i, ring_type);
-				return -1;
+				return rc;
 			}
 		}
 	}
@@ -864,9 +901,11 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
 {
 	uint32_t max_frame_len = ena_get_mtu_conf(adapter);
 
-	if (max_frame_len > adapter->max_mtu) {
-		PMD_INIT_LOG(ERR, "Unsupported MTU of %d", max_frame_len);
-		return -1;
+	if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
+		PMD_INIT_LOG(ERR, "Unsupported MTU of %d. "
+			"max mtu: %d, min mtu: %d\n",
+			max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
+		return ENA_COM_UNSUPPORTED;
 	}
 
 	return 0;
@@ -874,6 +913,7 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
 
 static int
 ena_calc_queue_size(struct ena_com_dev *ena_dev,
+		    u16 *max_tx_sgl_size,
 		    struct ena_com_dev_get_features_ctx *get_feat_ctx)
 {
 	uint32_t queue_size = ENA_DEFAULT_RING_SIZE;
@@ -891,11 +931,14 @@ ena_calc_queue_size(struct ena_com_dev *ena_dev,
 	if (!rte_is_power_of_2(queue_size))
 		queue_size = rte_align32pow2(queue_size >> 1);
 
-	if (queue_size == 0) {
+	if (unlikely(queue_size == 0)) {
 		PMD_INIT_LOG(ERR, "Invalid queue size");
 		return -EFAULT;
 	}
 
+	*max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS,
+		get_feat_ctx->max_queues.max_packet_tx_descs);
+
 	return queue_size;
 }
@@ -960,12 +1003,12 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	ena_dev = &adapter->ena_dev;
 	ena_assert_msg(ena_dev != NULL, "Uninitialized device");
 
-	if (mtu > ena_get_mtu_conf(adapter)) {
+	if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
 		RTE_LOG(ERR, PMD,
-			"Given MTU (%d) exceeds maximum MTU supported (%d)\n",
-			mtu, ena_get_mtu_conf(adapter));
-		rc = -EINVAL;
-		goto err;
+			"Invalid MTU setting. new_mtu: %d "
+			"max mtu: %d min mtu: %d\n",
+			mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
+		return -EINVAL;
 	}
 
 	rc = ena_com_set_dev_mtu(ena_dev, mtu);
@@ -974,7 +1017,6 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	else
 		RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu);
 
-err:
 	return rc;
 }
 
@@ -998,7 +1040,7 @@ static int ena_start(struct rte_eth_dev *dev)
 		return rc;
 
 	if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
-	    ETH_MQ_RX_RSS_FLAG) {
+	    ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) {
 		rc = ena_rss_init_default(adapter);
 		if (rc)
 			return rc;
@@ -1024,10 +1066,86 @@ static void ena_stop(struct rte_eth_dev *dev)
 		(struct ena_adapter *)(dev->data->dev_private);
 
 	rte_timer_stop_sync(&adapter->timer_wd);
+	ena_free_io_queues_all(adapter);
 
 	adapter->state = ENA_ADAPTER_STATE_STOPPED;
 }
 
+static int ena_create_io_queue(struct ena_ring *ring)
+{
+	struct ena_adapter *adapter;
+	struct ena_com_dev *ena_dev;
+	struct ena_com_create_io_ctx ctx =
+		/* policy set to _HOST just to satisfy icc compiler */
+		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
+		  0, 0, 0, 0, 0 };
+	uint16_t ena_qid;
+	int rc;
+
+	adapter = ring->adapter;
+	ena_dev = &adapter->ena_dev;
+
+	if (ring->type == ENA_RING_TYPE_TX) {
+		ena_qid = ENA_IO_TXQ_IDX(ring->id);
+		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
+		ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
+		ctx.queue_size = adapter->tx_ring_size;
+	} else {
+		ena_qid = ENA_IO_RXQ_IDX(ring->id);
+		ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
+		ctx.queue_size = adapter->rx_ring_size;
+	}
+	ctx.qid = ena_qid;
+	ctx.msix_vector = -1; /* interrupts not used */
+	ctx.numa_node = ena_cpu_to_node(ring->id);
+
+	rc = ena_com_create_io_queue(ena_dev, &ctx);
+	if (rc) {
+		RTE_LOG(ERR, PMD,
+			"failed to create io queue #%d (qid:%d) rc: %d\n",
+			ring->id, ena_qid, rc);
+		return rc;
+	}
+
+	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
+				     &ring->ena_com_io_sq,
+				     &ring->ena_com_io_cq);
+	if (rc) {
+		RTE_LOG(ERR, PMD,
+			"Failed to get io queue handlers. queue num %d rc: %d\n",
+			ring->id, rc);
+		ena_com_destroy_io_queue(ena_dev, ena_qid);
+		return rc;
+	}
+
+	if (ring->type == ENA_RING_TYPE_TX)
+		ena_com_update_numa_node(ring->ena_com_io_cq, ctx.numa_node);
+
+	return 0;
+}
+
+static void ena_free_io_queues_all(struct ena_adapter *adapter)
+{
+	struct rte_eth_dev *eth_dev = adapter->rte_dev;
+	struct ena_com_dev *ena_dev = &adapter->ena_dev;
+	int i;
+	uint16_t ena_qid;
+	uint16_t nb_rxq = eth_dev->data->nb_rx_queues;
+	uint16_t nb_txq = eth_dev->data->nb_tx_queues;
+
+	for (i = 0; i < nb_txq; ++i) {
+		ena_qid = ENA_IO_TXQ_IDX(i);
+		ena_com_destroy_io_queue(ena_dev, ena_qid);
+	}
+
+	for (i = 0; i < nb_rxq; ++i) {
+		ena_qid = ENA_IO_RXQ_IDX(i);
+		ena_com_destroy_io_queue(ena_dev, ena_qid);
+
+		ena_rx_queue_release_bufs(&adapter->rx_ring[i]);
+	}
+}
+
 static int ena_queue_restart(struct ena_ring *ring)
 {
 	int rc, bufs_num;
@@ -1035,6 +1153,12 @@ static int ena_queue_restart(struct ena_ring *ring)
 	ena_assert_msg(ring->configured == 1,
 		       "Trying to restart unconfigured queue\n");
 
+	rc = ena_create_io_queue(ring);
+	if (rc) {
+		PMD_INIT_LOG(ERR, "Failed to create IO queue!\n");
+		return rc;
+	}
+
 	ring->next_to_clean = 0;
 	ring->next_to_use = 0;
 
@@ -1045,7 +1169,7 @@ static int ena_queue_restart(struct ena_ring *ring)
 	rc = ena_populate_rx_queue(ring, bufs_num);
 	if (rc != bufs_num) {
 		PMD_INIT_LOG(ERR, "Failed to populate rx ring !");
-		return (-1);
+		return ENA_COM_FAULT;
 	}
 
 	return 0;
@@ -1057,17 +1181,10 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 			      __rte_unused unsigned int socket_id,
 			      const struct rte_eth_txconf *tx_conf)
 {
-	struct ena_com_create_io_ctx ctx =
-		/* policy set to _HOST just to satisfy icc compiler */
-		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
-		  ENA_COM_IO_QUEUE_DIRECTION_TX, 0, 0, 0, 0 };
 	struct ena_ring *txq = NULL;
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
 	unsigned int i;
-	int ena_qid;
-	int rc;
-	struct ena_com_dev *ena_dev = &adapter->ena_dev;
 
 	txq = &adapter->tx_ring[queue_idx];
 
@@ -1075,12 +1192,12 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		RTE_LOG(CRIT, PMD,
 			"API violation. Queue %d is already configured\n",
 			queue_idx);
-		return -1;
+		return ENA_COM_FAULT;
 	}
 
 	if (!rte_is_power_of_2(nb_desc)) {
 		RTE_LOG(ERR, PMD,
-			"Unsupported size of RX queue: %d is not a power of 2.",
+			"Unsupported size of TX queue: %d is not a power of 2.",
 			nb_desc);
 		return -EINVAL;
 	}
@@ -1092,35 +1209,6 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	ena_qid = ENA_IO_TXQ_IDX(queue_idx);
-
-	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
-	ctx.qid = ena_qid;
-	ctx.msix_vector = -1; /* admin interrupts not used */
-	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
-	ctx.queue_size = adapter->tx_ring_size;
-	ctx.numa_node = ena_cpu_to_node(queue_idx);
-
-	rc = ena_com_create_io_queue(ena_dev, &ctx);
-	if (rc) {
-		RTE_LOG(ERR, PMD,
-			"failed to create io TX queue #%d (qid:%d) rc: %d\n",
-			queue_idx, ena_qid, rc);
-	}
-	txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
-	txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
-
-	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
-				     &txq->ena_com_io_sq,
-				     &txq->ena_com_io_cq);
-	if (rc) {
-		RTE_LOG(ERR, PMD,
-			"Failed to get TX queue handlers. TX queue num %d rc: %d\n",
-			queue_idx, rc);
-		ena_com_destroy_io_queue(ena_dev, ena_qid);
-		goto err;
-	}
-
 	txq->port_id = dev->data->port_id;
 	txq->next_to_clean = 0;
 	txq->next_to_use = 0;
@@ -1143,6 +1231,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		rte_free(txq->tx_buffer_info);
 		return -ENOMEM;
 	}
+
 	for (i = 0; i < txq->ring_size; i++)
 		txq->empty_tx_reqs[i] = i;
 
@@ -1154,8 +1243,8 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 	/* Store pointer to this queue in upper layer */
 	txq->configured = 1;
 	dev->data->tx_queues[queue_idx] = txq;
-err:
-	return rc;
+
+	return 0;
 }
 
 static int ena_rx_queue_setup(struct rte_eth_dev *dev,
@@ -1165,28 +1254,22 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 			      __rte_unused const struct rte_eth_rxconf *rx_conf,
 			      struct rte_mempool *mp)
 {
-	struct ena_com_create_io_ctx ctx =
-		/* policy set to _HOST just to satisfy icc compiler */
-		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
-		  ENA_COM_IO_QUEUE_DIRECTION_RX, 0, 0, 0, 0 };
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
 	struct ena_ring *rxq = NULL;
-	uint16_t ena_qid = 0;
-	int rc = 0;
-	struct ena_com_dev *ena_dev = &adapter->ena_dev;
+	int i;
 
 	rxq = &adapter->rx_ring[queue_idx];
 	if (rxq->configured) {
 		RTE_LOG(CRIT, PMD,
 			"API violation. Queue %d is already configured\n",
 			queue_idx);
-		return -1;
+		return ENA_COM_FAULT;
 	}
 
 	if (!rte_is_power_of_2(nb_desc)) {
 		RTE_LOG(ERR, PMD,
-			"Unsupported size of TX queue: %d is not a power of 2.",
+			"Unsupported size of RX queue: %d is not a power of 2.",
 			nb_desc);
 		return -EINVAL;
 	}
@@ -1198,33 +1281,6 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return -EINVAL;
 	}
 
-	ena_qid = ENA_IO_RXQ_IDX(queue_idx);
-
-	ctx.qid = ena_qid;
-	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
-	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
-	ctx.msix_vector = -1; /* admin interrupts not used */
-	ctx.queue_size = adapter->rx_ring_size;
-	ctx.numa_node = ena_cpu_to_node(queue_idx);
-
-	rc = ena_com_create_io_queue(ena_dev, &ctx);
-	if (rc)
-		RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n",
-			queue_idx, rc);
-
-	rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
-	rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
-
-	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
-				     &rxq->ena_com_io_sq,
-				     &rxq->ena_com_io_cq);
-	if (rc) {
-		RTE_LOG(ERR, PMD,
-			"Failed to get RX queue handlers. RX queue num %d rc: %d\n",
-			queue_idx, rc);
-		ena_com_destroy_io_queue(ena_dev, ena_qid);
-	}
-
 	rxq->port_id = dev->data->port_id;
 	rxq->next_to_clean = 0;
 	rxq->next_to_use = 0;
@@ -1239,11 +1295,24 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return -ENOMEM;
 	}
 
+	rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
+					 sizeof(uint16_t) * nb_desc,
+					 RTE_CACHE_LINE_SIZE);
+	if (!rxq->empty_rx_reqs) {
+		RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n");
+		rte_free(rxq->rx_buffer_info);
+		rxq->rx_buffer_info = NULL;
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < nb_desc; i++)
+		rxq->empty_tx_reqs[i] = i;
+
 	/* Store pointer to this queue in upper layer */
 	rxq->configured = 1;
 	dev->data->rx_queues[queue_idx] = rxq;
 
-	return rc;
+	return 0;
 }
 
 static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
@@ -1253,7 +1322,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 	uint16_t ring_size = rxq->ring_size;
 	uint16_t ring_mask = ring_size - 1;
 	uint16_t next_to_use = rxq->next_to_use;
-	uint16_t in_use;
+	uint16_t in_use, req_id;
 	struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];
 
 	if (unlikely(!count))
@@ -1281,12 +1350,18 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 		struct ena_com_buf ebuf;
 
 		rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
+
+		req_id = rxq->empty_rx_reqs[next_to_use_masked];
+		rc = validate_rx_req_id(rxq, req_id);
+		if (unlikely(rc < 0))
+			break;
+
 		/* prepare physical address for DMA transaction */
 		ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
 		ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
 		/* pass resource to device */
 		rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
-						&ebuf, next_to_use_masked);
+						&ebuf, req_id);
 		if (unlikely(rc)) {
 			rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
 					     count - i);
@@ -1296,9 +1371,17 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 		next_to_use++;
 	}
 
+	if (unlikely(i < count))
+		RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d "
+			"buffers (from %d)\n", rxq->id, i, count);
+
 	/* When we submitted free recources to device... */
-	if (i > 0) {
-		/* ...let HW know that it can fill buffers with data */
+	if (likely(i > 0)) {
+		/* ...let HW know that it can fill buffers with data
+		 *
+		 * Add memory barrier to make sure the desc were written before
+		 * issue a doorbell
+		 */
 		rte_wmb();
 		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
 
@@ -1309,7 +1392,8 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 }
 
 static int ena_device_init(struct ena_com_dev *ena_dev,
-			   struct ena_com_dev_get_features_ctx *get_feat_ctx)
+			   struct ena_com_dev_get_features_ctx *get_feat_ctx,
+			   bool *wd_state)
 {
 	uint32_t aenq_groups;
 	int rc;
@@ -1372,7 +1456,9 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
 
 	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
 		      BIT(ENA_ADMIN_NOTIFICATION) |
-		      BIT(ENA_ADMIN_KEEP_ALIVE);
+		      BIT(ENA_ADMIN_KEEP_ALIVE) |
+		      BIT(ENA_ADMIN_FATAL_ERROR) |
+		      BIT(ENA_ADMIN_WARNING);
 
 	aenq_groups &= get_feat_ctx->aenq.supported_groups;
 	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
@@ -1381,6 +1467,8 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
 		goto err_admin_init;
 	}
 
+	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
+
 	return 0;
 
 err_admin_init:
@@ -1398,12 +1486,15 @@ static void ena_interrupt_handler_rte(void *cb_arg)
 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
 
 	ena_com_admin_q_comp_intr_handler(ena_dev);
-	if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
+	if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED))
 		ena_com_aenq_intr_handler(ena_dev, adapter);
 }
 
 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
 {
+	if (!adapter->wd_state)
+		return;
+
 	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
 		return;
 
@@ -1441,6 +1532,24 @@ static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
 	}
 }
 
+static int ena_calc_io_queue_num(__rte_unused struct ena_com_dev *ena_dev,
+				 struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+	int io_sq_num, io_cq_num, io_queue_num;
+
+	io_sq_num = get_feat_ctx->max_queues.max_sq_num;
+	io_cq_num = get_feat_ctx->max_queues.max_cq_num;
+
+	io_queue_num = RTE_MIN(io_sq_num, io_cq_num);
+
+	if (unlikely(io_queue_num == 0)) {
+		RTE_LOG(ERR, PMD, "Number of IO queues should not be 0\n");
+		return -EFAULT;
+	}
+
+	return io_queue_num;
+}
+
 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 {
 	struct rte_pci_device *pci_dev;
@@ -1450,8 +1559,10 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
 	struct ena_com_dev_get_features_ctx get_feat_ctx;
 	int queue_size, rc;
+	u16 tx_sgl_size = 0;
 
 	static int adapters_found;
+	bool wd_state;
 
 	memset(adapter, 0, sizeof(struct ena_adapter));
 	ena_dev = &adapter->ena_dev;
@@ -1495,22 +1606,28 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 		 adapter->id_number);
 
 	/* device specific initialization routine */
-	rc = ena_device_init(ena_dev, &get_feat_ctx);
+	rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
 	if (rc) {
 		PMD_INIT_LOG(CRIT, "Failed to init ENA device");
-		return -1;
+		goto err;
 	}
+	adapter->wd_state = wd_state;
 
 	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
-	adapter->num_queues = get_feat_ctx.max_queues.max_sq_num;
+	adapter->num_queues = ena_calc_io_queue_num(ena_dev,
+						    &get_feat_ctx);
 
-	queue_size = ena_calc_queue_size(ena_dev, &get_feat_ctx);
-	if ((queue_size <= 0) || (adapter->num_queues <= 0))
-		return -EFAULT;
+	queue_size = ena_calc_queue_size(ena_dev, &tx_sgl_size, &get_feat_ctx);
+	if (queue_size <= 0 || adapter->num_queues <= 0) {
+		rc = -EFAULT;
+		goto err_device_destroy;
+	}
 
 	adapter->tx_ring_size = queue_size;
 	adapter->rx_ring_size = queue_size;
 
+	adapter->max_tx_sgl_size = tx_sgl_size;
+
 	/* prepare ring structures */
 	ena_init_rings(adapter);
 
@@ -1533,7 +1650,8 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 					 RTE_CACHE_LINE_SIZE);
 	if (!adapter->drv_stats) {
 		RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n");
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto err_delete_debug_area;
 	}
 
 	rte_intr_callback_register(intr_handle,
@@ -1551,6 +1669,16 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	adapter->state = ENA_ADAPTER_STATE_INIT;
 
 	return 0;
+
+err_delete_debug_area:
+	ena_com_delete_debug_area(ena_dev);
+
+err_device_destroy:
+	ena_com_delete_host_info(ena_dev);
+	ena_com_admin_destroy(ena_dev);
+
+err:
+	return rc;
 }
 
 static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
@@ -1561,7 +1689,7 @@ static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
 		(struct ena_adapter *)(eth_dev->data->dev_private);
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return -EPERM;
+		return 0;
 
 	if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
 		ena_close(eth_dev);
@@ -1609,6 +1737,7 @@ static void ena_init_rings(struct ena_adapter *adapter)
 		ring->id = i;
 		ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
 		ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
+		ring->sgl_size = adapter->max_tx_sgl_size;
 	}
 
 	for (i = 0; i < adapter->num_queues; i++) {
@@ -1689,6 +1818,16 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 
 	adapter->tx_supported_offloads = tx_feat;
 	adapter->rx_supported_offloads = rx_feat;
+
+	dev_info->rx_desc_lim.nb_max = ENA_MAX_RING_DESC;
+	dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
+
+	dev_info->tx_desc_lim.nb_max = ENA_MAX_RING_DESC;
+	dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
+	dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
+					feat.max_queues.max_packet_tx_descs);
+	dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
+					feat.max_queues.max_packet_tx_descs);
 }
 
 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -1699,6 +1838,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	unsigned int ring_mask = ring_size - 1;
 	uint16_t next_to_clean = rx_ring->next_to_clean;
 	uint16_t desc_in_use = 0;
+	uint16_t req_id;
 	unsigned int recv_idx = 0;
 	struct rte_mbuf *mbuf = NULL;
 	struct rte_mbuf *mbuf_head = NULL;
@@ -1732,6 +1872,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 				    &ena_rx_ctx);
 		if (unlikely(rc)) {
 			RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc);
+			rx_ring->adapter->trigger_reset = true;
 			return 0;
 		}
 
@@ -1739,12 +1880,17 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			break;
 
 		while (segments < ena_rx_ctx.descs) {
-			mbuf = rx_buff_info[next_to_clean & ring_mask];
+			req_id = ena_rx_ctx.ena_bufs[segments].req_id;
+			rc = validate_rx_req_id(rx_ring, req_id);
+			if (unlikely(rc))
+				break;
+
+			mbuf = rx_buff_info[req_id];
 			mbuf->data_len = ena_rx_ctx.ena_bufs[segments].len;
 			mbuf->data_off = RTE_PKTMBUF_HEADROOM;
 			mbuf->refcnt = 1;
 			mbuf->next = NULL;
-			if (segments == 0) {
+			if (unlikely(segments == 0)) {
 				mbuf->nb_segs = ena_rx_ctx.descs;
 				mbuf->port = rx_ring->port_id;
 				mbuf->pkt_len = 0;
@@ -1756,13 +1902,15 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			mbuf_head->pkt_len += mbuf->data_len;
 
 			mbuf_prev = mbuf;
+			rx_ring->empty_rx_reqs[next_to_clean & ring_mask] =
+				req_id;
 			segments++;
 			next_to_clean++;
 		}
 
 		/* fill mbuf attributes if any */
 		ena_rx_mbuf_prepare(mbuf_head, &ena_rx_ctx);
-		mbuf_head->hash.rss = (uint32_t)rx_ring->id;
+		mbuf_head->hash.rss = ena_rx_ctx.hash;
 
 		/* pass to DPDK application head mbuf */
 		rx_pkts[recv_idx] = mbuf_head;
@@ -1872,6 +2020,23 @@ static void ena_update_hints(struct ena_adapter *adapter,
 	}
 }
 
+static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
+					struct rte_mbuf *mbuf)
+{
+	int num_segments, rc;
+
+	num_segments = mbuf->nb_segs;
+
+	if (likely(num_segments < tx_ring->sgl_size))
+		return 0;
+
+	rc = rte_pktmbuf_linearize(mbuf);
+	if (unlikely(rc))
+		RTE_LOG(WARNING, PMD, "Mbuf linearize failed\n");
+
+	return rc;
+}
+
 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts)
 {
@@ -1902,6 +2067,10 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
 		mbuf = tx_pkts[sent_idx];
 
+		rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
+		if (unlikely(rc))
+			break;
+
 		req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask];
 		tx_info = &tx_ring->tx_buffer_info[req_id];
 		tx_info->mbuf = mbuf;
@@ -1979,6 +2148,10 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	/* Clear complete packets */
 	while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {
+		rc = validate_tx_req_id(tx_ring, req_id);
+		if (rc)
+			break;
+
 		/* Get Tx info & store how many descs were processed */
 		tx_info = &tx_ring->tx_buffer_info[req_id];
 		total_tx_descs += tx_info->tx_descs;
@@ -2023,7 +2196,8 @@ static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_ena_pmd = {
 	.id_table = pci_id_ena_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+		     RTE_PCI_DRV_WC_ACTIVATE,
 	.probe = eth_ena_pci_probe,
 	.remove = eth_ena_pci_remove,
 };
@@ -2032,9 +2206,7 @@ RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
 
-RTE_INIT(ena_init_log);
-static void
-ena_init_log(void)
+RTE_INIT(ena_init_log)
 {
 	ena_logtype_init = rte_log_register("pmd.net.ena.init");
 	if (ena_logtype_init >= 0)
@@ -2103,7 +2275,8 @@ static void ena_keep_alive(void *adapter_data,
 static void unimplemented_aenq_handler(__rte_unused void *data,
 				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
 {
-	// Unimplemented handler
+	RTE_LOG(ERR, PMD, "Unknown event was received or event with "
+		"unimplemented handler\n");
 }
 
 static struct ena_aenq_handlers aenq_handlers = {