X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fena%2Fena_ethdev.c;h=634c97acf60d85f9df133c7abcebbe5547e63f9b;hb=a5a0a43bc62ed5c735ae482dbc4d8a7af4b95eab;hp=655c53b525ac9f9b4b0092b9ad24c42c15c30246;hpb=3a822d79c5da8ed65fd08a8188b9b7d4c35fe199;p=dpdk.git

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 655c53b525..634c97acf6 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -21,7 +21,7 @@
 #include
 
 #define DRV_MODULE_VER_MAJOR 2
-#define DRV_MODULE_VER_MINOR 4
+#define DRV_MODULE_VER_MINOR 5
 #define DRV_MODULE_VER_SUBMINOR 0
 
 #define __MERGE_64B_H_L(h, l) (((uint64_t)h << 32) | l)
@@ -99,6 +99,7 @@ static const struct ena_stats ena_stats_tx_strings[] = {
     ENA_STAT_TX_ENTRY(doorbells),
     ENA_STAT_TX_ENTRY(bad_req_id),
     ENA_STAT_TX_ENTRY(available_desc),
+    ENA_STAT_TX_ENTRY(missed_tx),
 };
 
 static const struct ena_stats ena_stats_rx_strings[] = {
@@ -116,13 +117,13 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 #define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
 #define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
 
-#define QUEUE_OFFLOADS (DEV_TX_OFFLOAD_TCP_CKSUM |\
-    DEV_TX_OFFLOAD_UDP_CKSUM |\
-    DEV_TX_OFFLOAD_IPV4_CKSUM |\
-    DEV_TX_OFFLOAD_TCP_TSO)
-#define MBUF_OFFLOADS (PKT_TX_L4_MASK |\
-    PKT_TX_IP_CKSUM |\
-    PKT_TX_TCP_SEG)
+#define QUEUE_OFFLOADS (RTE_ETH_TX_OFFLOAD_TCP_CKSUM |\
+    RTE_ETH_TX_OFFLOAD_UDP_CKSUM |\
+    RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |\
+    RTE_ETH_TX_OFFLOAD_TCP_TSO)
+#define MBUF_OFFLOADS (RTE_MBUF_F_TX_L4_MASK |\
+    RTE_MBUF_F_TX_IP_CKSUM |\
+    RTE_MBUF_F_TX_TCP_SEG)
 
 /** Vendor ID used by Amazon devices */
 #define PCI_VENDOR_ID_AMAZON 0x1D0F
@@ -130,15 +131,14 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 #define PCI_DEVICE_ID_ENA_VF 0xEC20
 #define PCI_DEVICE_ID_ENA_VF_RSERV0 0xEC21
 
-#define ENA_TX_OFFLOAD_MASK (\
-    PKT_TX_L4_MASK | \
-    PKT_TX_IPV6 | \
-    PKT_TX_IPV4 | \
-    PKT_TX_IP_CKSUM | \
-    PKT_TX_TCP_SEG)
+#define ENA_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_L4_MASK | \
+    RTE_MBUF_F_TX_IPV6 | \
+    RTE_MBUF_F_TX_IPV4 | \
+    RTE_MBUF_F_TX_IP_CKSUM | \
+    RTE_MBUF_F_TX_TCP_SEG)
 
 #define ENA_TX_OFFLOAD_NOTSUP_MASK \
-    (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
+    (RTE_MBUF_F_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
 
 /** HW specific offloads capabilities. */
 /* IPv4 checksum offload. */
@@ -295,24 +295,24 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
     if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) {
         packet_type |= RTE_PTYPE_L3_IPV4;
         if (unlikely(ena_rx_ctx->l3_csum_err))
-            ol_flags |= PKT_RX_IP_CKSUM_BAD;
+            ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
         else
-            ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+            ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
     } else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6) {
         packet_type |= RTE_PTYPE_L3_IPV6;
     }
 
     if (!ena_rx_ctx->l4_csum_checked || ena_rx_ctx->frag)
-        ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+        ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
     else if (unlikely(ena_rx_ctx->l4_csum_err))
-        ol_flags |= PKT_RX_L4_CKSUM_BAD;
+        ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
     else
-        ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+        ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
 
     if (fill_hash &&
         likely((packet_type & ENA_PTYPE_HAS_HASH) && !ena_rx_ctx->frag)) {
-        ol_flags |= PKT_RX_RSS_HASH;
+        ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
         mbuf->hash.rss = ena_rx_ctx->hash;
     }
 
@@ -330,19 +330,19 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
     if ((mbuf->ol_flags & MBUF_OFFLOADS) &&
         (queue_offloads & QUEUE_OFFLOADS)) {
         /* check if TSO is required */
-        if ((mbuf->ol_flags & PKT_TX_TCP_SEG) &&
-            (queue_offloads & DEV_TX_OFFLOAD_TCP_TSO)) {
+        if ((mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) &&
+            (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)) {
             ena_tx_ctx->tso_enable = true;
             ena_meta->l4_hdr_len = GET_L4_HDR_LEN(mbuf);
         }
 
         /* check if L3 checksum is needed */
-        if ((mbuf->ol_flags & PKT_TX_IP_CKSUM) &&
-            (queue_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM))
+        if ((mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
+            (queue_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM))
             ena_tx_ctx->l3_csum_enable = true;
 
-        if (mbuf->ol_flags & PKT_TX_IPV6) {
+        if (mbuf->ol_flags & RTE_MBUF_F_TX_IPV6) {
             ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6;
         } else {
             ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4;
@@ -355,13 +355,13 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
         }
 
         /* check if L4 checksum is needed */
-        if (((mbuf->ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) &&
-            (queue_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) {
+        if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM) &&
+            (queue_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) {
             ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP;
             ena_tx_ctx->l4_csum_enable = true;
-        } else if (((mbuf->ol_flags & PKT_TX_L4_MASK) ==
-                PKT_TX_UDP_CKSUM) &&
-                (queue_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)) {
+        } else if (((mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
+                RTE_MBUF_F_TX_UDP_CKSUM) &&
+                (queue_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)) {
             ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP;
             ena_tx_ctx->l4_csum_enable = true;
         } else {
@@ -494,7 +494,7 @@ err:
 static int ena_close(struct rte_eth_dev *dev)
 {
     struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+    struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
     struct ena_adapter *adapter = dev->data->dev_private;
     int ret = 0;
 
@@ -642,9 +642,9 @@ static int ena_link_update(struct rte_eth_dev *dev,
     struct rte_eth_link *link = &dev->data->dev_link;
     struct ena_adapter *adapter = dev->data->dev_private;
 
-    link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
-    link->link_speed = ETH_SPEED_NUM_NONE;
-    link->link_duplex = ETH_LINK_FULL_DUPLEX;
+    link->link_status = adapter->link_status ? RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+    link->link_speed = RTE_ETH_SPEED_NUM_NONE;
+    link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
     return 0;
 }
@@ -922,7 +922,7 @@ static int ena_start(struct rte_eth_dev *dev)
     if (rc)
         goto err_start_tx;
 
-    if (adapter->edev_data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+    if (adapter->edev_data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
         rc = ena_rss_configure(adapter);
         if (rc)
             goto err_rss_init;
@@ -954,7 +954,7 @@ static int ena_stop(struct rte_eth_dev *dev)
     struct ena_adapter *adapter = dev->data->dev_private;
     struct ena_com_dev *ena_dev = &adapter->ena_dev;
     struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+    struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
     int rc;
 
     /* Cannot free memory in secondary process */
@@ -976,10 +976,9 @@ static int ena_stop(struct rte_eth_dev *dev)
     rte_intr_disable(intr_handle);
 
     rte_intr_efd_disable(intr_handle);
-    if (intr_handle->intr_vec != NULL) {
-        rte_free(intr_handle->intr_vec);
-        intr_handle->intr_vec = NULL;
-    }
+
+    /* Cleanup vector list */
+    rte_intr_vec_list_free(intr_handle);
 
     rte_intr_enable(intr_handle);
 
@@ -995,7 +994,7 @@ static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring)
     struct ena_adapter *adapter = ring->adapter;
     struct ena_com_dev *ena_dev = &adapter->ena_dev;
     struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+    struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
     struct ena_com_create_io_ctx ctx =
         /* policy set to _HOST just to satisfy icc compiler */
         { ENA_ADMIN_PLACEMENT_POLICY_HOST,
@@ -1015,7 +1014,10 @@ static int ena_create_io_queue(struct rte_eth_dev *dev, struct ena_ring *ring)
         ena_qid = ENA_IO_RXQ_IDX(ring->id);
         ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
         if (rte_intr_dp_is_en(intr_handle))
-            ctx.msix_vector = intr_handle->intr_vec[ring->id];
+            ctx.msix_vector =
+                rte_intr_vec_list_index_get(intr_handle,
+                    ring->id);
+
         for (i = 0; i < ring->ring_size; i++)
             ring->empty_rx_reqs[i] = i;
     }
@@ -1164,20 +1166,22 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
     txq->size_mask = nb_desc - 1;
     txq->numa_socket_id = socket_id;
     txq->pkts_without_db = false;
+    txq->last_cleanup_ticks = 0;
 
-    txq->tx_buffer_info = rte_zmalloc("txq->tx_buffer_info",
-        sizeof(struct ena_tx_buffer) *
-        txq->ring_size,
-        RTE_CACHE_LINE_SIZE);
+    txq->tx_buffer_info = rte_zmalloc_socket("txq->tx_buffer_info",
+        sizeof(struct ena_tx_buffer) * txq->ring_size,
+        RTE_CACHE_LINE_SIZE,
+        socket_id);
     if (!txq->tx_buffer_info) {
         PMD_DRV_LOG(ERR,
             "Failed to allocate memory for Tx buffer info\n");
         return -ENOMEM;
     }
 
-    txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs",
-        sizeof(u16) * txq->ring_size,
-        RTE_CACHE_LINE_SIZE);
+    txq->empty_tx_reqs = rte_zmalloc_socket("txq->empty_tx_reqs",
+        sizeof(uint16_t) * txq->ring_size,
+        RTE_CACHE_LINE_SIZE,
+        socket_id);
     if (!txq->empty_tx_reqs) {
         PMD_DRV_LOG(ERR,
             "Failed to allocate memory for empty Tx requests\n");
@@ -1186,9 +1190,10 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
     }
 
     txq->push_buf_intermediate_buf =
-        rte_zmalloc("txq->push_buf_intermediate_buf",
-            txq->tx_max_header_size,
-            RTE_CACHE_LINE_SIZE);
+        rte_zmalloc_socket("txq->push_buf_intermediate_buf",
+            txq->tx_max_header_size,
+            RTE_CACHE_LINE_SIZE,
+            socket_id);
     if (!txq->push_buf_intermediate_buf) {
         PMD_DRV_LOG(ERR, "Failed to alloc push buffer for LLQ\n");
         rte_free(txq->tx_buffer_info);
@@ -1211,6 +1216,9 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
             txq->ring_size - ENA_REFILL_THRESH_PACKET);
     }
 
+    txq->missing_tx_completion_threshold =
+        RTE_MIN(txq->ring_size / 2, ENA_DEFAULT_MISSING_COMP);
+
     /* Store pointer to this queue in upper layer */
     txq->configured = 1;
     dev->data->tx_queues[queue_idx] = txq;
@@ -1270,19 +1278,20 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
     rxq->numa_socket_id = socket_id;
     rxq->mb_pool = mp;
 
-    rxq->rx_buffer_info = rte_zmalloc("rxq->buffer_info",
+    rxq->rx_buffer_info = rte_zmalloc_socket("rxq->buffer_info",
         sizeof(struct ena_rx_buffer) * nb_desc,
-        RTE_CACHE_LINE_SIZE);
+        RTE_CACHE_LINE_SIZE,
+        socket_id);
     if (!rxq->rx_buffer_info) {
         PMD_DRV_LOG(ERR,
             "Failed to allocate memory for Rx buffer info\n");
         return -ENOMEM;
     }
 
-    rxq->rx_refill_buffer = rte_zmalloc("rxq->rx_refill_buffer",
-        sizeof(struct rte_mbuf *) * nb_desc,
-        RTE_CACHE_LINE_SIZE);
-
+    rxq->rx_refill_buffer = rte_zmalloc_socket("rxq->rx_refill_buffer",
+        sizeof(struct rte_mbuf *) * nb_desc,
+        RTE_CACHE_LINE_SIZE,
+        socket_id);
     if (!rxq->rx_refill_buffer) {
         PMD_DRV_LOG(ERR,
             "Failed to allocate memory for Rx refill buffer\n");
@@ -1291,9 +1300,10 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
         return -ENOMEM;
     }
 
-    rxq->empty_rx_reqs = rte_zmalloc("rxq->empty_rx_reqs",
-        sizeof(uint16_t) * nb_desc,
-        RTE_CACHE_LINE_SIZE);
+    rxq->empty_rx_reqs = rte_zmalloc_socket("rxq->empty_rx_reqs",
+        sizeof(uint16_t) * nb_desc,
+        RTE_CACHE_LINE_SIZE,
+        socket_id);
     if (!rxq->empty_rx_reqs) {
         PMD_DRV_LOG(ERR,
             "Failed to allocate memory for empty Rx requests\n");
@@ -1535,6 +1545,87 @@ static void check_for_admin_com_state(struct ena_adapter *adapter)
     }
 }
 
+static int check_for_tx_completion_in_queue(struct ena_adapter *adapter,
+    struct ena_ring *tx_ring)
+{
+    struct ena_tx_buffer *tx_buf;
+    uint64_t timestamp;
+    uint64_t completion_delay;
+    uint32_t missed_tx = 0;
+    unsigned int i;
+    int rc = 0;
+
+    for (i = 0; i < tx_ring->ring_size; ++i) {
+        tx_buf = &tx_ring->tx_buffer_info[i];
+        timestamp = tx_buf->timestamp;
+
+        if (timestamp == 0)
+            continue;
+
+        completion_delay = rte_get_timer_cycles() - timestamp;
+        if (completion_delay > adapter->missing_tx_completion_to) {
+            if (unlikely(!tx_buf->print_once)) {
+                PMD_TX_LOG(WARNING,
+                    "Found a Tx that wasn't completed on time, qid %d, index %d. "
+                    "Missing Tx outstanding for %" PRIu64 " msecs.\n",
+                    tx_ring->id, i, completion_delay /
+                    rte_get_timer_hz() * 1000);
+                tx_buf->print_once = true;
+            }
+            ++missed_tx;
+        }
+    }
+
+    if (unlikely(missed_tx > tx_ring->missing_tx_completion_threshold)) {
+        PMD_DRV_LOG(ERR,
+            "The number of lost Tx completions is above the threshold (%d > %d). "
+            "Trigger the device reset.\n",
+            missed_tx,
+            tx_ring->missing_tx_completion_threshold);
+        adapter->reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
+        adapter->trigger_reset = true;
+        rc = -EIO;
+    }
+
+    tx_ring->tx_stats.missed_tx += missed_tx;
+
+    return rc;
+}
+
+static void check_for_tx_completions(struct ena_adapter *adapter)
+{
+    struct ena_ring *tx_ring;
+    uint64_t tx_cleanup_delay;
+    size_t qid;
+    int budget;
+    uint16_t nb_tx_queues = adapter->edev_data->nb_tx_queues;
+
+    if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
+        return;
+
+    nb_tx_queues = adapter->edev_data->nb_tx_queues;
+    budget = adapter->missing_tx_completion_budget;
+
+    qid = adapter->last_tx_comp_qid;
+    while (budget-- > 0) {
+        tx_ring = &adapter->tx_ring[qid];
+
+        /* Tx cleanup is called only by the burst function and can be
+         * called dynamically by the application. Also cleanup is
+         * limited by the threshold. To avoid false detection of the
+         * missing HW Tx completion, get the delay since last cleanup
+         * function was called.
+         */
+        tx_cleanup_delay = rte_get_timer_cycles() -
+            tx_ring->last_cleanup_ticks;
+        if (tx_cleanup_delay < adapter->tx_cleanup_stall_delay)
+            check_for_tx_completion_in_queue(adapter, tx_ring);
+        qid = (qid + 1) % nb_tx_queues;
+    }
+
+    adapter->last_tx_comp_qid = qid;
+}
+
 static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
     void *arg)
 {
@@ -1543,6 +1634,7 @@ static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
 
     check_for_missing_keep_alive(adapter);
     check_for_admin_com_state(adapter);
+    check_for_tx_completions(adapter);
 
     if (unlikely(adapter->trigger_reset)) {
         PMD_DRV_LOG(ERR, "Trigger reset is on\n");
@@ -1734,7 +1826,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
         pci_dev->addr.devid,
         pci_dev->addr.function);
 
-    intr_handle = &pci_dev->intr_handle;
+    intr_handle = pci_dev->intr_handle;
 
     adapter->regs = pci_dev->mem_resource[ENA_REGS_BAR].addr;
     adapter->dev_mem_base = pci_dev->mem_resource[ENA_MEM_BAR].addr;
@@ -1913,12 +2005,32 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 
     adapter->state = ENA_ADAPTER_STATE_CONFIG;
 
-    if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
-        dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
-    dev->data->dev_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+    if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
+        dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+    dev->data->dev_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
+
+    /* Scattered Rx cannot be turned off in the HW, so this capability must
+     * be forced.
+     */
+    dev->data->scattered_rx = 1;
+
+    adapter->last_tx_comp_qid = 0;
+
+    adapter->missing_tx_completion_budget =
+        RTE_MIN(ENA_MONITORED_TX_QUEUES, dev->data->nb_tx_queues);
+
+    adapter->missing_tx_completion_to = ENA_TX_TIMEOUT;
+    /* To avoid detection of the spurious Tx completion timeout due to
+     * application not calling the Tx cleanup function, set timeout for the
+     * Tx queue which should be half of the missing completion timeout for a
+     * safety. If there will be a lot of missing Tx completions in the
+     * queue, they will be detected sooner or later.
+     */
+    adapter->tx_cleanup_stall_delay = adapter->missing_tx_completion_to / 2;
 
     adapter->tx_selected_offloads = dev->data->dev_conf.txmode.offloads;
     adapter->rx_selected_offloads = dev->data->dev_conf.rxmode.offloads;
+
     return 0;
 }
 
@@ -1956,15 +2068,17 @@ static uint64_t ena_get_rx_port_offloads(struct ena_adapter *adapter)
     uint64_t port_offloads = 0;
 
     if (adapter->offloads.rx_offloads & ENA_L3_IPV4_CSUM)
-        port_offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+        port_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
 
     if (adapter->offloads.rx_offloads &
         (ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM))
         port_offloads |=
-            DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;
+            RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
 
     if (adapter->offloads.rx_offloads & ENA_RX_RSS_HASH)
-        port_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+        port_offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+    port_offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
 
     return port_offloads;
 }
@@ -1974,17 +2088,17 @@ static uint64_t ena_get_tx_port_offloads(struct ena_adapter *adapter)
     uint64_t port_offloads = 0;
 
     if (adapter->offloads.tx_offloads & ENA_IPV4_TSO)
-        port_offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+        port_offloads |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
 
     if (adapter->offloads.tx_offloads & ENA_L3_IPV4_CSUM)
-        port_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+        port_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
 
     if (adapter->offloads.tx_offloads &
         (ENA_L4_IPV4_CSUM_PARTIAL | ENA_L4_IPV4_CSUM | ENA_L4_IPV6_CSUM | ENA_L4_IPV6_CSUM_PARTIAL))
         port_offloads |=
-            DEV_TX_OFFLOAD_UDP_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
+            RTE_ETH_TX_OFFLOAD_UDP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
-    port_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+    port_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
     return port_offloads;
 }
@@ -2017,14 +2131,14 @@ static int ena_infos_get(struct rte_eth_dev *dev,
     ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
 
     dev_info->speed_capa =
-        ETH_LINK_SPEED_1G |
-        ETH_LINK_SPEED_2_5G |
-        ETH_LINK_SPEED_5G |
-        ETH_LINK_SPEED_10G |
-        ETH_LINK_SPEED_25G |
-        ETH_LINK_SPEED_40G |
-        ETH_LINK_SPEED_50G |
-        ETH_LINK_SPEED_100G;
+        RTE_ETH_LINK_SPEED_1G |
+        RTE_ETH_LINK_SPEED_2_5G |
+        RTE_ETH_LINK_SPEED_5G |
+        RTE_ETH_LINK_SPEED_10G |
+        RTE_ETH_LINK_SPEED_25G |
+        RTE_ETH_LINK_SPEED_40G |
+        RTE_ETH_LINK_SPEED_50G |
+        RTE_ETH_LINK_SPEED_100G;
 
     /* Inform framework about available features */
     dev_info->rx_offload_capa = ena_get_rx_port_offloads(adapter);
@@ -2190,7 +2304,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
     }
 #endif
 
-    fill_hash = rx_ring->offloads & DEV_RX_OFFLOAD_RSS_HASH;
+    fill_hash = rx_ring->offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
     descs_in_use = rx_ring->ring_size -
         ena_com_free_q_entries(rx_ring->ena_com_io_sq) - 1;
@@ -2241,7 +2355,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
         ena_rx_mbuf_prepare(mbuf, &ena_rx_ctx, fill_hash);
 
         if (unlikely(mbuf->ol_flags &
-                (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))) {
+                (RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD))) {
             rte_atomic64_inc(&rx_ring->adapter->drv_stats->ierrors);
             ++rx_ring->rx_stats.bad_csum;
         }
@@ -2289,10 +2403,10 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
         if (ol_flags == 0)
             continue;
 
-        l4_csum_flag = ol_flags & PKT_TX_L4_MASK;
+        l4_csum_flag = ol_flags & RTE_MBUF_F_TX_L4_MASK;
         /* SCTP checksum offload is not supported by the ENA. */
         if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) ||
-            l4_csum_flag == PKT_TX_SCTP_CKSUM) {
+            l4_csum_flag == RTE_MBUF_F_TX_SCTP_CKSUM) {
             PMD_TX_LOG(DEBUG,
                 "mbuf[%" PRIu32 "] has unsupported offloads flags set: 0x%" PRIu64 "\n",
                 i, ol_flags);
@@ -2302,12 +2416,12 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
         /* Check if requested offload is also enabled for the queue */
-        if ((ol_flags & PKT_TX_IP_CKSUM &&
-            !(tx_ring->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) ||
-            (l4_csum_flag == PKT_TX_TCP_CKSUM &&
-            !(tx_ring->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) ||
-            (l4_csum_flag == PKT_TX_UDP_CKSUM &&
-            !(tx_ring->offloads & DEV_TX_OFFLOAD_UDP_CKSUM))) {
+        if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM &&
+            !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)) ||
+            (l4_csum_flag == RTE_MBUF_F_TX_TCP_CKSUM &&
+            !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)) ||
+            (l4_csum_flag == RTE_MBUF_F_TX_UDP_CKSUM &&
+            !(tx_ring->offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM))) {
             PMD_TX_LOG(DEBUG,
                 "mbuf[%" PRIu32 "]: requested offloads: %" PRIu16 " are not enabled for the queue[%u]\n",
                 i, m->nb_segs, tx_ring->id);
@@ -2318,7 +2432,7 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
         /* The caller is obligated to set l2 and l3 len if any cksum
          * offload is enabled.
          */
-        if (unlikely(ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK) &&
+        if (unlikely(ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK) &&
            (m->l2_len == 0 || m->l3_len == 0))) {
             PMD_TX_LOG(DEBUG,
                 "mbuf[%" PRIu32 "]: l2_len or l3_len values are 0 while the offload was requested\n",
@@ -2337,14 +2451,14 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
          * pseudo header checksum is needed.
          */
        need_pseudo_csum = false;
-        if (ol_flags & PKT_TX_IPV4) {
-            if (ol_flags & PKT_TX_IP_CKSUM &&
+        if (ol_flags & RTE_MBUF_F_TX_IPV4) {
+            if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM &&
                 !(dev_offload_capa & ENA_L3_IPV4_CSUM)) {
                 rte_errno = ENOTSUP;
                 return i;
             }
 
-            if (ol_flags & PKT_TX_TCP_SEG &&
+            if (ol_flags & RTE_MBUF_F_TX_TCP_SEG &&
                 !(dev_offload_capa & ENA_IPV4_TSO)) {
                 rte_errno = ENOTSUP;
                 return i;
@@ -2353,7 +2467,7 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
             /* Check HW capabilities and if pseudo csum is needed
              * for L4 offloads.
              */
-            if (l4_csum_flag != PKT_TX_L4_NO_CKSUM &&
+            if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM &&
                 !(dev_offload_capa & ENA_L4_IPV4_CSUM)) {
                 if (dev_offload_capa &
                     ENA_L4_IPV4_CSUM_PARTIAL) {
@@ -2370,22 +2484,22 @@ eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
             frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
             if (frag_field & RTE_IPV4_HDR_DF_FLAG) {
                 m->packet_type |= RTE_PTYPE_L4_NONFRAG;
-            } else if (ol_flags & PKT_TX_TCP_SEG) {
+            } else if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
                 /* In case we are supposed to TSO and have DF
                  * not set (DF=0) hardware must be provided with
                  * partial checksum.
                  */
                 need_pseudo_csum = true;
             }
-        } else if (ol_flags & PKT_TX_IPV6) {
+        } else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
             /* There is no support for IPv6 TSO as for now. */
-            if (ol_flags & PKT_TX_TCP_SEG) {
+            if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
                 rte_errno = ENOTSUP;
                 return i;
             }
 
             /* Check HW capabilities and if pseudo csum is needed */
-            if (l4_csum_flag != PKT_TX_L4_NO_CKSUM &&
+            if (l4_csum_flag != RTE_MBUF_F_TX_L4_NO_CKSUM &&
                 !(dev_offload_capa & ENA_L4_IPV6_CSUM)) {
                 if (dev_offload_capa &
                     ENA_L4_IPV6_CSUM_PARTIAL) {
@@ -2421,6 +2535,20 @@ static void ena_update_hints(struct ena_adapter *adapter,
         adapter->ena_dev.mmio_read.reg_read_to =
             hints->mmio_read_timeout * 1000;
 
+    if (hints->missing_tx_completion_timeout) {
+        if (hints->missing_tx_completion_timeout ==
+            ENA_HW_HINTS_NO_TIMEOUT) {
+            adapter->missing_tx_completion_to =
+                ENA_HW_HINTS_NO_TIMEOUT;
+        } else {
+            /* Convert from msecs to ticks */
+            adapter->missing_tx_completion_to = rte_get_timer_hz() *
+                hints->missing_tx_completion_timeout / 1000;
+            adapter->tx_cleanup_stall_delay =
+                adapter->missing_tx_completion_to / 2;
+        }
+    }
+
     if (hints->driver_watchdog_timeout) {
         if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
             adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
@@ -2611,6 +2739,7 @@ static int ena_xmit_mbuf(struct ena_ring *tx_ring, struct rte_mbuf *mbuf)
     }
 
     tx_info->tx_descs = nb_hw_desc;
+    tx_info->timestamp = rte_get_timer_cycles();
 
     tx_ring->tx_stats.cnt++;
     tx_ring->tx_stats.bytes += mbuf->pkt_len;
@@ -2643,6 +2772,7 @@ static void ena_tx_cleanup(struct ena_ring *tx_ring)
 
         /* Get Tx info & store how many descs were processed */
         tx_info = &tx_ring->tx_buffer_info[req_id];
+        tx_info->timestamp = 0;
 
         mbuf = tx_info->mbuf;
         rte_pktmbuf_free(mbuf);
@@ -2663,6 +2793,9 @@ static void ena_tx_cleanup(struct ena_ring *tx_ring)
         ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
         ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
     }
+
+    /* Notify completion handler that the cleanup was just called */
+    tx_ring->last_cleanup_ticks = rte_get_timer_cycles();
 }
 
 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -2981,7 +3114,7 @@ static int ena_parse_devargs(struct ena_adapter *adapter,
 static int ena_setup_rx_intr(struct rte_eth_dev *dev)
 {
     struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
-    struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+    struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
     int rc;
     uint16_t vectors_nb, i;
     bool rx_intr_requested = dev->data->dev_conf.intr_conf.rxq;
@@ -3008,9 +3141,9 @@ static int ena_setup_rx_intr(struct rte_eth_dev *dev)
         goto enable_intr;
     }
 
-    intr_handle->intr_vec = rte_zmalloc("intr_vec",
-        dev->data->nb_rx_queues * sizeof(*intr_handle->intr_vec), 0);
-    if (intr_handle->intr_vec == NULL) {
+    /* Allocate the vector list */
+    if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
+        dev->data->nb_rx_queues)) {
         PMD_DRV_LOG(ERR,
             "Failed to allocate interrupt vector for %d queues\n",
             dev->data->nb_rx_queues);
@@ -3029,7 +3162,9 @@ static int ena_setup_rx_intr(struct rte_eth_dev *dev)
     }
 
     for (i = 0; i < vectors_nb; ++i)
-        intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
+        if (rte_intr_vec_list_index_set(intr_handle, i,
+            RTE_INTR_VEC_RXTX_OFFSET + i))
+            goto disable_intr_efd;
 
     rte_intr_enable(intr_handle);
     return 0;
@@ -3037,8 +3172,7 @@ static int ena_setup_rx_intr(struct rte_eth_dev *dev)
 disable_intr_efd:
     rte_intr_efd_disable(intr_handle);
 free_intr_vec:
-    rte_free(intr_handle->intr_vec);
-    intr_handle->intr_vec = NULL;
+    rte_intr_vec_list_free(intr_handle);
 enable_intr:
     rte_intr_enable(intr_handle);
     return rc;
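
For reference, the missing-Tx-completion watchdog introduced above is one pattern spread over several hunks: ena_xmit_mbuf() stamps each in-flight Tx buffer with rte_get_timer_cycles(), ena_tx_cleanup() clears the stamp and records last_cleanup_ticks, and the timer callback walks a budget of rings counting stamps older than missing_tx_completion_to, triggering a device reset when the count crosses the per-ring threshold. The snippet below is a minimal standalone sketch of that bookkeeping, not driver code: the tx_slot struct and the tx_slot_arm(), tx_slot_clear(), and watchdog_scan_ring() helpers are hypothetical names invented for illustration; only rte_get_timer_cycles() and rte_get_timer_hz() are actual DPDK calls used by the patch.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include <rte_cycles.h>

/* Illustrative per-descriptor bookkeeping (hypothetical, not the driver's struct). */
struct tx_slot {
    uint64_t timestamp; /* cycles when the packet was posted; 0 = completed */
    bool warned;        /* report a stuck slot only once */
};

/* Arm a slot when a packet is posted, clear it when its completion arrives. */
static inline void tx_slot_arm(struct tx_slot *slot)
{
    slot->timestamp = rte_get_timer_cycles();
}

static inline void tx_slot_clear(struct tx_slot *slot)
{
    slot->timestamp = 0;
    slot->warned = false;
}

/*
 * Count slots whose completion is overdue. The caller converts its timeout
 * from milliseconds to cycles once, e.g.
 *     timeout_cycles = rte_get_timer_hz() * timeout_ms / 1000;
 * and decides on a reset when the returned count exceeds a per-ring
 * threshold, mirroring check_for_tx_completion_in_queue() above.
 */
static unsigned int
watchdog_scan_ring(struct tx_slot *slots, size_t ring_size, uint64_t timeout_cycles)
{
    uint64_t now = rte_get_timer_cycles();
    unsigned int missed = 0;
    size_t i;

    for (i = 0; i < ring_size; i++) {
        if (slots[i].timestamp == 0)
            continue; /* nothing in flight in this slot */
        if (now - slots[i].timestamp > timeout_cycles) {
            if (!slots[i].warned) {
                /* a real driver would log the stuck slot here, once */
                slots[i].warned = true;
            }
            missed++;
        }
    }

    return missed;
}

Note that the driver only scans rings whose ena_tx_cleanup() ran within tx_cleanup_stall_delay (half of the completion timeout); a ring whose application simply stopped calling the Tx burst path is skipped rather than being misreported as lost hardware completions.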