X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fena%2Fena_ethdev.c;h=c255dc6dbdc96714d7a0e5b0464ca17acae6902d;hb=4e05a229c5da;hp=f0e95ef589a65b08838734d748891036fca094c9;hpb=c2034976673dfac9afc72bf1bcde35989ac909de;p=dpdk.git

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index f0e95ef589..c255dc6dbd 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -85,6 +85,9 @@
 
 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 
+#define ENA_MAX_RING_DESC	ENA_DEFAULT_RING_SIZE
+#define ENA_MIN_RING_DESC	128
+
 enum ethtool_stringset {
 	ETH_SS_TEST = 0,
 	ETH_SS_STATS,
@@ -381,6 +384,27 @@ static inline int validate_rx_req_id(struct ena_ring *rx_ring, uint16_t req_id)
 	return -EFAULT;
 }
 
+static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
+{
+	struct ena_tx_buffer *tx_info = NULL;
+
+	if (likely(req_id < tx_ring->ring_size)) {
+		tx_info = &tx_ring->tx_buffer_info[req_id];
+		if (likely(tx_info->mbuf))
+			return 0;
+	}
+
+	if (tx_info)
+		RTE_LOG(ERR, PMD, "tx_info doesn't have valid mbuf\n");
+	else
+		RTE_LOG(ERR, PMD, "Invalid req_id: %hu\n", req_id);
+
+	/* Trigger device reset */
+	tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
+	tx_ring->adapter->trigger_reset = true;
+	return -EFAULT;
+}
+
 static void ena_config_host_info(struct ena_com_dev *ena_dev)
 {
 	struct ena_admin_host_info *host_info;
@@ -412,9 +436,12 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
 
 	rc = ena_com_set_host_attributes(ena_dev);
 	if (rc) {
-		RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
-		if (rc != -ENA_COM_UNSUPPORTED)
-			goto err;
+		if (rc == -ENA_COM_UNSUPPORTED)
+			RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+		else
+			RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+
+		goto err;
 	}
 
 	return;
@@ -465,9 +492,12 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
 
 	rc = ena_com_set_host_attributes(&adapter->ena_dev);
 	if (rc) {
-		RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
-		if (rc != -ENA_COM_UNSUPPORTED)
-			goto err;
+		if (rc == -ENA_COM_UNSUPPORTED)
+			RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+		else
+			RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+
+		goto err;
 	}
 
 	return;
@@ -510,7 +540,9 @@ ena_dev_reset(struct rte_eth_dev *dev)
 
 	ena_com_set_admin_running_state(ena_dev, false);
 
-	ena_com_dev_reset(ena_dev, adapter->reset_reason);
+	rc = ena_com_dev_reset(ena_dev, adapter->reset_reason);
+	if (rc)
+		RTE_LOG(ERR, PMD, "Device reset failed\n");
 
 	for (i = 0; i < nb_queues; i++)
 		mb_pool_rx[i] = adapter->rx_ring[i].mb_pool;
@@ -555,7 +587,7 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
-	int ret, i;
+	int rc, i;
 	u16 entry_value;
 	int conf_idx;
 	int idx;
@@ -567,8 +599,7 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
 		RTE_LOG(WARNING, PMD,
 			"indirection table %d is bigger than supported (%d)\n",
 			reta_size, ENA_RX_RSS_TABLE_SIZE);
-		ret = -EINVAL;
-		goto err;
+		return -EINVAL;
 	}
 
 	for (i = 0 ; i < reta_size ; i++) {
@@ -580,29 +611,28 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
 		if (TEST_BIT(reta_conf[conf_idx].mask, idx)) {
 			entry_value =
 				ENA_IO_RXQ_IDX(reta_conf[conf_idx].reta[idx]);
-			ret = ena_com_indirect_table_fill_entry(ena_dev,
-								i,
-								entry_value);
-			if (unlikely(ret && (ret != ENA_COM_UNSUPPORTED))) {
+
+			rc = ena_com_indirect_table_fill_entry(ena_dev,
+							       i,
+							       entry_value);
+			if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
 				RTE_LOG(ERR, PMD,
 					"Cannot fill indirect table\n");
-				ret = -ENOTSUP;
-				goto err;
+				return rc;
 			}
 		}
 	}
 
-	ret = ena_com_indirect_table_set(ena_dev);
-	if (unlikely(ret && (ret != ENA_COM_UNSUPPORTED))) {
+	rc = ena_com_indirect_table_set(ena_dev);
+	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
 		RTE_LOG(ERR, PMD, "Cannot flush the indirect table\n");
-		ret = -ENOTSUP;
-		goto err;
+		return rc;
 	}
 
 	RTE_LOG(DEBUG, PMD, "%s(): RSS configured %d entries for port %d\n",
 		__func__, reta_size, adapter->rte_dev->data->port_id);
-err:
-	return ret;
+
+	return 0;
 }
 
 /* Query redirection table. */
@@ -613,7 +643,7 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
-	int ret;
+	int rc;
 	int i;
 	u32 indirect_table[ENA_RX_RSS_TABLE_SIZE] = {0};
 	int reta_conf_idx;
@@ -623,11 +653,10 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 	    (reta_size > RTE_RETA_GROUP_SIZE && ((reta_conf + 1) == NULL)))
 		return -EINVAL;
 
-	ret = ena_com_indirect_table_get(ena_dev, indirect_table);
-	if (unlikely(ret && (ret != ENA_COM_UNSUPPORTED))) {
+	rc = ena_com_indirect_table_get(ena_dev, indirect_table);
+	if (unlikely(rc && rc != ENA_COM_UNSUPPORTED)) {
 		RTE_LOG(ERR, PMD, "cannot get indirect table\n");
-		ret = -ENOTSUP;
-		goto err;
+		return -ENOTSUP;
 	}
 
 	for (i = 0 ; i < reta_size ; i++) {
@@ -637,8 +666,8 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
 			reta_conf[reta_conf_idx].reta[reta_idx] =
 				ENA_IO_RXQ_IDX_REV(indirect_table[i]);
 	}
-err:
-	return ret;
+
+	return 0;
 }
 
 static int ena_rss_init_default(struct ena_adapter *adapter)
@@ -819,7 +848,7 @@ static int ena_link_update(struct rte_eth_dev *dev,
 	adapter = (struct ena_adapter *)(dev->data->dev_private);
 
 	link->link_status = adapter->link_status ? ETH_LINK_UP : ETH_LINK_DOWN;
-	link->link_speed = ETH_SPEED_NUM_10G;
+	link->link_speed = ETH_SPEED_NUM_NONE;
 	link->link_duplex = ETH_LINK_FULL_DUPLEX;
 
 	return 0;
@@ -860,7 +889,7 @@ static int ena_queue_restart_all(struct rte_eth_dev *dev,
 				PMD_INIT_LOG(ERR,
 					     "failed to restart queue %d type(%d)",
 					     i, ring_type);
-				return -1;
+				return rc;
 			}
 		}
 	}
@@ -884,9 +913,11 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
 {
 	uint32_t max_frame_len = ena_get_mtu_conf(adapter);
 
-	if (max_frame_len > adapter->max_mtu) {
-		PMD_INIT_LOG(ERR, "Unsupported MTU of %d", max_frame_len);
-		return -1;
+	if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
+		PMD_INIT_LOG(ERR, "Unsupported MTU of %d. "
" + "max mtu: %d, min mtu: %d\n", + max_frame_len, adapter->max_mtu, ENA_MIN_MTU); + return ENA_COM_UNSUPPORTED; } return 0; @@ -894,6 +925,7 @@ static int ena_check_valid_conf(struct ena_adapter *adapter) static int ena_calc_queue_size(struct ena_com_dev *ena_dev, + u16 *max_tx_sgl_size, struct ena_com_dev_get_features_ctx *get_feat_ctx) { uint32_t queue_size = ENA_DEFAULT_RING_SIZE; @@ -911,11 +943,14 @@ ena_calc_queue_size(struct ena_com_dev *ena_dev, if (!rte_is_power_of_2(queue_size)) queue_size = rte_align32pow2(queue_size >> 1); - if (queue_size == 0) { + if (unlikely(queue_size == 0)) { PMD_INIT_LOG(ERR, "Invalid queue size"); return -EFAULT; } + *max_tx_sgl_size = RTE_MIN(ENA_PKT_MAX_BUFS, + get_feat_ctx->max_queues.max_packet_tx_descs); + return queue_size; } @@ -980,12 +1015,12 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) ena_dev = &adapter->ena_dev; ena_assert_msg(ena_dev != NULL, "Uninitialized device"); - if (mtu > ena_get_mtu_conf(adapter)) { + if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) { RTE_LOG(ERR, PMD, - "Given MTU (%d) exceeds maximum MTU supported (%d)\n", - mtu, ena_get_mtu_conf(adapter)); - rc = -EINVAL; - goto err; + "Invalid MTU setting. new_mtu: %d " + "max mtu: %d min mtu: %d\n", + mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU); + return -EINVAL; } rc = ena_com_set_dev_mtu(ena_dev, mtu); @@ -994,7 +1029,6 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) else RTE_LOG(NOTICE, PMD, "Set MTU: %d\n", mtu); -err: return rc; } @@ -1018,7 +1052,7 @@ static int ena_start(struct rte_eth_dev *dev) return rc; if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode & - ETH_MQ_RX_RSS_FLAG) { + ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) { rc = ena_rss_init_default(adapter); if (rc) return rc; @@ -1065,7 +1099,7 @@ static int ena_queue_restart(struct ena_ring *ring) rc = ena_populate_rx_queue(ring, bufs_num); if (rc != bufs_num) { PMD_INIT_LOG(ERR, "Failed to populate rx ring !"); - return (-1); + return ENA_COM_FAULT; } return 0; @@ -1095,12 +1129,12 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, RTE_LOG(CRIT, PMD, "API violation. Queue %d is already configured\n", queue_idx); - return -1; + return ENA_COM_FAULT; } if (!rte_is_power_of_2(nb_desc)) { RTE_LOG(ERR, PMD, - "Unsupported size of RX queue: %d is not a power of 2.", + "Unsupported size of TX queue: %d is not a power of 2.", nb_desc); return -EINVAL; } @@ -1126,6 +1160,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, RTE_LOG(ERR, PMD, "failed to create io TX queue #%d (qid:%d) rc: %d\n", queue_idx, ena_qid, rc); + return rc; } txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid]; txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid]; @@ -1137,10 +1172,11 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, RTE_LOG(ERR, PMD, "Failed to get TX queue handlers. 
TX queue num %d rc: %d\n", queue_idx, rc); - ena_com_destroy_io_queue(ena_dev, ena_qid); - goto err; + goto err_destroy_io_queue; } + ena_com_update_numa_node(txq->ena_com_io_cq, ctx.numa_node); + txq->port_id = dev->data->port_id; txq->next_to_clean = 0; txq->next_to_use = 0; @@ -1152,7 +1188,8 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, RTE_CACHE_LINE_SIZE); if (!txq->tx_buffer_info) { RTE_LOG(ERR, PMD, "failed to alloc mem for tx buffer info\n"); - return -ENOMEM; + rc = -ENOMEM; + goto err_destroy_io_queue; } txq->empty_tx_reqs = rte_zmalloc("txq->empty_tx_reqs", @@ -1160,9 +1197,10 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, RTE_CACHE_LINE_SIZE); if (!txq->empty_tx_reqs) { RTE_LOG(ERR, PMD, "failed to alloc mem for tx reqs\n"); - rte_free(txq->tx_buffer_info); - return -ENOMEM; + rc = -ENOMEM; + goto err_free; } + for (i = 0; i < txq->ring_size; i++) txq->empty_tx_reqs[i] = i; @@ -1174,7 +1212,14 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev, /* Store pointer to this queue in upper layer */ txq->configured = 1; dev->data->tx_queues[queue_idx] = txq; -err: + + return 0; + +err_free: + rte_free(txq->tx_buffer_info); + +err_destroy_io_queue: + ena_com_destroy_io_queue(ena_dev, ena_qid); return rc; } @@ -1201,12 +1246,12 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, RTE_LOG(CRIT, PMD, "API violation. Queue %d is already configured\n", queue_idx); - return -1; + return ENA_COM_FAULT; } if (!rte_is_power_of_2(nb_desc)) { RTE_LOG(ERR, PMD, - "Unsupported size of TX queue: %d is not a power of 2.", + "Unsupported size of RX queue: %d is not a power of 2.", nb_desc); return -EINVAL; } @@ -1228,9 +1273,11 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, ctx.numa_node = ena_cpu_to_node(queue_idx); rc = ena_com_create_io_queue(ena_dev, &ctx); - if (rc) + if (rc) { RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n", queue_idx, rc); + return rc; + } rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid]; rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid]; @@ -1243,6 +1290,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, "Failed to get RX queue handlers. RX queue num %d rc: %d\n", queue_idx, rc); ena_com_destroy_io_queue(ena_dev, ena_qid); + return rc; } rxq->port_id = dev->data->port_id; @@ -1256,6 +1304,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, RTE_CACHE_LINE_SIZE); if (!rxq->rx_buffer_info) { RTE_LOG(ERR, PMD, "failed to alloc mem for rx buffer info\n"); + ena_com_destroy_io_queue(ena_dev, ena_qid); return -ENOMEM; } @@ -1266,6 +1315,7 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev, RTE_LOG(ERR, PMD, "failed to alloc mem for empty rx reqs\n"); rte_free(rxq->rx_buffer_info); rxq->rx_buffer_info = NULL; + ena_com_destroy_io_queue(ena_dev, ena_qid); return -ENOMEM; } @@ -1316,6 +1366,10 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]); req_id = rxq->empty_rx_reqs[next_to_use_masked]; + rc = validate_rx_req_id(rxq, req_id); + if (unlikely(rc < 0)) + break; + /* prepare physical address for DMA transaction */ ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM; ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM; @@ -1331,9 +1385,17 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count) next_to_use++; } + if (unlikely(i < count)) + RTE_LOG(WARNING, PMD, "refilled rx qid %d with only %d " + "buffers (from %d)\n", rxq->id, i, count); + /* When we submitted free recources to device... 
-	if (i > 0) {
-		/* ...let HW know that it can fill buffers with data */
+	if (likely(i > 0)) {
+		/* ...let HW know that it can fill buffers with data
+		 *
+		 * Add memory barrier to make sure the desc were written before
+		 * issue a doorbell
+		 */
 		rte_wmb();
 		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
@@ -1408,7 +1470,9 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
 
 	aenq_groups = BIT(ENA_ADMIN_LINK_CHANGE) |
 		      BIT(ENA_ADMIN_NOTIFICATION) |
-		      BIT(ENA_ADMIN_KEEP_ALIVE);
+		      BIT(ENA_ADMIN_KEEP_ALIVE) |
+		      BIT(ENA_ADMIN_FATAL_ERROR) |
+		      BIT(ENA_ADMIN_WARNING);
 
 	aenq_groups &= get_feat_ctx->aenq.supported_groups;
 	rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
@@ -1436,7 +1500,7 @@ static void ena_interrupt_handler_rte(void *cb_arg)
 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
 
 	ena_com_admin_q_comp_intr_handler(ena_dev);
-	if (adapter->state != ENA_ADAPTER_STATE_CLOSED)
+	if (likely(adapter->state != ENA_ADAPTER_STATE_CLOSED))
 		ena_com_aenq_intr_handler(ena_dev, adapter);
 }
 
@@ -1482,6 +1546,24 @@ static void ena_timer_wd_callback(__rte_unused struct rte_timer *timer,
 	}
 }
 
+static int ena_calc_io_queue_num(__rte_unused struct ena_com_dev *ena_dev,
+				 struct ena_com_dev_get_features_ctx *get_feat_ctx)
+{
+	int io_sq_num, io_cq_num, io_queue_num;
+
+	io_sq_num = get_feat_ctx->max_queues.max_sq_num;
+	io_cq_num = get_feat_ctx->max_queues.max_cq_num;
+
+	io_queue_num = RTE_MIN(io_sq_num, io_cq_num);
+
+	if (unlikely(io_queue_num == 0)) {
+		RTE_LOG(ERR, PMD, "Number of IO queues should not be 0\n");
+		return -EFAULT;
+	}
+
+	return io_queue_num;
+}
+
 static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 {
 	struct rte_pci_device *pci_dev;
@@ -1491,6 +1573,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	struct ena_com_dev *ena_dev = &adapter->ena_dev;
 	struct ena_com_dev_get_features_ctx get_feat_ctx;
 	int queue_size, rc;
+	u16 tx_sgl_size = 0;
 
 	static int adapters_found;
 	bool wd_state;
@@ -1540,20 +1623,25 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 
 	rc = ena_device_init(ena_dev, &get_feat_ctx, &wd_state);
 	if (rc) {
 		PMD_INIT_LOG(CRIT, "Failed to init ENA device");
-		return -1;
+		goto err;
 	}
 	adapter->wd_state = wd_state;
 
 	ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
-	adapter->num_queues = get_feat_ctx.max_queues.max_sq_num;
+	adapter->num_queues = ena_calc_io_queue_num(ena_dev,
+						    &get_feat_ctx);
 
-	queue_size = ena_calc_queue_size(ena_dev, &get_feat_ctx);
-	if ((queue_size <= 0) || (adapter->num_queues <= 0))
-		return -EFAULT;
+	queue_size = ena_calc_queue_size(ena_dev, &tx_sgl_size, &get_feat_ctx);
+	if (queue_size <= 0 || adapter->num_queues <= 0) {
+		rc = -EFAULT;
+		goto err_device_destroy;
+	}
 
 	adapter->tx_ring_size = queue_size;
 	adapter->rx_ring_size = queue_size;
 
+	adapter->max_tx_sgl_size = tx_sgl_size;
+
 	/* prepare ring structures */
 	ena_init_rings(adapter);
@@ -1576,7 +1664,8 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 					 RTE_CACHE_LINE_SIZE);
 	if (!adapter->drv_stats) {
 		RTE_LOG(ERR, PMD, "failed to alloc mem for adapter stats\n");
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto err_delete_debug_area;
 	}
 
 	rte_intr_callback_register(intr_handle,
@@ -1594,6 +1683,16 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	adapter->state = ENA_ADAPTER_STATE_INIT;
 
 	return 0;
+
+err_delete_debug_area:
+	ena_com_delete_debug_area(ena_dev);
+
+err_device_destroy:
+	ena_com_delete_host_info(ena_dev);
+	ena_com_admin_destroy(ena_dev);
+
+err:
+	return rc;
 }
 
 static int eth_ena_dev_uninit(struct rte_eth_dev *eth_dev)
@@ -1652,6 +1751,7 @@ static void ena_init_rings(struct ena_adapter *adapter)
 		ring->id = i;
 		ring->tx_mem_queue_type = adapter->ena_dev.tx_mem_queue_type;
 		ring->tx_max_header_size = adapter->ena_dev.tx_max_header_size;
+		ring->sgl_size = adapter->max_tx_sgl_size;
 	}
 
 	for (i = 0; i < adapter->num_queues; i++) {
@@ -1732,6 +1832,16 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 
 	adapter->tx_supported_offloads = tx_feat;
 	adapter->rx_supported_offloads = rx_feat;
+
+	dev_info->rx_desc_lim.nb_max = ENA_MAX_RING_DESC;
+	dev_info->rx_desc_lim.nb_min = ENA_MIN_RING_DESC;
+
+	dev_info->tx_desc_lim.nb_max = ENA_MAX_RING_DESC;
+	dev_info->tx_desc_lim.nb_min = ENA_MIN_RING_DESC;
+	dev_info->tx_desc_lim.nb_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
+					feat.max_queues.max_packet_tx_descs);
+	dev_info->tx_desc_lim.nb_mtu_seg_max = RTE_MIN(ENA_PKT_MAX_BUFS,
+					feat.max_queues.max_packet_tx_descs);
 }
 
 static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -1776,6 +1886,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 				    &ena_rx_ctx);
 		if (unlikely(rc)) {
 			RTE_LOG(ERR, PMD, "ena_com_rx_pkt error %d\n", rc);
+			rx_ring->adapter->trigger_reset = true;
 			return 0;
 		}
 
@@ -1793,7 +1904,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			mbuf->data_off = RTE_PKTMBUF_HEADROOM;
 			mbuf->refcnt = 1;
 			mbuf->next = NULL;
-			if (segments == 0) {
+			if (unlikely(segments == 0)) {
 				mbuf->nb_segs = ena_rx_ctx.descs;
 				mbuf->port = rx_ring->port_id;
 				mbuf->pkt_len = 0;
@@ -1923,6 +2034,23 @@ static void ena_update_hints(struct ena_adapter *adapter,
 	}
 }
 
+static int ena_check_and_linearize_mbuf(struct ena_ring *tx_ring,
+					struct rte_mbuf *mbuf)
+{
+	int num_segments, rc;
+
+	num_segments = mbuf->nb_segs;
+
+	if (likely(num_segments < tx_ring->sgl_size))
+		return 0;
+
+	rc = rte_pktmbuf_linearize(mbuf);
+	if (unlikely(rc))
+		RTE_LOG(WARNING, PMD, "Mbuf linearize failed\n");
+
+	return rc;
+}
+
 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts)
 {
@@ -1953,6 +2081,10 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) {
 		mbuf = tx_pkts[sent_idx];
 
+		rc = ena_check_and_linearize_mbuf(tx_ring, mbuf);
+		if (unlikely(rc))
+			break;
+
 		req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask];
 		tx_info = &tx_ring->tx_buffer_info[req_id];
 		tx_info->mbuf = mbuf;
@@ -2030,6 +2162,10 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	/* Clear complete packets */
 	while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {
+		rc = validate_tx_req_id(tx_ring, req_id);
+		if (rc)
+			break;
+
 		/* Get Tx info & store how many descs were processed */
 		tx_info = &tx_ring->tx_buffer_info[req_id];
 		total_tx_descs += tx_info->tx_descs;
@@ -2074,7 +2210,8 @@ static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_ena_pmd = {
 	.id_table = pci_id_ena_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+		     RTE_PCI_DRV_WC_ACTIVATE,
 	.probe = eth_ena_pci_probe,
 	.remove = eth_ena_pci_remove,
 };
@@ -2083,9 +2220,7 @@ RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
 
-RTE_INIT(ena_init_log);
-static void
-ena_init_log(void)
+RTE_INIT(ena_init_log)
 {
 	ena_logtype_init = rte_log_register("pmd.net.ena.init");
 	if (ena_logtype_init >= 0)
@@ -2154,7 +2289,8 @@ static void ena_keep_alive(void *adapter_data,
 static void unimplemented_aenq_handler(__rte_unused void *data,
 				       __rte_unused struct ena_admin_aenq_entry *aenq_e)
 {
-	// Unimplemented handler
+	RTE_LOG(ERR, PMD, "Unknown event was received or event with "
+			  "unimplemented handler\n");
 }
 
 static struct ena_aenq_handlers aenq_handlers = {