diff --git a/drivers/net/ice/ice_dcf_ethdev.c b/drivers/net/ice/ice_dcf_ethdev.c
index 01412ced00..2faed3cc7a 100644
--- a/drivers/net/ice/ice_dcf_ethdev.c
+++ b/drivers/net/ice/ice_dcf_ethdev.c
@@ -42,18 +42,576 @@ ice_dcf_xmit_pkts(__rte_unused void *tx_queue,
 	return 0;
 }
 
+static int
+ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
+{
+	struct ice_dcf_adapter *dcf_ad = dev->data->dev_private;
+	struct rte_eth_dev_data *dev_data = dev->data;
+	struct iavf_hw *hw = &dcf_ad->real_hw.avf;
+	uint16_t buf_size, max_pkt_len, len;
+
+	buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
+	rxq->rx_hdr_len = 0;
+	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
+	len = ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len;
+	max_pkt_len = RTE_MIN(len, dev->data->dev_conf.rxmode.max_rx_pkt_len);
+
+	/* Check if the jumbo frame and maximum packet length are set
+	 * correctly.
+	 */
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+		if (max_pkt_len <= RTE_ETHER_MAX_LEN ||
+		    max_pkt_len > ICE_FRAME_SIZE_MAX) {
+			PMD_DRV_LOG(ERR, "maximum packet length must be "
+				    "larger than %u and smaller than %u, "
+				    "as jumbo frame is enabled",
+				    (uint32_t)RTE_ETHER_MAX_LEN,
+				    (uint32_t)ICE_FRAME_SIZE_MAX);
+			return -EINVAL;
+		}
+	} else {
+		if (max_pkt_len < RTE_ETHER_MIN_LEN ||
+		    max_pkt_len > RTE_ETHER_MAX_LEN) {
+			PMD_DRV_LOG(ERR, "maximum packet length must be "
+				    "larger than %u and smaller than %u, "
+				    "as jumbo frame is disabled",
+				    (uint32_t)RTE_ETHER_MIN_LEN,
+				    (uint32_t)RTE_ETHER_MAX_LEN);
+			return -EINVAL;
+		}
+	}
+
+	rxq->max_pkt_len = max_pkt_len;
+	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+	    (rxq->max_pkt_len + 2 * ICE_VLAN_TAG_SIZE) > buf_size) {
+		dev_data->scattered_rx = 1;
+	}
+	rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
+	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+	IAVF_WRITE_FLUSH(hw);
+
+	return 0;
+}
+
+static int
+ice_dcf_init_rx_queues(struct rte_eth_dev *dev)
+{
+	struct ice_rx_queue **rxq =
+		(struct ice_rx_queue **)dev->data->rx_queues;
+	int i, ret;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		if (!rxq[i] || !rxq[i]->q_set)
+			continue;
+		ret = ice_dcf_init_rxq(dev, rxq[i]);
+		if (ret)
+			return ret;
+	}
+
+	ice_set_rx_function(dev);
+	ice_set_tx_function(dev);
+
+	return 0;
+}
+
+#define IAVF_MISC_VEC_ID		RTE_INTR_VEC_ZERO_OFFSET
+#define IAVF_RX_VEC_START		RTE_INTR_VEC_RXTX_OFFSET
+
+#define IAVF_ITR_INDEX_DEFAULT		0
+#define IAVF_QUEUE_ITR_INTERVAL_DEFAULT	32 /* 32 us */
+#define IAVF_QUEUE_ITR_INTERVAL_MAX	8160 /* 8160 us */
+
+static inline uint16_t
+iavf_calc_itr_interval(int16_t interval)
+{
+	if (interval < 0 || interval > IAVF_QUEUE_ITR_INTERVAL_MAX)
+		interval = IAVF_QUEUE_ITR_INTERVAL_DEFAULT;
+
+	/* Convert to hardware count, as writing each 1 represents 2 us */
+	return interval / 2;
+}
+
+static int
+ice_dcf_config_rx_queues_irqs(struct rte_eth_dev *dev,
+			      struct rte_intr_handle *intr_handle)
+{
+	struct ice_dcf_adapter *adapter = dev->data->dev_private;
+	struct ice_dcf_hw *hw = &adapter->real_hw;
+	uint16_t interval, i;
+	int vec;
+
+	if (rte_intr_cap_multiple(intr_handle) &&
+	    dev->data->dev_conf.intr_conf.rxq) {
+		if (rte_intr_efd_enable(intr_handle,
+					dev->data->nb_rx_queues))
+			return -1;
+	}
+
+	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
+		intr_handle->intr_vec =
+			rte_zmalloc("intr_vec",
+				    dev->data->nb_rx_queues * sizeof(int), 0);
+		if (!intr_handle->intr_vec) {
+			PMD_DRV_LOG(ERR, "Failed to allocate %d rx intr_vec",
+				    dev->data->nb_rx_queues);
+			return -1;
+		}
+	}
+
+	if (!dev->data->dev_conf.intr_conf.rxq ||
+	    !rte_intr_dp_is_en(intr_handle)) {
+		/* Rx interrupt disabled, map interrupt only for writeback */
+		hw->nb_msix = 1;
+		if (hw->vf_res->vf_cap_flags &
+		    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
+			/* If WB_ON_ITR is supported, enable it */
+			hw->msix_base = IAVF_RX_VEC_START;
+			IAVF_WRITE_REG(&hw->avf,
+				       IAVF_VFINT_DYN_CTLN1(hw->msix_base - 1),
+				       IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK |
+				       IAVF_VFINT_DYN_CTLN1_WB_ON_ITR_MASK);
+		} else {
+			/* If no WB_ON_ITR offload flags, need to set
+			 * interrupt for descriptor write back.
+			 */
+			hw->msix_base = IAVF_MISC_VEC_ID;
+
+			/* set ITR to max */
+			interval =
+			iavf_calc_itr_interval(IAVF_QUEUE_ITR_INTERVAL_MAX);
+			IAVF_WRITE_REG(&hw->avf, IAVF_VFINT_DYN_CTL01,
+				       IAVF_VFINT_DYN_CTL01_INTENA_MASK |
+				       (IAVF_ITR_INDEX_DEFAULT <<
+					IAVF_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
+				       (interval <<
+					IAVF_VFINT_DYN_CTL01_INTERVAL_SHIFT));
+		}
+		IAVF_WRITE_FLUSH(&hw->avf);
+		/* map all queues to the same interrupt */
+		for (i = 0; i < dev->data->nb_rx_queues; i++)
+			hw->rxq_map[hw->msix_base] |= 1 << i;
+	} else {
+		if (!rte_intr_allow_others(intr_handle)) {
+			hw->nb_msix = 1;
+			hw->msix_base = IAVF_MISC_VEC_ID;
+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
+				hw->rxq_map[hw->msix_base] |= 1 << i;
+				intr_handle->intr_vec[i] = IAVF_MISC_VEC_ID;
+			}
+			PMD_DRV_LOG(DEBUG,
+				    "vector %u are mapping to all Rx queues",
+				    hw->msix_base);
+		} else {
+			/* If Rx interrupt is required, and we can use
+			 * multi interrupts, then the vec is from 1
+			 */
+			hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
+					      intr_handle->nb_efd);
+			hw->msix_base = IAVF_MISC_VEC_ID;
+			vec = IAVF_MISC_VEC_ID;
+			for (i = 0; i < dev->data->nb_rx_queues; i++) {
+				hw->rxq_map[vec] |= 1 << i;
+				intr_handle->intr_vec[i] = vec++;
+				if (vec >= hw->nb_msix)
+					vec = IAVF_RX_VEC_START;
+			}
+			PMD_DRV_LOG(DEBUG,
+				    "%u vectors are mapping to %u Rx queues",
+				    hw->nb_msix, dev->data->nb_rx_queues);
+		}
+	}
+
+	if (ice_dcf_config_irq_map(hw)) {
+		PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+		return -1;
+	}
+	return 0;
+}
+
+static int
+alloc_rxq_mbufs(struct ice_rx_queue *rxq)
+{
+	volatile union ice_rx_flex_desc *rxd;
+	struct rte_mbuf *mbuf = NULL;
+	uint64_t dma_addr;
+	uint16_t i;
+
+	for (i = 0; i < rxq->nb_rx_desc; i++) {
+		mbuf = rte_mbuf_raw_alloc(rxq->mp);
+		if (unlikely(!mbuf)) {
+			PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+			return -ENOMEM;
+		}
+
+		rte_mbuf_refcnt_set(mbuf, 1);
+		mbuf->next = NULL;
+		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+		mbuf->nb_segs = 1;
+		mbuf->port = rxq->port_id;
+
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+		rxd = &rxq->rx_ring[i];
+		rxd->read.pkt_addr = dma_addr;
+		rxd->read.hdr_addr = 0;
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+		rxd->read.rsvd1 = 0;
+		rxd->read.rsvd2 = 0;
+#endif
+
+		rxq->sw_ring[i].mbuf = (void *)mbuf;
+	}
+
+	return 0;
+}
+
+static int
+ice_dcf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct ice_dcf_adapter *ad = dev->data->dev_private;
+	struct iavf_hw *hw = &ad->real_hw.avf;
+	struct ice_rx_queue *rxq;
+	int err = 0;
+
+	if (rx_queue_id >= dev->data->nb_rx_queues)
+		return -EINVAL;
+
+	rxq =
dev->data->rx_queues[rx_queue_id]; + + err = alloc_rxq_mbufs(rxq); + if (err) { + PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf"); + return err; + } + + rte_wmb(); + + /* Init the RX tail register. */ + IAVF_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1); + IAVF_WRITE_FLUSH(hw); + + /* Ready to switch the queue on */ + err = ice_dcf_switch_queue(&ad->real_hw, rx_queue_id, true, true); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on", + rx_queue_id); + return err; + } + + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +static inline void +reset_rx_queue(struct ice_rx_queue *rxq) +{ + uint16_t len; + uint32_t i; + + if (!rxq) + return; + + len = rxq->nb_rx_desc + ICE_RX_MAX_BURST; + + for (i = 0; i < len * sizeof(union ice_rx_flex_desc); i++) + ((volatile char *)rxq->rx_ring)[i] = 0; + + memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf)); + + for (i = 0; i < ICE_RX_MAX_BURST; i++) + rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf; + + /* for rx bulk */ + rxq->rx_nb_avail = 0; + rxq->rx_next_avail = 0; + rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1); + + rxq->rx_tail = 0; + rxq->nb_rx_hold = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; +} + +static inline void +reset_tx_queue(struct ice_tx_queue *txq) +{ + struct ice_tx_entry *txe; + uint32_t i, size; + uint16_t prev; + + if (!txq) { + PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL"); + return; + } + + txe = txq->sw_ring; + size = sizeof(struct ice_tx_desc) * txq->nb_tx_desc; + for (i = 0; i < size; i++) + ((volatile char *)txq->tx_ring)[i] = 0; + + prev = (uint16_t)(txq->nb_tx_desc - 1); + for (i = 0; i < txq->nb_tx_desc; i++) { + txq->tx_ring[i].cmd_type_offset_bsz = + rte_cpu_to_le_64(IAVF_TX_DESC_DTYPE_DESC_DONE); + txe[i].mbuf = NULL; + txe[i].last_id = i; + txe[prev].next_id = i; + prev = i; + } + + txq->tx_tail = 0; + txq->nb_tx_used = 0; + + txq->last_desc_cleaned = txq->nb_tx_desc - 1; + txq->nb_tx_free = txq->nb_tx_desc - 1; + + txq->tx_next_dd = txq->tx_rs_thresh - 1; + txq->tx_next_rs = txq->tx_rs_thresh - 1; +} + +static int +ice_dcf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) +{ + struct ice_dcf_adapter *ad = dev->data->dev_private; + struct ice_dcf_hw *hw = &ad->real_hw; + struct ice_rx_queue *rxq; + int err; + + if (rx_queue_id >= dev->data->nb_rx_queues) + return -EINVAL; + + err = ice_dcf_switch_queue(hw, rx_queue_id, true, false); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off", + rx_queue_id); + return err; + } + + rxq = dev->data->rx_queues[rx_queue_id]; + rxq->rx_rel_mbufs(rxq); + reset_rx_queue(rxq); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +static int +ice_dcf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct ice_dcf_adapter *ad = dev->data->dev_private; + struct iavf_hw *hw = &ad->real_hw.avf; + struct ice_tx_queue *txq; + int err = 0; + + if (tx_queue_id >= dev->data->nb_tx_queues) + return -EINVAL; + + txq = dev->data->tx_queues[tx_queue_id]; + + /* Init the RX tail register. 
*/ + txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(tx_queue_id); + IAVF_PCI_REG_WRITE(txq->qtx_tail, 0); + IAVF_WRITE_FLUSH(hw); + + /* Ready to switch the queue on */ + err = ice_dcf_switch_queue(&ad->real_hw, tx_queue_id, false, true); + + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on", + tx_queue_id); + return err; + } + + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; + + return 0; +} + +static int +ice_dcf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) +{ + struct ice_dcf_adapter *ad = dev->data->dev_private; + struct ice_dcf_hw *hw = &ad->real_hw; + struct ice_tx_queue *txq; + int err; + + if (tx_queue_id >= dev->data->nb_tx_queues) + return -EINVAL; + + err = ice_dcf_switch_queue(hw, tx_queue_id, false, false); + if (err) { + PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off", + tx_queue_id); + return err; + } + + txq = dev->data->tx_queues[tx_queue_id]; + txq->tx_rel_mbufs(txq); + reset_tx_queue(txq); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; + + return 0; +} + +static int +ice_dcf_start_queues(struct rte_eth_dev *dev) +{ + struct ice_rx_queue *rxq; + struct ice_tx_queue *txq; + int nb_rxq = 0; + int nb_txq, i; + + for (nb_txq = 0; nb_txq < dev->data->nb_tx_queues; nb_txq++) { + txq = dev->data->tx_queues[nb_txq]; + if (txq->tx_deferred_start) + continue; + if (ice_dcf_tx_queue_start(dev, nb_txq) != 0) { + PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_txq); + goto tx_err; + } + } + + for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) { + rxq = dev->data->rx_queues[nb_rxq]; + if (rxq->rx_deferred_start) + continue; + if (ice_dcf_rx_queue_start(dev, nb_rxq) != 0) { + PMD_DRV_LOG(ERR, "Fail to start queue %u", nb_rxq); + goto rx_err; + } + } + + return 0; + + /* stop the started queues if failed to start all queues */ +rx_err: + for (i = 0; i < nb_rxq; i++) + ice_dcf_rx_queue_stop(dev, i); +tx_err: + for (i = 0; i < nb_txq; i++) + ice_dcf_tx_queue_stop(dev, i); + + return -1; +} + static int ice_dcf_dev_start(struct rte_eth_dev *dev) { + struct ice_dcf_adapter *dcf_ad = dev->data->dev_private; + struct rte_intr_handle *intr_handle = dev->intr_handle; + struct ice_adapter *ad = &dcf_ad->parent; + struct ice_dcf_hw *hw = &dcf_ad->real_hw; + int ret; + + ad->pf.adapter_stopped = 0; + + hw->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues, + dev->data->nb_tx_queues); + + ret = ice_dcf_init_rx_queues(dev); + if (ret) { + PMD_DRV_LOG(ERR, "Fail to init queues"); + return ret; + } + + if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + ret = ice_dcf_init_rss(hw); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to configure RSS"); + return ret; + } + } + + ret = ice_dcf_configure_queues(hw); + if (ret) { + PMD_DRV_LOG(ERR, "Fail to config queues"); + return ret; + } + + ret = ice_dcf_config_rx_queues_irqs(dev, intr_handle); + if (ret) { + PMD_DRV_LOG(ERR, "Fail to config rx queues' irqs"); + return ret; + } + + if (dev->data->dev_conf.intr_conf.rxq != 0) { + rte_intr_disable(intr_handle); + rte_intr_enable(intr_handle); + } + + ret = ice_dcf_start_queues(dev); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to enable queues"); + return ret; + } + + ret = ice_dcf_add_del_all_mac_addr(hw, true); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to add mac addr"); + return ret; + } + dev->data->dev_link.link_status = ETH_LINK_UP; return 0; } +static void +ice_dcf_stop_queues(struct rte_eth_dev *dev) +{ + struct ice_dcf_adapter *ad = dev->data->dev_private; + struct ice_dcf_hw *hw = &ad->real_hw; + struct ice_rx_queue 
*rxq; + struct ice_tx_queue *txq; + int ret, i; + + /* Stop All queues */ + ret = ice_dcf_disable_queues(hw); + if (ret) + PMD_DRV_LOG(WARNING, "Fail to stop queues"); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if (!txq) + continue; + txq->tx_rel_mbufs(txq); + reset_tx_queue(txq); + dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + if (!rxq) + continue; + rxq->rx_rel_mbufs(rxq); + reset_rx_queue(rxq); + dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED; + } +} + static void ice_dcf_dev_stop(struct rte_eth_dev *dev) { + struct ice_dcf_adapter *dcf_ad = dev->data->dev_private; + struct rte_intr_handle *intr_handle = dev->intr_handle; + struct ice_adapter *ad = &dcf_ad->parent; + + if (ad->pf.adapter_stopped == 1) { + PMD_DRV_LOG(DEBUG, "Port is already stopped"); + return; + } + + ice_dcf_stop_queues(dev); + + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } + + ice_dcf_add_del_all_mac_addr(&dcf_ad->real_hw, false); dev->data->dev_link.link_status = ETH_LINK_DOWN; + ad->pf.adapter_stopped = 1; } static int @@ -148,19 +706,6 @@ ice_dcf_dev_info_get(struct rte_eth_dev *dev, return 0; } -static int -ice_dcf_stats_get(__rte_unused struct rte_eth_dev *dev, - __rte_unused struct rte_eth_stats *igb_stats) -{ - return 0; -} - -static int -ice_dcf_stats_reset(__rte_unused struct rte_eth_dev *dev) -{ - return 0; -} - static int ice_dcf_dev_promiscuous_enable(__rte_unused struct rte_eth_dev *dev) { @@ -213,60 +758,115 @@ ice_dcf_dev_filter_ctrl(struct rte_eth_dev *dev, return ret; } +#define ICE_DCF_32_BIT_WIDTH (CHAR_BIT * 4) +#define ICE_DCF_48_BIT_WIDTH (CHAR_BIT * 6) +#define ICE_DCF_48_BIT_MASK RTE_LEN2MASK(ICE_DCF_48_BIT_WIDTH, uint64_t) + static void -ice_dcf_dev_close(struct rte_eth_dev *dev) +ice_dcf_stat_update_48(uint64_t *offset, uint64_t *stat) { - struct ice_dcf_adapter *adapter = dev->data->dev_private; - - if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return; + if (*stat >= *offset) + *stat = *stat - *offset; + else + *stat = (uint64_t)((*stat + + ((uint64_t)1 << ICE_DCF_48_BIT_WIDTH)) - *offset); - dev->dev_ops = NULL; - dev->rx_pkt_burst = NULL; - dev->tx_pkt_burst = NULL; + *stat &= ICE_DCF_48_BIT_MASK; +} - ice_dcf_uninit_parent_adapter(dev); - ice_dcf_uninit_hw(dev, &adapter->real_hw); +static void +ice_dcf_stat_update_32(uint64_t *offset, uint64_t *stat) +{ + if (*stat >= *offset) + *stat = (uint64_t)(*stat - *offset); + else + *stat = (uint64_t)((*stat + + ((uint64_t)1 << ICE_DCF_32_BIT_WIDTH)) - *offset); } static void -ice_dcf_queue_release(__rte_unused void *q) +ice_dcf_update_stats(struct virtchnl_eth_stats *oes, + struct virtchnl_eth_stats *nes) { + ice_dcf_stat_update_48(&oes->rx_bytes, &nes->rx_bytes); + ice_dcf_stat_update_48(&oes->rx_unicast, &nes->rx_unicast); + ice_dcf_stat_update_48(&oes->rx_multicast, &nes->rx_multicast); + ice_dcf_stat_update_48(&oes->rx_broadcast, &nes->rx_broadcast); + ice_dcf_stat_update_32(&oes->rx_discards, &nes->rx_discards); + ice_dcf_stat_update_48(&oes->tx_bytes, &nes->tx_bytes); + ice_dcf_stat_update_48(&oes->tx_unicast, &nes->tx_unicast); + ice_dcf_stat_update_48(&oes->tx_multicast, &nes->tx_multicast); + ice_dcf_stat_update_48(&oes->tx_broadcast, &nes->tx_broadcast); + ice_dcf_stat_update_32(&oes->tx_errors, &nes->tx_errors); + ice_dcf_stat_update_32(&oes->tx_discards, &nes->tx_discards); } + 
static int -ice_dcf_link_update(__rte_unused struct rte_eth_dev *dev, - __rte_unused int wait_to_complete) +ice_dcf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { - return 0; + struct ice_dcf_adapter *ad = dev->data->dev_private; + struct ice_dcf_hw *hw = &ad->real_hw; + struct virtchnl_eth_stats pstats; + int ret; + + ret = ice_dcf_query_stats(hw, &pstats); + if (ret == 0) { + ice_dcf_update_stats(&hw->eth_stats_offset, &pstats); + stats->ipackets = pstats.rx_unicast + pstats.rx_multicast + + pstats.rx_broadcast - pstats.rx_discards; + stats->opackets = pstats.tx_broadcast + pstats.tx_multicast + + pstats.tx_unicast; + stats->imissed = pstats.rx_discards; + stats->oerrors = pstats.tx_errors + pstats.tx_discards; + stats->ibytes = pstats.rx_bytes; + stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN; + stats->obytes = pstats.tx_bytes; + } else { + PMD_DRV_LOG(ERR, "Get statistics failed"); + } + return ret; } static int -ice_dcf_rx_queue_setup(struct rte_eth_dev *dev, - uint16_t rx_queue_id, - __rte_unused uint16_t nb_rx_desc, - __rte_unused unsigned int socket_id, - __rte_unused const struct rte_eth_rxconf *rx_conf, - __rte_unused struct rte_mempool *mb_pool) +ice_dcf_stats_reset(struct rte_eth_dev *dev) { - struct ice_dcf_adapter *adapter = dev->data->dev_private; + struct ice_dcf_adapter *ad = dev->data->dev_private; + struct ice_dcf_hw *hw = &ad->real_hw; + struct virtchnl_eth_stats pstats; + int ret; - dev->data->rx_queues[rx_queue_id] = &adapter->rxqs[rx_queue_id]; + /* read stat values to clear hardware registers */ + ret = ice_dcf_query_stats(hw, &pstats); + if (ret != 0) + return ret; + + /* set stats offset base on current values */ + hw->eth_stats_offset = pstats; return 0; } -static int -ice_dcf_tx_queue_setup(struct rte_eth_dev *dev, - uint16_t tx_queue_id, - __rte_unused uint16_t nb_tx_desc, - __rte_unused unsigned int socket_id, - __rte_unused const struct rte_eth_txconf *tx_conf) +static void +ice_dcf_dev_close(struct rte_eth_dev *dev) { struct ice_dcf_adapter *adapter = dev->data->dev_private; - dev->data->tx_queues[tx_queue_id] = &adapter->txqs[tx_queue_id]; + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return; + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + ice_dcf_uninit_parent_adapter(dev); + ice_dcf_uninit_hw(dev, &adapter->real_hw); +} + +static int +ice_dcf_link_update(__rte_unused struct rte_eth_dev *dev, + __rte_unused int wait_to_complete) +{ return 0; } @@ -276,10 +876,14 @@ static const struct eth_dev_ops ice_dcf_eth_dev_ops = { .dev_close = ice_dcf_dev_close, .dev_configure = ice_dcf_dev_configure, .dev_infos_get = ice_dcf_dev_info_get, - .rx_queue_setup = ice_dcf_rx_queue_setup, - .tx_queue_setup = ice_dcf_tx_queue_setup, - .rx_queue_release = ice_dcf_queue_release, - .tx_queue_release = ice_dcf_queue_release, + .rx_queue_setup = ice_rx_queue_setup, + .tx_queue_setup = ice_tx_queue_setup, + .rx_queue_release = ice_rx_queue_release, + .tx_queue_release = ice_tx_queue_release, + .rx_queue_start = ice_dcf_rx_queue_start, + .tx_queue_start = ice_dcf_tx_queue_start, + .rx_queue_stop = ice_dcf_rx_queue_stop, + .tx_queue_stop = ice_dcf_tx_queue_stop, .link_update = ice_dcf_link_update, .stats_get = ice_dcf_stats_get, .stats_reset = ice_dcf_stats_reset,