X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Ffm10k%2Ffm10k_ethdev.c;h=8d55c436584d54364eb085352bf873668187ec32;hb=c830cb295411fc71c3f3d3591308d531937dd58f;hp=902ccaebc860ae5bdae11940f20e365f4543b103;hpb=0a5311e7eff552cd4745535770c938cad5723759;p=dpdk.git diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c index 902ccaebc8..8d55c43658 100644 --- a/drivers/net/fm10k/fm10k_ethdev.c +++ b/drivers/net/fm10k/fm10k_ethdev.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. + * Copyright(c) 2013-2016 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -37,6 +37,7 @@ #include <rte_string_fns.h> #include <rte_dev.h> #include <rte_spinlock.h> +#include <rte_kvargs.h> #include "fm10k.h" #include "base/fm10k_api.h" @@ -45,6 +46,8 @@ #define FM10K_MBXLOCK_DELAY_US 20 #define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL +#define MAIN_VSI_POOL_NUMBER 0 + /* Max try times to acquire switch status */ #define MAX_QUERY_SWITCH_STATE_TIMES 10 /* Wait interval to get switch status */ @@ -53,6 +56,16 @@ #define CHARS_PER_UINT32 (sizeof(uint32_t)) #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1) +/* default 1:1 map from queue ID to interrupt vector ID */ +#define Q2V(dev, queue_id) (dev->pci_dev->intr_handle.intr_vec[queue_id]) + +/* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */ +#define MAX_LPORT_NUM 128 +#define GLORT_FD_Q_BASE 0x40 +#define GLORT_PF_MASK 0xFFC0 +#define GLORT_FD_MASK GLORT_PF_MASK +#define GLORT_FD_INDEX GLORT_FD_Q_BASE + static void fm10k_close_mbx_service(struct fm10k_hw *hw); static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev); static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev); @@ -61,10 +74,55 @@ static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev); static inline int fm10k_glort_valid(struct fm10k_hw *hw); static int fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); -static void -fm10k_MAC_filter_set(struct rte_eth_dev *dev, const u8 *mac, bool add); -static void -fm10k_MACVLAN_remove_all(struct rte_eth_dev *dev); +static void fm10k_MAC_filter_set(struct rte_eth_dev *dev, + const u8 *mac, bool add, uint32_t pool); +static void fm10k_tx_queue_release(void *queue); +static void fm10k_rx_queue_release(void *queue); +static void fm10k_set_rx_function(struct rte_eth_dev *dev); +static void fm10k_set_tx_function(struct rte_eth_dev *dev); +static int fm10k_check_ftag(struct rte_devargs *devargs); + +struct fm10k_xstats_name_off { + char name[RTE_ETH_XSTATS_NAME_SIZE]; + unsigned offset; +}; + +struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = { + {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)}, + {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)}, + {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)}, + {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)}, + {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)}, + {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)}, + {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)}, + {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats, + nodesc_drop)}, +}; + +#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \ sizeof(fm10k_hw_stats_strings[0])) + +struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = { + {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)}, + {"bytes", offsetof(struct 
fm10k_hw_stats_q, rx_bytes)}, + {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)}, +}; + +#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \ + sizeof(fm10k_hw_stats_rx_q_strings[0])) + +struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = { + {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)}, + {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)}, +}; + +#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \ + sizeof(fm10k_hw_stats_tx_q_strings[0])) + +#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \ + (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS)) +static int +fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev); static void fm10k_mbx_initlock(struct fm10k_hw *hw) @@ -85,6 +143,65 @@ fm10k_mbx_unlock(struct fm10k_hw *hw) rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)); } +/* Stubs needed for linkage when vPMD is disabled */ +int __attribute__((weak)) +fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev) +{ + return -1; +} + +uint16_t __attribute__((weak)) +fm10k_recv_pkts_vec( + __rte_unused void *rx_queue, + __rte_unused struct rte_mbuf **rx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +uint16_t __attribute__((weak)) +fm10k_recv_scattered_pkts_vec( + __rte_unused void *rx_queue, + __rte_unused struct rte_mbuf **rx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + +int __attribute__((weak)) +fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq) + +{ + return -1; +} + +void __attribute__((weak)) +fm10k_rx_queue_release_mbufs_vec( + __rte_unused struct fm10k_rx_queue *rxq) +{ + return; +} + +void __attribute__((weak)) +fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq) +{ + return; +} + +int __attribute__((weak)) +fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq) +{ + return -1; +} + +uint16_t __attribute__((weak)) +fm10k_xmit_pkts_vec(__rte_unused void *tx_queue, + __rte_unused struct rte_mbuf **tx_pkts, + __rte_unused uint16_t nb_pkts) +{ + return 0; +} + /* * reset queue to initial state, allocate software buffers used when starting * device. @@ -95,6 +212,7 @@ fm10k_mbx_unlock(struct fm10k_hw *hw) static inline int rx_queue_reset(struct fm10k_rx_queue *q) { + static const union fm10k_rx_desc zero = {{0} }; uint64_t dma_addr; int i, diag; PMD_INIT_FUNC_TRACE(); @@ -115,10 +233,22 @@ rx_queue_reset(struct fm10k_rx_queue *q) q->hw_ring[i].q.hdr_addr = dma_addr; } + /* initialize extra software ring entries. Space for these extra + * entries is always allocated. + */ + memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf)); + for (i = 0; i < q->nb_fake_desc; ++i) { + q->sw_ring[q->nb_desc + i] = &q->fake_mbuf; + q->hw_ring[q->nb_desc + i] = zero; + } + q->next_dd = 0; q->next_alloc = 0; q->next_trigger = q->alloc_thresh - 1; FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1); + q->rxrearm_start = 0; + q->rxrearm_nb = 0; + return 0; } @@ -137,6 +267,16 @@ rx_queue_clean(struct fm10k_rx_queue *q) for (i = 0; i < q->nb_desc; ++i) q->hw_ring[i] = zero; + /* zero faked descriptors */ + for (i = 0; i < q->nb_fake_desc; ++i) + q->hw_ring[q->nb_desc + i] = zero; + + /* vPMD driver has a different way of releasing mbufs. 
*/ + if (q->rx_using_sse) { + fm10k_rx_queue_release_mbufs_vec(q); + return; + } + /* free software buffers */ for (i = 0; i < q->nb_desc; ++i) { if (q->sw_ring[i]) { @@ -180,7 +320,7 @@ rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum) /* Wait 100us at most */ for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) { rte_delay_us(1); - reg = FM10K_READ_REG(hw, FM10K_RXQCTL(i)); + reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum)); if (!(reg & FM10K_RXQCTL_ENABLE)) break; } @@ -269,7 +409,7 @@ tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum) /* Wait 100us at most */ for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) { rte_delay_us(1); - reg = FM10K_READ_REG(hw, FM10K_TXDCTL(i)); + reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum)); if (!(reg & FM10K_TXDCTL_ENABLE)) break; } @@ -280,19 +420,100 @@ tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum) return 0; } +static int +fm10k_check_mq_mode(struct rte_eth_dev *dev) +{ + enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode; + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_vmdq_rx_conf *vmdq_conf; + uint16_t nb_rx_q = dev->data->nb_rx_queues; + + vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf; + + if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) { + PMD_INIT_LOG(ERR, "DCB mode is not supported."); + return -EINVAL; + } + + if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG)) + return 0; + + if (hw->mac.type == fm10k_mac_vf) { + PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF."); + return -EINVAL; + } + + /* Check VMDQ queue pool number */ + if (vmdq_conf->nb_queue_pools > + sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT || + vmdq_conf->nb_queue_pools > nb_rx_q) { + PMD_INIT_LOG(ERR, "Too many queue pools: %d", + vmdq_conf->nb_queue_pools); + return -EINVAL; + } + + return 0; +} + +static const struct fm10k_txq_ops def_txq_ops = { + .reset = tx_queue_reset, +}; + static int fm10k_dev_configure(struct rte_eth_dev *dev) { + int ret; + PMD_INIT_FUNC_TRACE(); if (dev->data->dev_conf.rxmode.hw_strip_crc == 0) PMD_INIT_LOG(WARNING, "fm10k always strip CRC"); + /* multiple queue mode checking */ + ret = fm10k_check_mq_mode(dev); + if (ret != 0) { + PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.", + ret); + return ret; + } return 0; } +/* fls = find last set bit = 32 minus the number of leading zeros */ +#ifndef fls +#define fls(x) (((x) == 0) ? 
0 : (32 - __builtin_clz((x)))) +#endif + static void -fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev) +fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_vmdq_rx_conf *vmdq_conf; + uint32_t i; + + vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf; + + for (i = 0; i < vmdq_conf->nb_pool_maps; i++) { + if (!vmdq_conf->pool_map[i].pools) + continue; + fm10k_mbx_lock(hw); + fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true); + fm10k_mbx_unlock(hw); + } +} + +static void +fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Add default mac address */ + fm10k_MAC_filter_set(dev, hw->mac.addr, true, + MAIN_VSI_POOL_NUMBER); +} + +static void +fm10k_dev_rss_configure(struct rte_eth_dev *dev) { struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct rte_eth_conf *dev_conf = &dev->data->dev_conf; @@ -310,8 +531,10 @@ fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev) if (dev->data->nb_rx_queues == 1 || dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS || - dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) + dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) { + FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0); return; + } /* random key is rss_intel_key (default) or user provided (rss_key) */ if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL) @@ -363,6 +586,66 @@ fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev) FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc); } +static void +fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t i; + + for (i = 0; i < nb_lport_new; i++) { + /* Set unicast mode by default. App can change + * to other mode in other API func. + */ + fm10k_mbx_lock(hw); + hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i, + FM10K_XCAST_MODE_NONE); + fm10k_mbx_unlock(hw); + } +} + +static void +fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_vmdq_rx_conf *vmdq_conf; + struct rte_eth_conf *dev_conf = &dev->data->dev_conf; + struct fm10k_macvlan_filter_info *macvlan; + uint16_t nb_queue_pools = 0; /* pool number in configuration */ + uint16_t nb_lport_new; + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf; + + fm10k_dev_rss_configure(dev); + + /* only PF supports VMDQ */ + if (hw->mac.type != fm10k_mac_pf) + return; + + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) + nb_queue_pools = vmdq_conf->nb_queue_pools; + + /* no pool number change, no need to update logic port and VLAN/MAC */ + if (macvlan->nb_queue_pools == nb_queue_pools) + return; + + nb_lport_new = nb_queue_pools ? 
nb_queue_pools : 1; + fm10k_dev_logic_port_update(dev, nb_lport_new); + + /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */ + memset(dev->data->mac_addrs, 0, + ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM); + ether_addr_copy((const struct ether_addr *)hw->mac.addr, + &dev->data->mac_addrs[0]); + memset(macvlan, 0, sizeof(*macvlan)); + macvlan->nb_queue_pools = nb_queue_pools; + + if (nb_queue_pools) + fm10k_dev_vmdq_rx_configure(dev); + else + fm10k_dev_pf_main_vsi_reset(dev); +} + static int fm10k_dev_tx_init(struct rte_eth_dev *dev) { @@ -389,6 +672,19 @@ fm10k_dev_tx_init(struct rte_eth_dev *dev) PMD_INIT_LOG(ERR, "failed to disable queue %d", i); return -1; } + /* Enable use of FTAG bit in TX descriptor, PFVTCTL + * register is read-only for VF. + */ + if (fm10k_check_ftag(dev->pci_dev->devargs)) { + if (hw->mac.type == fm10k_mac_pf) { + FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i), + FM10K_PFVTCTL_FTAG_DESC_ENABLE); + PMD_INIT_LOG(DEBUG, "FTAG mode is enabled"); + } else { + PMD_INIT_LOG(ERR, "VF FTAG is not supported."); + return -ENOTSUP; + } + } /* set location and size for descriptor ring */ FM10K_WRITE_REG(hw, FM10K_TDBAL(i), @@ -396,7 +692,14 @@ fm10k_dev_tx_init(struct rte_eth_dev *dev) FM10K_WRITE_REG(hw, FM10K_TDBAH(i), base_addr >> (CHAR_BIT * sizeof(uint32_t))); FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size); + + /* assign default SGLORT for each TX queue */ + FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map); } + + /* set up vector or scalar TX function as appropriate */ + fm10k_set_tx_function(dev); + return 0; } @@ -404,17 +707,36 @@ static int fm10k_dev_rx_init(struct rte_eth_dev *dev) { struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_macvlan_filter_info *macvlan; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; int i, ret; struct fm10k_rx_queue *rxq; uint64_t base_addr; uint32_t size; uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY; + uint32_t logic_port = hw->mac.dglort_map; uint16_t buf_size; + uint16_t queue_stride = 0; - /* Disable RXINT to avoid possible interrupt */ - for (i = 0; i < hw->mac.max_queues; i++) + /* enable RXINT for interrupt mode */ + i = 0; + if (rte_intr_dp_is_en(intr_handle)) { + for (; i < dev->data->nb_rx_queues; i++) { + FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(dev, i)); + if (hw->mac.type == fm10k_mac_pf) + FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, i)), + FM10K_ITR_AUTOMASK | + FM10K_ITR_MASK_CLEAR); + else + FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, i)), + FM10K_ITR_AUTOMASK | + FM10K_ITR_MASK_CLEAR); + } + } + /* Disable other RXINT to avoid possible interrupt */ + for (; i < hw->mac.max_queues; i++) FM10K_WRITE_REG(hw, FM10K_RXINT(i), - 3 << FM10K_RXINT_TIMER_SHIFT); + 3 << FM10K_RXINT_TIMER_SHIFT); /* Setup RX queues */ for (i = 0; i < dev->data->nb_rx_queues; ++i) { @@ -449,7 +771,8 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev) buf_size -= FM10K_RX_DATABUF_ALIGN; FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), - buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT); + (buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) | + FM10K_SRRCTL_LOOPBACK_SUPPRESS); /* It adds dual VLAN length for supporting dual VLAN */ if ((dev->data->dev_conf.rxmode.max_rx_pkt_len + @@ -457,7 +780,6 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev) dev->data->dev_conf.rxmode.enable_scatter) { uint32_t reg; dev->data->scattered_rx = 1; - dev->rx_pkt_burst = fm10k_recv_scattered_pkts; reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i)); reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN; FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg); @@ -471,8 +793,24 @@ 
fm10k_dev_rx_init(struct rte_eth_dev *dev) FM10K_WRITE_FLUSH(hw); } - /* Configure RSS if applicable */ + /* Configure VMDQ/RSS if applicable */ fm10k_dev_mq_rx_configure(dev); + + /* Decide the best RX function */ + fm10k_set_rx_function(dev); + + /* update RX_SGLORT for loopback suppress*/ + if (hw->mac.type != fm10k_mac_pf) + return 0; + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + if (macvlan->nb_queue_pools) + queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools; + for (i = 0; i < dev->data->nb_rx_queues; ++i) { + if (i && queue_stride && !(i % queue_stride)) + logic_port++; + FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port); + } + return 0; } @@ -522,6 +860,7 @@ fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id) */ FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0); FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; } return err; @@ -540,6 +879,7 @@ fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) /* Free mbuf and clean HW ring */ rx_queue_clean(dev->data->rx_queues[rx_queue_id]); + dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; } return 0; @@ -557,7 +897,9 @@ fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) PMD_INIT_FUNC_TRACE(); if (tx_queue_id < dev->data->nb_tx_queues) { - tx_queue_reset(dev->data->tx_queues[tx_queue_id]); + struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id]; + + q->ops->reset(q); /* reset head and tail pointers */ FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0); @@ -567,6 +909,7 @@ fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id) FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id), FM10K_TXDCTL_ENABLE | txdctl); FM10K_WRITE_FLUSH(hw); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; } else err = -1; @@ -583,6 +926,7 @@ fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) if (tx_queue_id < dev->data->nb_tx_queues) { tx_queue_disable(hw, tx_queue_id); tx_queue_clean(dev->data->tx_queues[tx_queue_id]); + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; } return 0; @@ -603,7 +947,7 @@ fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); /* Return if it didn't acquire valid glort range */ - if (!fm10k_glort_valid(hw)) + if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw)) return; fm10k_mbx_lock(hw); @@ -625,7 +969,7 @@ fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); /* Return if it didn't acquire valid glort range */ - if (!fm10k_glort_valid(hw)) + if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw)) return; if (dev->data->all_multicast == 1) @@ -651,7 +995,7 @@ fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); /* Return if it didn't acquire valid glort range */ - if (!fm10k_glort_valid(hw)) + if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw)) return; /* If promiscuous mode is enabled, it doesn't make sense to enable @@ -682,7 +1026,7 @@ fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); /* Return if it didn't acquire valid glort range */ - if (!fm10k_glort_valid(hw)) + if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw)) return; if (dev->data->promiscuous) { @@ -701,10 +1045,40 @@ fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev) PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode"); } -/* fls = find last set bit = 32 minus 
the number of leading zeros */ -#ifndef fls -#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x)))) -#endif +static void +fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t dglortdec, pool_len, rss_len, i, dglortmask; + uint16_t nb_queue_pools; + struct fm10k_macvlan_filter_info *macvlan; + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + nb_queue_pools = macvlan->nb_queue_pools; + pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0; + rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len; + + /* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */ + dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len; + dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) | + hw->mac.dglort_map; + FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask); + /* Configure VMDQ/RSS DGlort Decoder */ + FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec); + + /* Flow Director configurations, only queue number is valid. */ + dglortdec = fls(dev->data->nb_rx_queues - 1); + dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) | + (hw->mac.dglort_map + GLORT_FD_Q_BASE); + FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask); + FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec); + + /* Invalidate all other GLORT entries */ + for (i = 2; i < FM10K_DGLORT_COUNT; i++) + FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i), + FM10K_DGLORTMAP_NONE); +} + #define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1) static int fm10k_dev_start(struct rte_eth_dev *dev) @@ -739,26 +1113,17 @@ fm10k_dev_start(struct rte_eth_dev *dev) return diag; } + if (fm10k_dev_rxq_interrupt_setup(dev)) + return -EIO; + diag = fm10k_dev_rx_init(dev); if (diag) { PMD_INIT_LOG(ERR, "RX init failed: %d", diag); return diag; } - if (hw->mac.type == fm10k_mac_pf) { - /* Establish only VSI 0 as valid */ - FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), FM10K_DGLORTMAP_ANY); - - /* Configure RSS bits used in RETA table */ - FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), - fls(dev->data->nb_rx_queues - 1) << - FM10K_DGLORTDEC_RSSLENGTH_SHIFT); - - /* Invalidate all other GLORT entries */ - for (i = 1; i < FM10K_DGLORT_COUNT; i++) - FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i), - FM10K_DGLORTMAP_NONE); - } + if (hw->mac.type == fm10k_mac_pf) + fm10k_dev_dglort_map_configure(dev); for (i = 0; i < dev->data->nb_rx_queues; i++) { struct fm10k_rx_queue *rxq; @@ -784,14 +1149,16 @@ fm10k_dev_start(struct rte_eth_dev *dev) diag = fm10k_dev_tx_queue_start(dev, i); if (diag != 0) { int j; + for (j = 0; j < i; ++j) + tx_queue_clean(dev->data->tx_queues[j]); for (j = 0; j < dev->data->nb_rx_queues; ++j) rx_queue_clean(dev->data->rx_queues[j]); return diag; } } - /* Update default vlan */ - if (hw->mac.default_vid && hw->mac.default_vid <= ETHER_MAX_VLAN_ID) + /* Update default vlan when not in VMDQ mode */ + if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)) fm10k_vlan_filter_set(dev, hw->mac.default_vid, true); return 0; @@ -799,16 +1166,59 @@ fm10k_dev_start(struct rte_eth_dev *dev) static void fm10k_dev_stop(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + int i; + + PMD_INIT_FUNC_TRACE(); + + if (dev->data->tx_queues) + for (i = 0; i < dev->data->nb_tx_queues; i++) + fm10k_dev_tx_queue_stop(dev, i); + + if (dev->data->rx_queues) + for (i = 0; i < dev->data->nb_rx_queues; i++) + 
fm10k_dev_rx_queue_stop(dev, i); + + /* Disable datapath event */ + if (rte_intr_dp_is_en(intr_handle)) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + FM10K_WRITE_REG(hw, FM10K_RXINT(i), + 3 << FM10K_RXINT_TIMER_SHIFT); + if (hw->mac.type == fm10k_mac_pf) + FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, i)), + FM10K_ITR_MASK_SET); + else + FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, i)), + FM10K_ITR_MASK_SET); + } + } + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; +} + +static void +fm10k_dev_queue_release(struct rte_eth_dev *dev) { int i; PMD_INIT_FUNC_TRACE(); - for (i = 0; i < dev->data->nb_tx_queues; i++) - fm10k_dev_tx_queue_stop(dev, i); + if (dev->data->tx_queues) { + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct fm10k_tx_queue *txq = dev->data->tx_queues[i]; - for (i = 0; i < dev->data->nb_rx_queues; i++) - fm10k_dev_rx_queue_stop(dev, i); + tx_queue_free(txq); + } + } + + if (dev->data->rx_queues) { + for (i = 0; i < dev->data->nb_rx_queues; i++) + fm10k_rx_queue_release(dev->data->rx_queues[i]); + } } static void @@ -818,11 +1228,15 @@ fm10k_dev_close(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); - fm10k_MACVLAN_remove_all(dev); + fm10k_mbx_lock(hw); + hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, + MAX_LPORT_NUM, false); + fm10k_mbx_unlock(hw); /* Stop mailbox service first */ fm10k_close_mbx_service(hw); fm10k_dev_stop(dev); + fm10k_dev_queue_release(dev); fm10k_stop_hw(hw); } @@ -837,11 +1251,86 @@ fm10k_link_update(struct rte_eth_dev *dev, * is no 50Gbps Ethernet. */ dev->data->dev_link.link_speed = 0; dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; - dev->data->dev_link.link_status = 1; + dev->data->dev_link.link_status = ETH_LINK_UP; return 0; } +static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit) +{ + unsigned i, q; + unsigned count = 0; + + if (xstats_names != NULL) { + /* Note: limit checked in rte_eth_xstats_names() */ + + /* Global stats */ + for (i = 0; i < FM10K_NB_HW_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", fm10k_hw_stats_strings[count].name); + count++; + } + + /* PF queue stats */ + for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) { + for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "rx_q%u_%s", q, + fm10k_hw_stats_rx_q_strings[i].name); + count++; + } + for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "tx_q%u_%s", q, + fm10k_hw_stats_tx_q_strings[i].name); + count++; + } + } + } + return FM10K_NB_XSTATS; +} + +static int +fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, + unsigned n) +{ + struct fm10k_hw_stats *hw_stats = + FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + unsigned i, q, count = 0; + + if (n < FM10K_NB_XSTATS) + return FM10K_NB_XSTATS; + + /* Global stats */ + for (i = 0; i < FM10K_NB_HW_XSTATS; i++) { + xstats[count].value = *(uint64_t *)(((char *)hw_stats) + + fm10k_hw_stats_strings[count].offset); + count++; + } + + /* PF queue stats */ + for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) { + for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) { + xstats[count].value = + *(uint64_t *)(((char *)&hw_stats->q[q]) + + fm10k_hw_stats_rx_q_strings[i].offset); + count++; + } + for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) { + 
xstats[count].value = + *(uint64_t *)(((char *)&hw_stats->q[q]) + + fm10k_hw_stats_tx_q_strings[i].offset); + count++; + } + } + + return FM10K_NB_XSTATS; +} + static void fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { @@ -902,14 +1391,22 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev, dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM; dev_info->max_hash_mac_addrs = 0; dev_info->max_vfs = dev->pci_dev->max_vfs; - dev_info->max_vmdq_pools = ETH_64_POOLS; + dev_info->vmdq_pool_base = 0; + dev_info->vmdq_queue_base = 0; + dev_info->max_vmdq_pools = ETH_32_POOLS; + dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF; dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM; dev_info->tx_offload_capa = - DEV_TX_OFFLOAD_VLAN_INSERT; + DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; + dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t); dev_info->reta_size = FM10K_MAX_RSS_INDICES; @@ -931,11 +1428,74 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev, }, .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0), .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0), - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, + .txq_flags = FM10K_SIMPLE_TX_FLAG, + }; + + dev_info->rx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = FM10K_MAX_RX_DESC, + .nb_min = FM10K_MIN_RX_DESC, + .nb_align = FM10K_MULT_RX_DESC, }; + dev_info->tx_desc_lim = (struct rte_eth_desc_lim) { + .nb_max = FM10K_MAX_TX_DESC, + .nb_min = FM10K_MIN_TX_DESC, + .nb_align = FM10K_MULT_TX_DESC, + }; + + dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | + ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G | + ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G; +} + +#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE +static const uint32_t * +fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + if (dev->rx_pkt_burst == fm10k_recv_pkts || + dev->rx_pkt_burst == fm10k_recv_scattered_pkts) { + static uint32_t ptypes[] = { + /* refers to rx_desc_to_ol_flags() */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + return ptypes; + } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec || + dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) { + static uint32_t ptypes_vec[] = { + /* refers to fm10k_desc_to_pktype_v() */ + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_TUNNEL_GENEVE, + RTE_PTYPE_TUNNEL_NVGRE, + RTE_PTYPE_TUNNEL_VXLAN, + RTE_PTYPE_TUNNEL_GRE, + RTE_PTYPE_UNKNOWN + }; + + return ptypes_vec; + } + + return NULL; } +#else +static const uint32_t * +fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) +{ + return NULL; +} +#endif static int fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) @@ -950,9 +1510,14 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */ + PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode"); + return -EINVAL; + } + if (vlan_id > ETH_VLAN_ID_MAX) { PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096"); - return (-EINVAL); + return -EINVAL; } vid_idx = 
FM10K_VFTA_IDX(vlan_id); @@ -964,7 +1529,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) { PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing " "in the VLAN filter table"); - return (-EINVAL); + return -EINVAL; } fm10k_mbx_lock(hw); @@ -972,7 +1537,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) fm10k_mbx_unlock(hw); if (result != FM10K_SUCCESS) { PMD_INIT_LOG(ERR, "VLAN update failed: %d", result); - return (-EIO); + return -EIO; } for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) && @@ -993,7 +1558,7 @@ fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) } if (result != FM10K_SUCCESS) { PMD_INIT_LOG(ERR, "MAC address update failed: %d", result); - return (-EIO); + return -EIO; } if (on) { @@ -1015,50 +1580,92 @@ fm10k_vlan_offload_set(__rte_unused struct rte_eth_dev *dev, int mask) "always on in fm10k"); } - if (mask & ETH_VLAN_EXTEND_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_extend) - PMD_INIT_LOG(ERR, "VLAN QinQ is not " - "supported in fm10k"); + if (mask & ETH_VLAN_EXTEND_MASK) { + if (dev->data->dev_conf.rxmode.hw_vlan_extend) + PMD_INIT_LOG(ERR, "VLAN QinQ is not " + "supported in fm10k"); + } + + if (mask & ETH_VLAN_FILTER_MASK) { + if (!dev->data->dev_conf.rxmode.hw_vlan_filter) + PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k"); + } +} + +/* Add/Remove a MAC address, and update filters to main VSI */ +static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev, + const u8 *mac, bool add, uint32_t pool) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_macvlan_filter_info *macvlan; + uint32_t i, j, k; + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + + if (pool != MAIN_VSI_POOL_NUMBER) { + PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set " "mac to pool %u", pool); + return; + } + for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) { + if (!macvlan->vfta[j]) + continue; + for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) { + if (!(macvlan->vfta[j] & (1 << k))) + continue; + if (i + 1 > macvlan->vlan_num) { + PMD_INIT_LOG(ERR, "VLAN number does not match"); + return; + } + fm10k_mbx_lock(hw); + fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac, + j * FM10K_UINT32_BIT_SIZE + k, add, 0); + fm10k_mbx_unlock(hw); + i++; + } + } +} + +/* Add/Remove a MAC address, and update filters to VMDQ */ +static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev, + const u8 *mac, bool add, uint32_t pool) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_macvlan_filter_info *macvlan; + struct rte_eth_vmdq_rx_conf *vmdq_conf; + uint32_t i; + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf; + + if (pool > macvlan->nb_queue_pools) { + PMD_DRV_LOG(ERR, "Pool number %u invalid." 
+ " Max pool is %u", + pool, macvlan->nb_queue_pools); + return; } - - if (mask & ETH_VLAN_FILTER_MASK) { - if (!dev->data->dev_conf.rxmode.hw_vlan_filter) - PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k"); + for (i = 0; i < vmdq_conf->nb_pool_maps; i++) { + if (!(vmdq_conf->pool_map[i].pools & (1UL << pool))) + continue; + fm10k_mbx_lock(hw); + fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac, + vmdq_conf->pool_map[i].vlan_id, add, 0); + fm10k_mbx_unlock(hw); } } /* Add/Remove a MAC address, and update filters */ -static void -fm10k_MAC_filter_set(struct rte_eth_dev *dev, const u8 *mac, bool add) +static void fm10k_MAC_filter_set(struct rte_eth_dev *dev, + const u8 *mac, bool add, uint32_t pool) { - uint32_t i, j, k; - struct fm10k_hw *hw; struct fm10k_macvlan_filter_info *macvlan; - hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); - i = 0; - for (j = 0; j < FM10K_VFTA_SIZE; j++) { - if (macvlan->vfta[j]) { - for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) { - if (macvlan->vfta[j] & (1 << k)) { - if (i + 1 > macvlan->vlan_num) { - PMD_INIT_LOG(ERR, "vlan number " - "not match"); - return; - } - fm10k_mbx_lock(hw); - fm10k_update_uc_addr(hw, - hw->mac.dglort_map, mac, - j * FM10K_UINT32_BIT_SIZE + k, - add, 0); - fm10k_mbx_unlock(hw); - i++; - } - } - } - } + if (macvlan->nb_queue_pools > 0) /* VMDQ mode */ + fm10k_MAC_filter_set_vmdq(dev, mac, add, pool); + else + fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool); if (add) macvlan->mac_num++; @@ -1069,11 +1676,15 @@ fm10k_MAC_filter_set(struct rte_eth_dev *dev, const u8 *mac, bool add) /* Add a MAC address, and update filters */ static void fm10k_macaddr_add(struct rte_eth_dev *dev, - struct ether_addr *mac_addr, - __rte_unused uint32_t index, - __rte_unused uint32_t pool) + struct ether_addr *mac_addr, + uint32_t index, + uint32_t pool) { - fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE); + struct fm10k_macvlan_filter_info *macvlan; + + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool); + macvlan->mac_vmdq_id[index] = pool; } /* Remove a MAC address, and update filters */ @@ -1081,29 +1692,12 @@ static void fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index) { struct rte_eth_dev_data *data = dev->data; - - if (index < FM10K_MAX_MACADDR_NUM) - fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes, - FALSE); -} - -/* Remove all VLAN and MAC address table entries */ -static void -fm10k_MACVLAN_remove_all(struct rte_eth_dev *dev) -{ - uint32_t j, k; struct fm10k_macvlan_filter_info *macvlan; macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); - for (j = 0; j < FM10K_VFTA_SIZE; j++) { - if (macvlan->vfta[j]) { - for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) { - if (macvlan->vfta[j] & (1 << k)) - fm10k_vlan_filter_set(dev, - j * FM10K_UINT32_BIT_SIZE + k, false); - } - } - } + fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes, + FALSE, macvlan->mac_vmdq_id[index]); + macvlan->mac_vmdq_id[index] = 0; } static inline int @@ -1115,34 +1709,6 @@ check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request) return 0; } -/* - * Create a memzone for hardware descriptor rings. Malloc cannot be used since - * the physical address is required. If the memzone is already created, then - * this function returns a pointer to the existing memzone. 
- */ -static inline const struct rte_memzone * -allocate_hw_ring(const char *driver_name, const char *ring_name, - uint8_t port_id, uint16_t queue_id, int socket_id, - uint32_t size, uint32_t align) -{ - char name[RTE_MEMZONE_NAMESIZE]; - const struct rte_memzone *mz; - - snprintf(name, sizeof(name), "%s_%s_%d_%d_%d", - driver_name, ring_name, port_id, queue_id, socket_id); - - /* return the memzone if it already exists */ - mz = rte_memzone_lookup(name); - if (mz) - return mz; - -#ifdef RTE_LIBRTE_XEN_DOM0 - return rte_memzone_reserve_bounded(name, size, socket_id, 0, align, - RTE_PGSIZE_2M); -#else - return rte_memzone_reserve_aligned(name, size, socket_id, 0, align); -#endif -} static inline int check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request) @@ -1175,7 +1741,7 @@ handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf) rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q), FM10K_RX_FREE_THRESH_MIN(q), FM10K_RX_FREE_THRESH_DIV(q)); - return (-EINVAL); + return -EINVAL; } q->alloc_thresh = rx_free_thresh; @@ -1222,6 +1788,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, const struct rte_eth_rxconf *conf, struct rte_mempool *mp) { struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev); struct fm10k_rx_queue *q; const struct rte_memzone *mz; @@ -1230,7 +1797,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, /* make sure the mempool element size can account for alignment. */ if (!mempool_element_size_valid(mp)) { PMD_INIT_LOG(ERR, "Error : Mempool element size is too small"); - return (-EINVAL); + return -EINVAL; } /* make sure a valid number of descriptors have been requested */ @@ -1242,7 +1809,7 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, "and a multiple of %u", nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC, FM10K_MULT_RX_DESC); - return (-EINVAL); + return -EINVAL; } /* @@ -1260,27 +1827,28 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, socket_id); if (q == NULL) { PMD_INIT_LOG(ERR, "Cannot allocate queue structure"); - return (-ENOMEM); + return -ENOMEM; } /* setup queue */ q->mp = mp; q->nb_desc = nb_desc; + q->nb_fake_desc = FM10K_MULT_RX_DESC; q->port_id = dev->data->port_id; q->queue_id = queue_id; q->tail_ptr = (volatile uint32_t *) &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)]; if (handle_rxconf(q, conf)) - return (-EINVAL); + return -EINVAL; /* allocate memory for the software ring */ q->sw_ring = rte_zmalloc_socket("fm10k sw ring", - nb_desc * sizeof(struct rte_mbuf *), - RTE_CACHE_LINE_SIZE, socket_id); + (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *), + RTE_CACHE_LINE_SIZE, socket_id); if (q->sw_ring == NULL) { PMD_INIT_LOG(ERR, "Cannot allocate software ring"); rte_free(q); - return (-ENOMEM); + return -ENOMEM; } /* @@ -1288,21 +1856,27 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, * enough to hold the maximum ring size is requested to allow for * resizing in later calls to the queue setup function. 
*/ - mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring", - dev->data->port_id, queue_id, socket_id, - FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC); + mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id, + FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC, + socket_id); if (mz == NULL) { PMD_INIT_LOG(ERR, "Cannot allocate hardware ring"); rte_free(q->sw_ring); rte_free(q); - return (-ENOMEM); + return -ENOMEM; } q->hw_ring = mz->addr; -#ifdef RTE_LIBRTE_XEN_DOM0 q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr); -#else - q->hw_ring_phys_addr = mz->phys_addr; -#endif + + /* Check if number of descs satisfied Vector requirement */ + if (!rte_is_power_of_2(nb_desc)) { + PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx " + "preconditions - canceling the feature for " + "the whole port[%d]", + q->queue_id, q->port_id); + dev_info->rx_vec_allowed = false; + } else + fm10k_rxq_vec_setup(q); dev->data->rx_queues[queue_id] = q; return 0; @@ -1341,7 +1915,7 @@ handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf) tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q), FM10K_TX_FREE_THRESH_MIN(q), FM10K_TX_FREE_THRESH_DIV(q)); - return (-EINVAL); + return -EINVAL; } q->free_thresh = tx_free_thresh; @@ -1365,7 +1939,7 @@ handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf) tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q), FM10K_TX_RS_THRESH_MIN(q), FM10K_TX_RS_THRESH_DIV(q)); - return (-EINVAL); + return -EINVAL; } q->rs_thresh = tx_rs_thresh; @@ -1393,7 +1967,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, "and a multiple of %u", nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC, FM10K_MULT_TX_DESC); - return (-EINVAL); + return -EINVAL; } /* @@ -1402,7 +1976,9 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, * different socket than was previously used. */ if (dev->data->tx_queues[queue_id] != NULL) { - tx_queue_free(dev->data->tx_queues[queue_id]); + struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id]; + + tx_queue_free(txq); dev->data->tx_queues[queue_id] = NULL; } @@ -1411,17 +1987,19 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, socket_id); if (q == NULL) { PMD_INIT_LOG(ERR, "Cannot allocate queue structure"); - return (-ENOMEM); + return -ENOMEM; } /* setup queue */ q->nb_desc = nb_desc; q->port_id = dev->data->port_id; q->queue_id = queue_id; + q->txq_flags = conf->txq_flags; + q->ops = &def_txq_ops; q->tail_ptr = (volatile uint32_t *) &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)]; if (handle_txconf(q, conf)) - return (-EINVAL); + return -EINVAL; /* allocate memory for the software ring */ q->sw_ring = rte_zmalloc_socket("fm10k sw ring", @@ -1430,7 +2008,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, if (q->sw_ring == NULL) { PMD_INIT_LOG(ERR, "Cannot allocate software ring"); rte_free(q); - return (-ENOMEM); + return -ENOMEM; } /* @@ -1438,21 +2016,17 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, * enough to hold the maximum ring size is requested to allow for * resizing in later calls to the queue setup function. 
*/ - mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring", - dev->data->port_id, queue_id, socket_id, - FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC); + mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id, + FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC, + socket_id); if (mz == NULL) { PMD_INIT_LOG(ERR, "Cannot allocate hardware ring"); rte_free(q->sw_ring); rte_free(q); - return (-ENOMEM); + return -ENOMEM; } q->hw_ring = mz->addr; -#ifdef RTE_LIBRTE_XEN_DOM0 q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr); -#else - q->hw_ring_phys_addr = mz->phys_addr; -#endif /* * allocate memory for the RS bit tracker. Enough slots to hold the @@ -1466,7 +2040,7 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker"); rte_free(q->sw_ring); rte_free(q); - return (-ENOMEM); + return -ENOMEM; } dev->data->tx_queues[queue_id] = q; @@ -1476,9 +2050,10 @@ fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id, static void fm10k_tx_queue_release(void *queue) { + struct fm10k_tx_queue *q = queue; PMD_INIT_FUNC_TRACE(); - tx_queue_free(queue); + tx_queue_free(q); } static int @@ -1584,8 +2159,8 @@ fm10k_rss_hash_update(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); - if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE * - FM10K_RSSRK_ENTRIES_PER_REG) + if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE * + FM10K_RSSRK_ENTRIES_PER_REG)) return -EINVAL; if (hf == 0) @@ -1627,8 +2202,8 @@ fm10k_rss_hash_conf_get(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); - if (rss_conf->rss_key_len < FM10K_RSSRK_SIZE * - FM10K_RSSRK_ENTRIES_PER_REG) + if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE * + FM10K_RSSRK_ENTRIES_PER_REG)) return -EINVAL; if (key != NULL) @@ -1659,14 +2234,14 @@ fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev) uint32_t int_map = FM10K_INT_MAP_IMMEDIATE; /* Bind all local non-queue interrupt to vector 0 */ - int_map |= 0; + int_map |= FM10K_MISC_VEC_ID; - FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map); - FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map); - FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map); - FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map); - FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map); - FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map); /* Enable misc causes */ FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) | @@ -1684,6 +2259,36 @@ fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev) FM10K_WRITE_FLUSH(hw); } +static void +fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t int_map = FM10K_INT_MAP_DISABLE; + + int_map |= FM10K_MISC_VEC_ID; + + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map); + FM10K_WRITE_REG(hw, 
FM10K_INT_MAP(fm10k_int_vflr), int_map); + + /* Disable misc causes */ + FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) | + FM10K_EIMR_DISABLE(THI_FAULT) | + FM10K_EIMR_DISABLE(FUM_FAULT) | + FM10K_EIMR_DISABLE(MAILBOX) | + FM10K_EIMR_DISABLE(SWITCHREADY) | + FM10K_EIMR_DISABLE(SWITCHNOTREADY) | + FM10K_EIMR_DISABLE(SRAMERROR) | + FM10K_EIMR_DISABLE(VFLR)); + + /* Disable ITR 0 */ + FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET); + FM10K_WRITE_FLUSH(hw); +} + static void fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev) { @@ -1691,7 +2296,7 @@ fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev) uint32_t int_map = FM10K_INT_MAP_IMMEDIATE; /* Bind all local non-queue interrupt to vector 0 */ - int_map |= 0; + int_map |= FM10K_MISC_VEC_ID; /* Only INT 0 available, other 15 are reserved. */ FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map); @@ -1702,6 +2307,113 @@ fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev) FM10K_WRITE_FLUSH(hw); } +static void +fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t int_map = FM10K_INT_MAP_DISABLE; + + int_map |= FM10K_MISC_VEC_ID; + + /* Only INT 0 available, other 15 are reserved. */ + FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map); + + /* Disable ITR 0 */ + FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET); + FM10K_WRITE_FLUSH(hw); +} + +static int +fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Enable ITR */ + if (hw->mac.type == fm10k_mac_pf) + FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, queue_id)), + FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR); + else + FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, queue_id)), + FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR); + rte_intr_enable(&dev->pci_dev->intr_handle); + return 0; +} + +static int +fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Disable ITR */ + if (hw->mac.type == fm10k_mac_pf) + FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, queue_id)), + FM10K_ITR_MASK_SET); + else + FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, queue_id)), + FM10K_ITR_MASK_SET); + return 0; +} + +static int +fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + uint32_t intr_vector, vec; + uint16_t queue_id; + int result = 0; + + /* fm10k needs one separate interrupt for mailbox, + * so only drivers which support multiple interrupt vectors + * e.g. 
vfio-pci can work for fm10k interrupt mode + */ + if (!rte_intr_cap_multiple(intr_handle) || + dev->data->dev_conf.intr_conf.rxq == 0) + return result; + + intr_vector = dev->data->nb_rx_queues; + + /* disable interrupt first */ + rte_intr_disable(&dev->pci_dev->intr_handle); + if (hw->mac.type == fm10k_mac_pf) + fm10k_dev_disable_intr_pf(dev); + else + fm10k_dev_disable_intr_vf(dev); + + if (rte_intr_efd_enable(intr_handle, intr_vector)) { + PMD_INIT_LOG(ERR, "Failed to init event fd"); + result = -EIO; + } + + if (rte_intr_dp_is_en(intr_handle) && !result) { + intr_handle->intr_vec = rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (intr_handle->intr_vec) { + for (queue_id = 0, vec = FM10K_RX_VEC_START; + queue_id < dev->data->nb_rx_queues; + queue_id++) { + intr_handle->intr_vec[queue_id] = vec; + if (vec < intr_handle->nb_efd - 1 + + FM10K_RX_VEC_START) + vec++; + } + } else { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec", dev->data->nb_rx_queues); + rte_intr_efd_disable(intr_handle); + result = -ENOMEM; + } + } + + if (hw->mac.type == fm10k_mac_pf) + fm10k_dev_enable_intr_pf(dev); + else + fm10k_dev_enable_intr_vf(dev); + rte_intr_enable(&dev->pci_dev->intr_handle); + hw->mac.ops.update_int_moderator(hw); + return result; +} + static int fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr) { @@ -1710,7 +2422,7 @@ fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr) const char *estr = "Unknown error"; /* Process PCA fault */ - if (eicr & FM10K_EIMR_PCA_FAULT) { + if (eicr & FM10K_EICR_PCA_FAULT) { err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault); if (err) goto error; @@ -1738,7 +2450,7 @@ fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr) } /* Process THI fault */ - if (eicr & FM10K_EIMR_THI_FAULT) { + if (eicr & FM10K_EICR_THI_FAULT) { err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault); if (err) goto error; @@ -1756,7 +2468,7 @@ fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr) } /* Process FUM fault */ - if (eicr & FM10K_EIMR_FUM_FAULT) { + if (eicr & FM10K_EICR_FUM_FAULT) { err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault); if (err) goto error; @@ -1793,8 +2505,6 @@ fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr) fault.address, fault.specinfo); } - if (estr) - return 0; return 0; error: PMD_INIT_LOG(ERR, "Failed to handle fault event."); @@ -1910,29 +2620,16 @@ static const struct fm10k_msg_data fm10k_msgdata_vf[] = { FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), }; -/* Mailbox message handler in PF */ -static const struct fm10k_msg_data fm10k_msgdata_pf[] = { - FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf), - FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf), - FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf), - FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf), - FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf), - FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf), - FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), -}; - static int fm10k_setup_mbx_service(struct fm10k_hw *hw) { - int err; + int err = 0; /* Initialize mailbox lock */ fm10k_mbx_initlock(hw); /* Replace default message handler with new ones */ - if (hw->mac.type == fm10k_mac_pf) - err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf); - else + if (hw->mac.type == fm10k_mac_vf) err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf); if (err) { @@ -1961,9 +2658,13 @@ static const struct eth_dev_ops fm10k_eth_dev_ops = { .allmulticast_enable = 
fm10k_dev_allmulticast_enable, .allmulticast_disable = fm10k_dev_allmulticast_disable, .stats_get = fm10k_stats_get, + .xstats_get = fm10k_xstats_get, + .xstats_get_names = fm10k_xstats_get_names, .stats_reset = fm10k_stats_reset, + .xstats_reset = fm10k_stats_reset, .link_update = fm10k_link_update, .dev_infos_get = fm10k_dev_infos_get, + .dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get, .vlan_filter_set = fm10k_vlan_filter_set, .vlan_offload_set = fm10k_vlan_offload_set, .mac_addr_add = fm10k_macaddr_add, @@ -1976,17 +2677,151 @@ static const struct eth_dev_ops fm10k_eth_dev_ops = { .rx_queue_release = fm10k_rx_queue_release, .tx_queue_setup = fm10k_tx_queue_setup, .tx_queue_release = fm10k_tx_queue_release, + .rx_descriptor_done = fm10k_dev_rx_descriptor_done, + .rx_queue_intr_enable = fm10k_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = fm10k_dev_rx_queue_intr_disable, .reta_update = fm10k_reta_update, .reta_query = fm10k_reta_query, .rss_hash_update = fm10k_rss_hash_update, .rss_hash_conf_get = fm10k_rss_hash_conf_get, }; +static int ftag_check_handler(__rte_unused const char *key, + const char *value, __rte_unused void *opaque) +{ + if (strcmp(value, "1")) + return -1; + + return 0; +} + +static int +fm10k_check_ftag(struct rte_devargs *devargs) +{ + struct rte_kvargs *kvlist; + const char *ftag_key = "enable_ftag"; + + if (devargs == NULL) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, NULL); + if (kvlist == NULL) + return 0; + + if (!rte_kvargs_count(kvlist, ftag_key)) { + rte_kvargs_free(kvlist); + return 0; + } + /* FTAG is enabled when there's key-value pair: enable_ftag=1 */ + if (rte_kvargs_process(kvlist, ftag_key, + ftag_check_handler, NULL) < 0) { + rte_kvargs_free(kvlist); + return 0; + } + rte_kvargs_free(kvlist); + + return 1; +} + +static void __attribute__((cold)) +fm10k_set_tx_function(struct rte_eth_dev *dev) +{ + struct fm10k_tx_queue *txq; + int i; + int use_sse = 1; + uint16_t tx_ftag_en = 0; + + if (fm10k_check_ftag(dev->pci_dev->devargs)) + tx_ftag_en = 1; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + txq->tx_ftag_en = tx_ftag_en; + /* Check if Vector Tx is satisfied */ + if (fm10k_tx_vec_condition_check(txq)) + use_sse = 0; + } + + if (use_sse) { + PMD_INIT_LOG(DEBUG, "Use vector Tx func"); + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + fm10k_txq_vec_setup(txq); + } + dev->tx_pkt_burst = fm10k_xmit_pkts_vec; + } else { + dev->tx_pkt_burst = fm10k_xmit_pkts; + PMD_INIT_LOG(DEBUG, "Use regular Tx func"); + } +} + +static void __attribute__((cold)) +fm10k_set_rx_function(struct rte_eth_dev *dev) +{ + struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev); + uint16_t i, rx_using_sse; + uint16_t rx_ftag_en = 0; + + if (fm10k_check_ftag(dev->pci_dev->devargs)) + rx_ftag_en = 1; + + /* In order to allow Vector Rx there are a few configuration + * conditions to be met. 
+ */ + if (!fm10k_rx_vec_condition_check(dev) && + dev_info->rx_vec_allowed && !rx_ftag_en) { + if (dev->data->scattered_rx) + dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec; + else + dev->rx_pkt_burst = fm10k_recv_pkts_vec; + } else if (dev->data->scattered_rx) + dev->rx_pkt_burst = fm10k_recv_scattered_pkts; + else + dev->rx_pkt_burst = fm10k_recv_pkts; + + rx_using_sse = + (dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec || + dev->rx_pkt_burst == fm10k_recv_pkts_vec); + + if (rx_using_sse) + PMD_INIT_LOG(DEBUG, "Use vector Rx func"); + else + PMD_INIT_LOG(DEBUG, "Use regular Rx func"); + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct fm10k_rx_queue *rxq = dev->data->rx_queues[i]; + + rxq->rx_using_sse = rx_using_sse; + rxq->rx_ftag_en = rx_ftag_en; + } +} + +static void +fm10k_params_init(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_dev_info *info = FM10K_DEV_PRIVATE_TO_INFO(dev); + + /* Initialize bus info. Normally we would call fm10k_get_bus_info(), but + * there is no way to get link status without reading BAR4. Until this + * works, assume we have maximum bandwidth. + * @todo - fix bus info + */ + hw->bus_caps.speed = fm10k_bus_speed_8000; + hw->bus_caps.width = fm10k_bus_width_pcie_x8; + hw->bus_caps.payload = fm10k_bus_payload_512; + hw->bus.speed = fm10k_bus_speed_8000; + hw->bus.width = fm10k_bus_width_pcie_x8; + hw->bus.payload = fm10k_bus_payload_256; + + info->rx_vec_allowed = true; +} + static int eth_fm10k_dev_init(struct rte_eth_dev *dev) { struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int diag; + int diag, i; struct fm10k_macvlan_filter_info *macvlan; PMD_INIT_FUNC_TRACE(); @@ -1995,13 +2830,12 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) dev->rx_pkt_burst = &fm10k_recv_pkts; dev->tx_pkt_burst = &fm10k_xmit_pkts; - if (dev->data->scattered_rx) - dev->rx_pkt_burst = &fm10k_recv_scattered_pkts; - /* only initialize in the primary process */ if (rte_eal_process_type() != RTE_PROC_PRIMARY) return 0; + rte_eth_copy_pci_info(dev, dev->pci_dev); + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); memset(macvlan, 0, sizeof(*macvlan)); /* Vendor and Device ID need to be set before init of shared code */ @@ -2028,18 +2862,8 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) return -EIO; } - /* - * Inialize bus info. Normally we would call fm10k_get_bus_info(), but - * there is no way to get link status without reading BAR4. Until this - * works, assume we have maximum bandwidth. - * @todo - fix bus info - */ - hw->bus_caps.speed = fm10k_bus_speed_8000; - hw->bus_caps.width = fm10k_bus_width_pcie_x8; - hw->bus_caps.payload = fm10k_bus_payload_512; - hw->bus.speed = fm10k_bus_speed_8000; - hw->bus.width = fm10k_bus_width_pcie_x8; - hw->bus.payload = fm10k_bus_payload_256; + /* Initialize parameters */ + fm10k_params_init(dev); /* Initialize the hw */ diag = fm10k_init_hw(hw); @@ -2103,7 +2927,7 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) fm10k_dev_enable_intr_vf(dev); } - /* Enable uio intr after callback registered */ + /* Enable intr after callback registered */ rte_intr_enable(&(dev->pci_dev->intr_handle)); hw->mac.ops.update_int_moderator(hw); @@ -2111,7 +2935,6 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) /* Make sure Switch Manager is ready before going forward. 
*/ if (hw->mac.type == fm10k_mac_pf) { int switch_ready = 0; - int i; for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) { fm10k_mbx_lock(hw); @@ -2138,7 +2961,8 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) */ fm10k_mbx_lock(hw); /* Enable port first */ - hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, 1, 1); + hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map, + MAX_LPORT_NUM, 1); /* Set unicast mode by default. App can change to other mode in other * API func. @@ -2148,8 +2972,72 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) fm10k_mbx_unlock(hw); + /* Make sure default VID is ready before going forward. */ + if (hw->mac.type == fm10k_mac_pf) { + for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) { + if (hw->mac.default_vid) + break; + /* Delay some time to acquire async port VLAN info. */ + rte_delay_us(WAIT_SWITCH_MSG_US); + } + + if (!hw->mac.default_vid) { + PMD_INIT_LOG(ERR, "default VID is not ready"); + return -1; + } + } + /* Add default mac address */ - fm10k_MAC_filter_set(dev, hw->mac.addr, true); + fm10k_MAC_filter_set(dev, hw->mac.addr, true, + MAIN_VSI_POOL_NUMBER); + + return 0; +} + +static int +eth_fm10k_dev_uninit(struct rte_eth_dev *dev) +{ + struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + /* only uninitialize in the primary process */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + /* safe to close dev here */ + fm10k_dev_close(dev); + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + /* disable uio/vfio intr */ + rte_intr_disable(&(dev->pci_dev->intr_handle)); + + /*PF/VF has different interrupt handling mechanism */ + if (hw->mac.type == fm10k_mac_pf) { + /* disable interrupt */ + fm10k_dev_disable_intr_pf(dev); + + /* unregister callback func to eal lib */ + rte_intr_callback_unregister(&(dev->pci_dev->intr_handle), + fm10k_dev_interrupt_handler_pf, (void *)dev); + } else { + /* disable interrupt */ + fm10k_dev_disable_intr_vf(dev); + + rte_intr_callback_unregister(&(dev->pci_dev->intr_handle), + fm10k_dev_interrupt_handler_vf, (void *)dev); + } + + /* free mac memory */ + if (dev->data->mac_addrs) { + rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = NULL; + } + + memset(hw, 0, sizeof(*hw)); return 0; } @@ -2159,9 +3047,9 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev) * and SRIOV-VF devices. */ static const struct rte_pci_id pci_id_fm10k_map[] = { -#define RTE_PCI_DEV_ID_DECL_FM10K(vend, dev) { RTE_PCI_DEVICE(vend, dev) }, -#define RTE_PCI_DEV_ID_DECL_FM10KVF(vend, dev) { RTE_PCI_DEVICE(vend, dev) }, -#include "rte_pci_dev_ids.h" + { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) }, + { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) }, + { RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) }, { .vendor_id = 0, /* sentinel */ }, }; @@ -2169,29 +3057,15 @@ static struct eth_driver rte_pmd_fm10k = { .pci_drv = { .name = "rte_pmd_fm10k", .id_table = pci_id_fm10k_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | + RTE_PCI_DRV_DETACHABLE, + .probe = rte_eth_dev_pci_probe, + .remove = rte_eth_dev_pci_remove, }, .eth_dev_init = eth_fm10k_dev_init, + .eth_dev_uninit = eth_fm10k_dev_uninit, .dev_private_size = sizeof(struct fm10k_adapter), }; -/* - * Driver initialization routine. - * Invoked once at EAL init time. - * Register itself as the [Poll Mode] Driver of PCI FM10K devices. 
- */ -static int -rte_pmd_fm10k_init(__rte_unused const char *name, - __rte_unused const char *params) -{ - PMD_INIT_FUNC_TRACE(); - rte_eth_driver_register(&rte_pmd_fm10k); - return 0; -} - -static struct rte_driver rte_fm10k_driver = { - .type = PMD_PDEV, - .init = rte_pmd_fm10k_init, -}; - -PMD_REGISTER_DRIVER(rte_fm10k_driver); +DRIVER_REGISTER_PCI(net_fm10k, rte_pmd_fm10k.pci_drv); +DRIVER_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
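Editor's note (not part of the patch): the diff above adds two features an application must opt into explicitly, namely per-device FTAG support via the enable_ftag devarg parsed by fm10k_check_ftag(), and per-queue RX interrupts wired through the new rx_queue_intr_enable/disable callbacks. The sketch below is one plausible way to use them from an application; the PCI address, port/queue ids, burst size, and the wait-on-event-fd placeholder are assumptions for illustration, while the rte_eth_dev_rx_intr_enable/disable calls are the generic ethdev entry points that reach the fm10k handlers added in this patch.

/* Hypothetical usage sketch (not part of the patch); error handling trimmed.
 *
 * FTAG mode is requested per device on the EAL command line, e.g.:
 *   ./app -w 0000:84:00.0,enable_ftag=1 -- ...
 * The value must be exactly "1": ftag_check_handler() above rejects
 * any other value.
 */
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void
rx_poll_with_intr(uint8_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];	/* burst size is an arbitrary choice */
	uint16_t nb;

	for (;;) {
		nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
		if (nb > 0) {
			/* ... process, then rte_pktmbuf_free() the mbufs ... */
			continue;
		}
		/* Queue went idle: unmask its ITR (this lands in the new
		 * fm10k_dev_rx_queue_intr_enable()), block on the interrupt
		 * event fd (e.g. via epoll), then mask it again before
		 * resuming the poll loop.
		 */
		rte_eth_dev_rx_intr_enable(port_id, queue_id);
		/* ... wait on the RX interrupt event fd here ... */
		rte_eth_dev_rx_intr_disable(port_id, queue_id);
	}
}

As the patch notes in fm10k_dev_rxq_interrupt_setup(), one vector is reserved for the mailbox, so this interrupt mode needs a driver that supports multiple interrupt vectors, such as vfio-pci.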