* Copyright(c) 2013-2016 Intel Corporation
*/
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#define GLORT_FD_MASK GLORT_PF_MASK
#define GLORT_FD_INDEX GLORT_FD_Q_BASE
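+/* Dynamic log type IDs, registered at load time in fm10k_init_log() below */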
+int fm10k_logtype_init;
+int fm10k_logtype_driver;
+
static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int fm10k_check_ftag(struct rte_devargs *devargs);
static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
+static void fm10k_dev_infos_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev);
+static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev);
+
struct fm10k_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
unsigned offset;
PMD_INIT_FUNC_TRACE();
- if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
+ /* The KEEP_CRC offload flag is not supported by the PMD;
+ * this block can be removed once DEV_RX_OFFLOAD_CRC_STRIP is removed.
+ */
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
+
/* multiple queue mode checking */
ret = fm10k_check_mq_mode(dev);
if (ret != 0) {
return ret;
}
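+ /* Reset scattered Rx; Rx queue init re-enables it when frames may not
+ * fit in a single mbuf or when the SCATTER offload is requested.
+ */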
+ dev->data->scattered_rx = 0;
+
return 0;
}
/* Account for two VLAN tag lengths to support dual VLAN */
if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
- dev->data->dev_conf.rxmode.enable_scatter) {
+ rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
uint32_t reg;
dev->data->scattered_rx = 1;
reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int err = -1;
+ int err;
uint32_t reg;
struct fm10k_rx_queue *rxq;
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- rxq = dev->data->rx_queues[rx_queue_id];
- err = rx_queue_reset(rxq);
- if (err == -ENOMEM) {
- PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
- return err;
- } else if (err == -EINVAL) {
- PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
- " %d", err);
- return err;
- }
+ rxq = dev->data->rx_queues[rx_queue_id];
+ err = rx_queue_reset(rxq);
+ if (err == -ENOMEM) {
+ PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
+ return err;
+ } else if (err == -EINVAL) {
+ PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
+ " %d", err);
+ return err;
+ }
- /* Setup the HW Rx Head and Tail Descriptor Pointers
- * Note: this must be done AFTER the queue is enabled on real
- * hardware, but BEFORE the queue is enabled when using the
- * emulation platform. Do it in both places for now and remove
- * this comment and the following two register writes when the
- * emulation platform is no longer being used.
- */
- FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
- FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+ /* Setup the HW Rx Head and Tail Descriptor Pointers
+ * Note: this must be done AFTER the queue is enabled on real
+ * hardware, but BEFORE the queue is enabled when using the
+ * emulation platform. Do it in both places for now and remove
+ * this comment and the following two register writes when the
+ * emulation platform is no longer being used.
+ */
+ FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
- /* Set PF ownership flag for PF devices */
- reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
- if (hw->mac.type == fm10k_mac_pf)
- reg |= FM10K_RXQCTL_PF;
- reg |= FM10K_RXQCTL_ENABLE;
- /* enable RX queue */
- FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
- FM10K_WRITE_FLUSH(hw);
+ /* Set PF ownership flag for PF devices */
+ reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
+ if (hw->mac.type == fm10k_mac_pf)
+ reg |= FM10K_RXQCTL_PF;
+ reg |= FM10K_RXQCTL_ENABLE;
+ /* enable RX queue */
+ FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
+ FM10K_WRITE_FLUSH(hw);
- /* Setup the HW Rx Head and Tail Descriptor Pointers
- * Note: this must be done AFTER the queue is enabled
- */
- FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
- FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- }
+ /* Setup the HW Rx Head and Tail Descriptor Pointers
+ * Note: this must be done AFTER the queue is enabled
+ */
+ FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
PMD_INIT_FUNC_TRACE();
- if (rx_queue_id < dev->data->nb_rx_queues) {
- /* Disable RX queue */
- rx_queue_disable(hw, rx_queue_id);
+ /* Disable RX queue */
+ rx_queue_disable(hw, rx_queue_id);
- /* Free mbuf and clean HW ring */
- rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
- dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- }
+ /* Free mbuf and clean HW ring */
+ rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
/** @todo - this should be defined in the shared code */
#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
- int err = 0;
+ struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
- struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
-
- q->ops->reset(q);
+ q->ops->reset(q);
- /* reset head and tail pointers */
- FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
- FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
+ /* reset head and tail pointers */
+ FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
+ FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
- /* enable TX queue */
- FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
- FM10K_TXDCTL_ENABLE | txdctl);
- FM10K_WRITE_FLUSH(hw);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- } else
- err = -1;
+ /* enable TX queue */
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
+ FM10K_TXDCTL_ENABLE | txdctl);
+ FM10K_WRITE_FLUSH(hw);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
- return err;
+ return 0;
}
static int
PMD_INIT_FUNC_TRACE();
- if (tx_queue_id < dev->data->nb_tx_queues) {
- tx_queue_disable(hw, tx_queue_id);
- tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
- dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
- }
+ tx_queue_disable(hw, tx_queue_id);
+ tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
- /* The speed is ~50Gbps per Gen3 x8 PCIe interface. For now, we
- * leave the speed undefined since there is no 50Gbps Ethernet.
- */
- dev->data->dev_link.link_speed = 0;
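+ /* ~50G is the bandwidth of the Gen3 x8 PCIe interface; the link
+ * parameters are fixed, so no autonegotiation is reported.
+ */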
+ dev->data->dev_link.link_speed = ETH_SPEED_NUM_50G;
dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
dev->data->dev_link.link_status =
dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
+ dev->data->dev_link.link_autoneg = ETH_LINK_FIXED;
return 0;
}
PMD_INIT_FUNC_TRACE();
- dev_info->pci_dev = pdev;
dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
dev_info->max_rx_queues = hw->mac.max_queues;
dev_info->vmdq_queue_base = 0;
dev_info->max_vmdq_pools = ETH_32_POOLS;
dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
- dev_info->tx_offload_capa =
- DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM |
- DEV_TX_OFFLOAD_TCP_TSO;
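+ /* Port-level capabilities report the union of port-wide and
+ * per-queue offloads, as the ethdev offloads API expects.
+ */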
+ dev_info->rx_queue_offload_capa = fm10k_get_rx_queue_offloads_capa(dev);
+ dev_info->rx_offload_capa = fm10k_get_rx_port_offloads_capa(dev) |
+ dev_info->rx_queue_offload_capa;
+ dev_info->tx_queue_offload_capa = fm10k_get_tx_queue_offloads_capa(dev);
+ dev_info->tx_offload_capa = fm10k_get_tx_port_offloads_capa(dev) |
+ dev_info->tx_queue_offload_capa;
dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
dev_info->reta_size = FM10K_MAX_RSS_INDICES;
},
.rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
.rx_drop_en = 0,
+ .offloads = 0,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
},
.tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
.tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
- .txq_flags = FM10K_SIMPLE_TX_FLAG,
+ .offloads = 0,
};
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
if (mask & ETH_VLAN_STRIP_MASK) {
- if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP))
PMD_INIT_LOG(ERR, "VLAN stripping is "
"always on in fm10k");
}
if (mask & ETH_VLAN_EXTEND_MASK) {
- if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND)
PMD_INIT_LOG(ERR, "VLAN QinQ is not "
"supported in fm10k");
}
if (mask & ETH_VLAN_FILTER_MASK) {
- if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER))
PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
}
return 1;
}
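+/* Per-queue Rx offloads may be enabled on individual queues at setup
+ * time; port-level offloads apply to all queues on the port.
+ */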
+static uint64_t fm10k_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return (uint64_t)(DEV_RX_OFFLOAD_SCATTER);
+}
+
+static uint64_t fm10k_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return (uint64_t)(DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_HEADER_SPLIT);
+}
+
static int
fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
uint16_t nb_desc, unsigned int socket_id,
FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
struct fm10k_rx_queue *q;
const struct rte_memzone *mz;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
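+ /* A queue operates with the union of the offloads requested for this
+ * queue and the port-wide Rx offloads configured at device level.
+ */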
+ offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
+
/* make sure the mempool element size can account for alignment. */
if (!mempool_element_size_valid(mp)) {
PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
q->queue_id = queue_id;
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
+ q->offloads = offloads;
if (handle_rxconf(q, conf))
return -EINVAL;
return 0;
}
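+/* No Tx offload can be enabled on a per-queue basis; all fm10k Tx
+ * offloads are port-wide (see fm10k_get_tx_port_offloads_capa()).
+ */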
+static uint64_t fm10k_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return 0;
+}
+
+static uint64_t fm10k_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
+{
+ RTE_SET_USED(dev);
+
+ return (uint64_t)(DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO);
+}
+
static int
fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
uint16_t nb_desc, unsigned int socket_id,
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct fm10k_tx_queue *q;
const struct rte_memzone *mz;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
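+ /* As on the Rx side, combine the per-queue and port-wide Tx offloads */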
+ offloads = conf->offloads | dev->data->dev_conf.txmode.offloads;
+
/* make sure a valid number of descriptors have been requested */
if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
FM10K_MULT_TX_DESC, nb_desc)) {
q->nb_desc = nb_desc;
q->port_id = dev->data->port_id;
q->queue_id = queue_id;
- q->txq_flags = conf->txq_flags;
+ q->offloads = offloads;
q->ops = &def_txq_ops;
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
dev_info->sm_down = 0;
_rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
}
}
PMD_INIT_LOG(INFO, "INT: Switch is down");
dev_info->sm_down = 1;
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
}
/* Handle SRAM error */
/* Setting reset flag */
dev_info->sm_down = 1;
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
}
if (dev_info->sm_down == 1 &&
dev_info->sm_down = 0;
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
- NULL, NULL);
+ NULL);
}
/* Re-enable interrupt from device side */
.tx_queue_setup = fm10k_tx_queue_setup,
.tx_queue_release = fm10k_tx_queue_release,
.rx_descriptor_done = fm10k_dev_rx_descriptor_done,
+ .rx_descriptor_status = fm10k_dev_rx_descriptor_status,
+ .tx_descriptor_status = fm10k_dev_tx_descriptor_status,
.rx_queue_intr_enable = fm10k_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = fm10k_dev_rx_queue_intr_disable,
.reta_update = fm10k_reta_update,
uint16_t tx_ftag_en = 0;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- /* primary process has set the ftag flag and txq_flags */
+ /* primary process has set the ftag flag and offloads */
txq = dev->data->tx_queues[0];
if (fm10k_tx_vec_condition_check(txq)) {
dev->tx_pkt_burst = fm10k_xmit_pkts;
RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");
+
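+/* Register the dynamic log types once at load time; both default to
+ * NOTICE and can be raised through the EAL --log-level option.
+ */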
+RTE_INIT(fm10k_init_log)
+{
+ fm10k_logtype_init = rte_log_register("pmd.net.fm10k.init");
+ if (fm10k_logtype_init >= 0)
+ rte_log_set_level(fm10k_logtype_init, RTE_LOG_NOTICE);
+ fm10k_logtype_driver = rte_log_register("pmd.net.fm10k.driver");
+ if (fm10k_logtype_driver >= 0)
+ rte_log_set_level(fm10k_logtype_driver, RTE_LOG_NOTICE);
+}