X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Ffm10k%2Ffm10k_ethdev.c;h=ce053b03f37b81d4552b4606e53af279cd9251cf;hb=6a84c37e39750ddd25dde4d1c57f2b93f39dc3c3;hp=068a20c014aa29fbe27571419ce061edbf8f164e;hpb=8bd8bae9c389c1664d095424f5ad1549614cd29c;p=dpdk.git diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c index 068a20c014..ce053b03f3 100644 --- a/drivers/net/fm10k/fm10k_ethdev.c +++ b/drivers/net/fm10k/fm10k_ethdev.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. + * Copyright(c) 2013-2016 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -37,6 +37,7 @@ #include #include #include +#include #include "fm10k.h" #include "base/fm10k_api.h" @@ -79,6 +80,7 @@ static void fm10k_tx_queue_release(void *queue); static void fm10k_rx_queue_release(void *queue); static void fm10k_set_rx_function(struct rte_eth_dev *dev); static void fm10k_set_tx_function(struct rte_eth_dev *dev); +static int fm10k_check_ftag(struct rte_devargs *devargs); struct fm10k_xstats_name_off { char name[RTE_ETH_XSTATS_NAME_SIZE]; @@ -529,8 +531,10 @@ fm10k_dev_rss_configure(struct rte_eth_dev *dev) if (dev->data->nb_rx_queues == 1 || dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS || - dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) + dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) { + FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0); return; + } /* random key is rss_intel_key (default) or user provided (rss_key) */ if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL) @@ -668,6 +672,19 @@ fm10k_dev_tx_init(struct rte_eth_dev *dev) PMD_INIT_LOG(ERR, "failed to disable queue %d", i); return -1; } + /* Enable use of FTAG bit in TX descriptor, PFVTCTL + * register is read-only for VF. 
+ */ + if (fm10k_check_ftag(dev->pci_dev->devargs)) { + if (hw->mac.type == fm10k_mac_pf) { + FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i), + FM10K_PFVTCTL_FTAG_DESC_ENABLE); + PMD_INIT_LOG(DEBUG, "FTAG mode is enabled"); + } else { + PMD_INIT_LOG(ERR, "VF FTAG is not supported."); + return -ENOTSUP; + } + } /* set location and size for descriptor ring */ FM10K_WRITE_REG(hw, FM10K_TDBAL(i), @@ -675,6 +692,9 @@ fm10k_dev_tx_init(struct rte_eth_dev *dev) FM10K_WRITE_REG(hw, FM10K_TDBAH(i), base_addr >> (CHAR_BIT * sizeof(uint32_t))); FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size); + + /* assign default SGLORT for each TX queue */ + FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map); } /* set up vector or scalar TX function as appropriate */ @@ -687,13 +707,16 @@ static int fm10k_dev_rx_init(struct rte_eth_dev *dev) { struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct fm10k_macvlan_filter_info *macvlan; struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; int i, ret; struct fm10k_rx_queue *rxq; uint64_t base_addr; uint32_t size; uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY; + uint32_t logic_port = hw->mac.dglort_map; uint16_t buf_size; + uint16_t queue_stride = 0; /* enable RXINT for interrupt mode */ i = 0; @@ -748,7 +771,8 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev) buf_size -= FM10K_RX_DATABUF_ALIGN; FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), - buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT); + (buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) | + FM10K_SRRCTL_LOOPBACK_SUPPRESS); /* It adds dual VLAN length for supporting dual VLAN */ if ((dev->data->dev_conf.rxmode.max_rx_pkt_len + @@ -775,6 +799,18 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev) /* Decide the best RX function */ fm10k_set_rx_function(dev); + /* update RX_SGLORT for loopback suppress*/ + if (hw->mac.type != fm10k_mac_pf) + return 0; + macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private); + if (macvlan->nb_queue_pools) + queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools; + for (i = 0; i < dev->data->nb_rx_queues; ++i) { + if (i && queue_stride && !(i % queue_stride)) + logic_port++; + FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port); + } + return 0; } @@ -1215,13 +1251,54 @@ fm10k_link_update(struct rte_eth_dev *dev, * is no 50Gbps Ethernet. 
*/ dev->data->dev_link.link_speed = 0; dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX; - dev->data->dev_link.link_status = 1; + dev->data->dev_link.link_status = ETH_LINK_UP; return 0; } +static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit) +{ + unsigned i, q; + unsigned count = 0; + + if (xstats_names != NULL) { + /* Note: limit checked in rte_eth_xstats_names() */ + + /* Global stats */ + for (i = 0; i < FM10K_NB_HW_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", fm10k_hw_stats_strings[count].name); + xstats_names[count].id = count; + count++; + } + + /* PF queue stats */ + for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) { + for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "rx_q%u_%s", q, + fm10k_hw_stats_rx_q_strings[i].name); + xstats_names[count].id = count; + count++; + } + for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "tx_q%u_%s", q, + fm10k_hw_stats_tx_q_strings[i].name); + xstats_names[count].id = count; + count++; + } + } + } + return FM10K_NB_XSTATS; +} + static int -fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, +fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned n) { struct fm10k_hw_stats *hw_stats = @@ -1233,8 +1310,6 @@ fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, /* Global stats */ for (i = 0; i < FM10K_NB_HW_XSTATS; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), - "%s", fm10k_hw_stats_strings[count].name); xstats[count].value = *(uint64_t *)(((char *)hw_stats) + fm10k_hw_stats_strings[count].offset); count++; @@ -1243,18 +1318,12 @@ fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, /* PF queue stats */ for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) { for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), - "rx_q%u_%s", q, - fm10k_hw_stats_rx_q_strings[i].name); xstats[count].value = *(uint64_t *)(((char *)&hw_stats->q[q]) + fm10k_hw_stats_rx_q_strings[i].offset); count++; } for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), - "tx_q%u_%s", q, - fm10k_hw_stats_tx_q_strings[i].name); xstats[count].value = *(uint64_t *)(((char *)&hw_stats->q[q]) + fm10k_hw_stats_tx_q_strings[i].offset); @@ -1376,7 +1445,60 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev, .nb_min = FM10K_MIN_TX_DESC, .nb_align = FM10K_MULT_TX_DESC, }; + + dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | + ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G | + ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G; +} + +#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE +static const uint32_t * +fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + if (dev->rx_pkt_burst == fm10k_recv_pkts || + dev->rx_pkt_burst == fm10k_recv_scattered_pkts) { + static uint32_t ptypes[] = { + /* refers to rx_desc_to_ol_flags() */ + RTE_PTYPE_L2_ETHER, + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_UNKNOWN + }; + + return ptypes; + } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec || + dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) { + static uint32_t ptypes_vec[] = { + /* refers to fm10k_desc_to_pktype_v() */ + RTE_PTYPE_L3_IPV4, + 
RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_TUNNEL_GENEVE, + RTE_PTYPE_TUNNEL_NVGRE, + RTE_PTYPE_TUNNEL_VXLAN, + RTE_PTYPE_TUNNEL_GRE, + RTE_PTYPE_UNKNOWN + }; + + return ptypes_vec; + } + + return NULL; +} +#else +static const uint32_t * +fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused) +{ + return NULL; } +#endif static int fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) @@ -2117,12 +2239,12 @@ fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev) /* Bind all local non-queue interrupt to vector 0 */ int_map |= FM10K_MISC_VEC_ID; - FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map); - FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map); - FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map); - FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map); - FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map); - FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map); /* Enable misc causes */ FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) | @@ -2148,12 +2270,12 @@ fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev) int_map |= FM10K_MISC_VEC_ID; - FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_Mailbox), int_map); - FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_PCIeFault), int_map); - FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchUpDown), int_map); - FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SwitchEvent), int_map); - FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_SRAM), int_map); - FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_VFLR), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map); + FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map); /* Disable misc causes */ FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) | @@ -2501,29 +2623,16 @@ static const struct fm10k_msg_data fm10k_msgdata_vf[] = { FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), }; -/* Mailbox message handler in PF */ -static const struct fm10k_msg_data fm10k_msgdata_pf[] = { - FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf), - FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf), - FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf), - FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf), - FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf), - FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf), - FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error), -}; - static int fm10k_setup_mbx_service(struct fm10k_hw *hw) { - int err; + int err = 0; /* Initialize mailbox lock */ fm10k_mbx_initlock(hw); /* Replace default message handler with new ones */ - if (hw->mac.type == fm10k_mac_pf) - err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_pf); - else + if (hw->mac.type == fm10k_mac_vf) err = 
hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf); if (err) { @@ -2553,10 +2662,12 @@ static const struct eth_dev_ops fm10k_eth_dev_ops = { .allmulticast_disable = fm10k_dev_allmulticast_disable, .stats_get = fm10k_stats_get, .xstats_get = fm10k_xstats_get, + .xstats_get_names = fm10k_xstats_get_names, .stats_reset = fm10k_stats_reset, .xstats_reset = fm10k_stats_reset, .link_update = fm10k_link_update, .dev_infos_get = fm10k_dev_infos_get, + .dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get, .vlan_filter_set = fm10k_vlan_filter_set, .vlan_offload_set = fm10k_vlan_offload_set, .mac_addr_add = fm10k_macaddr_add, @@ -2578,15 +2689,57 @@ static const struct eth_dev_ops fm10k_eth_dev_ops = { .rss_hash_conf_get = fm10k_rss_hash_conf_get, }; +static int ftag_check_handler(__rte_unused const char *key, + const char *value, __rte_unused void *opaque) +{ + if (strcmp(value, "1")) + return -1; + + return 0; +} + +static int +fm10k_check_ftag(struct rte_devargs *devargs) +{ + struct rte_kvargs *kvlist; + const char *ftag_key = "enable_ftag"; + + if (devargs == NULL) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, NULL); + if (kvlist == NULL) + return 0; + + if (!rte_kvargs_count(kvlist, ftag_key)) { + rte_kvargs_free(kvlist); + return 0; + } + /* FTAG is enabled when there's key-value pair: enable_ftag=1 */ + if (rte_kvargs_process(kvlist, ftag_key, + ftag_check_handler, NULL) < 0) { + rte_kvargs_free(kvlist); + return 0; + } + rte_kvargs_free(kvlist); + + return 1; +} + static void __attribute__((cold)) fm10k_set_tx_function(struct rte_eth_dev *dev) { struct fm10k_tx_queue *txq; int i; int use_sse = 1; + uint16_t tx_ftag_en = 0; + + if (fm10k_check_ftag(dev->pci_dev->devargs)) + tx_ftag_en = 1; for (i = 0; i < dev->data->nb_tx_queues; i++) { txq = dev->data->tx_queues[i]; + txq->tx_ftag_en = tx_ftag_en; /* Check if Vector Tx is satisfied */ if (fm10k_tx_vec_condition_check(txq)) { use_sse = 0; @@ -2612,11 +2765,16 @@ fm10k_set_rx_function(struct rte_eth_dev *dev) { struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev); uint16_t i, rx_using_sse; + uint16_t rx_ftag_en = 0; + + if (fm10k_check_ftag(dev->pci_dev->devargs)) + rx_ftag_en = 1; /* In order to allow Vector Rx there are a few configuration * conditions to be met. */ - if (!fm10k_rx_vec_condition_check(dev) && dev_info->rx_vec_allowed) { + if (!fm10k_rx_vec_condition_check(dev) && + dev_info->rx_vec_allowed && !rx_ftag_en) { if (dev->data->scattered_rx) dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec; else @@ -2639,6 +2797,7 @@ fm10k_set_rx_function(struct rte_eth_dev *dev) struct fm10k_rx_queue *rxq = dev->data->rx_queues[i]; rxq->rx_using_sse = rx_using_sse; + rxq->rx_ftag_en = rx_ftag_en; } }
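
Note (reviewer commentary, not part of the patch): the FTAG support added in fm10k_dev_tx_init() and fm10k_check_ftag() is driven purely by the device's devargs string and only takes effect on PF ports; a VF port gets -ENOTSUP. Assuming the EAL whitelist syntax of this DPDK generation, it would be switched on with something like "-w 0000:84:00.0,enable_ftag=1" (the PCI address is only illustrative). Note also that fm10k_set_rx_function() falls back to the scalar receive path whenever FTAG is enabled, since the vector Rx condition now requires !rx_ftag_en.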
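
Note (reviewer commentary, not part of the patch): with the xstats_get_names callback now wired into fm10k_eth_dev_ops, an application fetches extended statistics in two steps, names first and values second, matched by index. The sketch below assumes the rte_ethdev prototypes of this release (port ids were still uint8_t) and uses a hypothetical helper name; it is an illustration, not code from the tree.

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

#include <rte_ethdev.h>

/* Hypothetical helper (illustration only): dump every extended
 * statistic exposed by one port using the split names/values API.
 */
static void
print_fm10k_xstats(uint8_t port_id)
{
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *values;
	int len, i;

	/* A NULL buffer makes the call report how many entries exist. */
	len = rte_eth_xstats_get_names(port_id, NULL, 0);
	if (len <= 0)
		return;

	names = calloc(len, sizeof(*names));
	values = calloc(len, sizeof(*values));
	if (names != NULL && values != NULL &&
	    rte_eth_xstats_get_names(port_id, names, len) == len &&
	    rte_eth_xstats_get(port_id, values, len) == len) {
		/* Entries of both arrays are matched by index. */
		for (i = 0; i < len; i++)
			printf("%s: %" PRIu64 "\n",
			       names[i].name, values[i].value);
	}

	free(names);
	free(values);
}

Splitting the name strings out of fm10k_xstats_get() is what lets the value-read path skip the snprintf() calls that the old rte_eth_xstats API performed on every query.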
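
Note (reviewer commentary, not part of the patch): the new dev_supported_ptypes_get callback backs rte_eth_dev_get_supported_ptypes(); for example, calling it with RTE_PTYPE_L4_MASK and a NULL buffer should report how many L4 packet types the currently selected Rx burst function can recognize. When RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE is not defined, the callback returns NULL and the query reports nothing.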