diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index f727382..694a624 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -93,6 +93,8 @@ static int em_get_rx_buffer_size(struct e1000_hw *hw);
 static int eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                          uint32_t index, uint32_t pool);
 static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index);
+static int eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
+                                        struct ether_addr *addr);
 
 static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
                                   struct ether_addr *mc_addr_set,
@@ -189,6 +191,7 @@ static const struct eth_dev_ops eth_em_ops = {
        .dev_led_off          = eth_em_led_off,
        .flow_ctrl_get        = eth_em_flow_ctrl_get,
        .flow_ctrl_set        = eth_em_flow_ctrl_set,
+       .mac_addr_set         = eth_em_default_mac_addr_set,
        .mac_addr_add         = eth_em_rar_set,
        .mac_addr_remove      = eth_em_rar_clear,
        .set_mc_addr_list     = eth_em_set_mc_addr_list,
@@ -451,9 +454,29 @@ eth_em_configure(struct rte_eth_dev *dev)
 {
        struct e1000_interrupt *intr =
                E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+       struct rte_eth_dev_info dev_info;
+       uint64_t rx_offloads;
+       uint64_t tx_offloads;
 
        PMD_INIT_FUNC_TRACE();
        intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+
+       eth_em_infos_get(dev, &dev_info);
+       rx_offloads = dev->data->dev_conf.rxmode.offloads;
+       if ((rx_offloads & dev_info.rx_offload_capa) != rx_offloads) {
+               PMD_DRV_LOG(ERR, "Some Rx offloads are not supported "
+                           "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+                           rx_offloads, dev_info.rx_offload_capa);
+               return -ENOTSUP;
+       }
+       tx_offloads = dev->data->dev_conf.txmode.offloads;
+       if ((tx_offloads & dev_info.tx_offload_capa) != tx_offloads) {
+               PMD_DRV_LOG(ERR, "Some Tx offloads are not supported "
+                           "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+                           tx_offloads, dev_info.tx_offload_capa);
+               return -ENOTSUP;
+       }
+
        PMD_INIT_FUNC_TRACE();
 
        return 0;
@@ -1017,9 +1040,11 @@ eth_em_rx_queue_intr_disable(struct rte_eth_dev *dev, __rte_unused uint16_t queu
        return 0;
 }
 
-static uint32_t
-em_get_max_pktlen(const struct e1000_hw *hw)
+uint32_t
+em_get_max_pktlen(struct rte_eth_dev *dev)
 {
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
        switch (hw->mac.type) {
        case e1000_82571:
        case e1000_82572:
@@ -1048,20 +1073,9 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
-       dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
+       dev_info->max_rx_pktlen = em_get_max_pktlen(dev);
        dev_info->max_mac_addrs = hw->mac.rar_entry_count;
-       dev_info->rx_offload_capa =
-               DEV_RX_OFFLOAD_VLAN_STRIP |
-               DEV_RX_OFFLOAD_IPV4_CKSUM |
-               DEV_RX_OFFLOAD_UDP_CKSUM  |
-               DEV_RX_OFFLOAD_TCP_CKSUM;
-       dev_info->tx_offload_capa =
-               DEV_TX_OFFLOAD_VLAN_INSERT |
-               DEV_TX_OFFLOAD_IPV4_CKSUM  |
-               DEV_TX_OFFLOAD_UDP_CKSUM   |
-               DEV_TX_OFFLOAD_TCP_CKSUM;
 
        /*
         * Starting with 631xESB hw supports 2 TX/RX queues per port.
@@ -1083,6 +1097,13 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->max_rx_queues = 1;
        dev_info->max_tx_queues = 1;
 
+       dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
+       dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
+                                   dev_info->rx_queue_offload_capa;
+       dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev);
+       dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |
+                                   dev_info->tx_queue_offload_capa;
+
        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = E1000_MAX_RING_DESC,
                .nb_min = E1000_MIN_RING_DESC,
@@ -1100,6 +1121,12 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
                        ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
                        ETH_LINK_SPEED_1G;
+
+       /* Preferred queue parameters */
+       dev_info->default_rxportconf.nb_queues = 1;
+       dev_info->default_txportconf.nb_queues = 1;
+       dev_info->default_txportconf.ring_size = 256;
+       dev_info->default_rxportconf.ring_size = 256;
 }
 
 /* return 0 means link status changed, -1 means not changed */
@@ -1156,7 +1183,7 @@ eth_em_link_update(struct rte_eth_dev *dev, int wait_to_complete)
                link.link_autoneg = !(dev->data->dev_conf.link_speeds &
                                ETH_LINK_SPEED_FIXED);
        } else if (!link_check && (link.link_status == ETH_LINK_UP)) {
-               link.link_speed = 0;
+               link.link_speed = ETH_SPEED_NUM_NONE;
                link.link_duplex = ETH_LINK_HALF_DUPLEX;
                link.link_status = ETH_LINK_DOWN;
                link.link_autoneg = ETH_LINK_FIXED;
@@ -1400,15 +1427,18 @@ em_vlan_hw_strip_enable(struct rte_eth_dev *dev)
 static int
 eth_em_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
+       struct rte_eth_rxmode *rxmode;
+
+       rxmode = &dev->data->dev_conf.rxmode;
        if(mask & ETH_VLAN_STRIP_MASK){
-               if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                        em_vlan_hw_strip_enable(dev);
                else
                        em_vlan_hw_strip_disable(dev);
        }
 
        if(mask & ETH_VLAN_FILTER_MASK){
-               if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
                        em_vlan_hw_filter_enable(dev);
                else
                        em_vlan_hw_filter_disable(dev);
@@ -1749,6 +1779,15 @@ eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index)
        e1000_rar_set(hw, addr, index);
 }
 
+static int
+eth_em_default_mac_addr_set(struct rte_eth_dev *dev,
+                           struct ether_addr *addr)
+{
+       eth_em_rar_clear(dev, 0);
+
+       return eth_em_rar_set(dev, (void *)addr, 0, 0);
+}
+
 static int
 eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
@@ -1775,10 +1814,12 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
        /* switch to jumbo mode if needed */
        if (frame_size > ETHER_MAX_LEN) {
-               dev->data->dev_conf.rxmode.jumbo_frame = 1;
+               dev->data->dev_conf.rxmode.offloads |=
+                       DEV_RX_OFFLOAD_JUMBO_FRAME;
                rctl |= E1000_RCTL_LPE;
        } else {
-               dev->data->dev_conf.rxmode.jumbo_frame = 0;
+               dev->data->dev_conf.rxmode.offloads &=
+                       ~DEV_RX_OFFLOAD_JUMBO_FRAME;
                rctl &= ~E1000_RCTL_LPE;
        }
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);