// SPDX-License-Identifier: GPL-2.0
/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2013 Intel Corporation.

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
14 #include <linux/tcp.h>
18 #include <linux/if_vlan.h>
20 #ifdef CONFIG_IGB_VMDQ_NETDEV
21 int igb_vmdq_open(struct net_device *dev)
23 struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
24 struct igb_adapter *adapter = vadapter->real_adapter;
25 struct net_device *main_netdev = adapter->netdev;
26 int hw_queue = vadapter->rx_ring->queue_index +
27 adapter->vfs_allocated_count;
29 if (test_bit(__IGB_DOWN, &adapter->state)) {
31 "Open %s before opening this device.\n",
35 netif_carrier_off(dev);
36 vadapter->tx_ring->vmdq_netdev = dev;
37 vadapter->rx_ring->vmdq_netdev = dev;
38 if (is_valid_ether_addr(dev->dev_addr)) {
39 igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
40 igb_add_mac_filter(adapter, dev->dev_addr, hw_queue);
42 netif_carrier_on(dev);
46 int igb_vmdq_close(struct net_device *dev)
48 struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
49 struct igb_adapter *adapter = vadapter->real_adapter;
50 int hw_queue = vadapter->rx_ring->queue_index +
51 adapter->vfs_allocated_count;
53 netif_carrier_off(dev);
54 igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
56 vadapter->tx_ring->vmdq_netdev = NULL;
57 vadapter->rx_ring->vmdq_netdev = NULL;
61 netdev_tx_t igb_vmdq_xmit_frame(struct sk_buff *skb, struct net_device *dev)
63 struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
65 return igb_xmit_frame_ring(skb, vadapter->tx_ring);
68 struct net_device_stats *igb_vmdq_get_stats(struct net_device *dev)
70 struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
71 struct igb_adapter *adapter = vadapter->real_adapter;
72 struct e1000_hw *hw = &adapter->hw;
73 int hw_queue = vadapter->rx_ring->queue_index +
74 adapter->vfs_allocated_count;
76 vadapter->net_stats.rx_packets +=
77 E1000_READ_REG(hw, E1000_PFVFGPRC(hw_queue));
78 E1000_WRITE_REG(hw, E1000_PFVFGPRC(hw_queue), 0);
79 vadapter->net_stats.tx_packets +=
80 E1000_READ_REG(hw, E1000_PFVFGPTC(hw_queue));
81 E1000_WRITE_REG(hw, E1000_PFVFGPTC(hw_queue), 0);
82 vadapter->net_stats.rx_bytes +=
83 E1000_READ_REG(hw, E1000_PFVFGORC(hw_queue));
84 E1000_WRITE_REG(hw, E1000_PFVFGORC(hw_queue), 0);
85 vadapter->net_stats.tx_bytes +=
86 E1000_READ_REG(hw, E1000_PFVFGOTC(hw_queue));
87 E1000_WRITE_REG(hw, E1000_PFVFGOTC(hw_queue), 0);
88 vadapter->net_stats.multicast +=
89 E1000_READ_REG(hw, E1000_PFVFMPRC(hw_queue));
90 E1000_WRITE_REG(hw, E1000_PFVFMPRC(hw_queue), 0);
91 /* only return the current stats */
92 return &vadapter->net_stats;
96 * igb_write_vm_addr_list - write unicast addresses to RAR table
97 * @netdev: network interface device structure
99 * Writes unicast address list to the RAR table.
100 * Returns: -ENOMEM on failure/insufficient address space
101 * 0 on no addresses written
102 * X on writing X addresses to the RAR table
104 static int igb_write_vm_addr_list(struct net_device *netdev)
106 struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
107 struct igb_adapter *adapter = vadapter->real_adapter;
109 int hw_queue = vadapter->rx_ring->queue_index +
110 adapter->vfs_allocated_count;
112 /* return ENOMEM indicating insufficient memory for addresses */
113 if (netdev_uc_count(netdev) > igb_available_rars(adapter))
116 if (!netdev_uc_empty(netdev)) {
117 #ifdef NETDEV_HW_ADDR_T_UNICAST
118 struct netdev_hw_addr *ha;
120 struct dev_mc_list *ha;
122 netdev_for_each_uc_addr(ha, netdev) {
123 #ifdef NETDEV_HW_ADDR_T_UNICAST
124 igb_del_mac_filter(adapter, ha->addr, hw_queue);
125 igb_add_mac_filter(adapter, ha->addr, hw_queue);
127 igb_del_mac_filter(adapter, ha->da_addr, hw_queue);
128 igb_add_mac_filter(adapter, ha->da_addr, hw_queue);
137 #define E1000_VMOLR_UPE 0x20000000 /* Unicast promiscuous mode */
138 void igb_vmdq_set_rx_mode(struct net_device *dev)
140 struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
141 struct igb_adapter *adapter = vadapter->real_adapter;
142 struct e1000_hw *hw = &adapter->hw;
144 int hw_queue = vadapter->rx_ring->queue_index +
145 adapter->vfs_allocated_count;
147 /* Check for Promiscuous and All Multicast modes */
148 vmolr = E1000_READ_REG(hw, E1000_VMOLR(hw_queue));
150 /* clear the affected bits */
151 vmolr &= ~(E1000_VMOLR_UPE | E1000_VMOLR_MPME |
152 E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE);
154 if (dev->flags & IFF_PROMISC) {
155 vmolr |= E1000_VMOLR_UPE;
156 rctl = E1000_READ_REG(hw, E1000_RCTL);
157 rctl |= E1000_RCTL_UPE;
158 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
160 rctl = E1000_READ_REG(hw, E1000_RCTL);
161 rctl &= ~E1000_RCTL_UPE;
162 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
163 if (dev->flags & IFF_ALLMULTI) {
164 vmolr |= E1000_VMOLR_MPME;
167 * Write addresses to the MTA, if the attempt fails
168 * then we should just turn on promiscuous mode so
169 * that we can at least receive multicast traffic
171 if (igb_write_mc_addr_list(adapter->netdev) != 0)
172 vmolr |= E1000_VMOLR_ROMPE;
174 #ifdef HAVE_SET_RX_MODE
176 * Write addresses to available RAR registers, if there is not
177 * sufficient space to store all the addresses then enable
178 * unicast promiscuous mode
180 if (igb_write_vm_addr_list(dev) < 0)
181 vmolr |= E1000_VMOLR_UPE;
184 E1000_WRITE_REG(hw, E1000_VMOLR(hw_queue), vmolr);
189 int igb_vmdq_set_mac(struct net_device *dev, void *p)
191 struct sockaddr *addr = p;
192 struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
193 struct igb_adapter *adapter = vadapter->real_adapter;
194 int hw_queue = vadapter->rx_ring->queue_index +
195 adapter->vfs_allocated_count;
197 igb_del_mac_filter(adapter, dev->dev_addr, hw_queue);
198 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
199 return igb_add_mac_filter(adapter, dev->dev_addr, hw_queue);
202 int igb_vmdq_change_mtu(struct net_device *dev, int new_mtu)
204 struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
205 struct igb_adapter *adapter = vadapter->real_adapter;
207 if (adapter->netdev->mtu < new_mtu) {
209 "Set MTU on %s to >= %d "
210 "before changing MTU on %s\n",
211 adapter->netdev->name, new_mtu, dev->name);
218 void igb_vmdq_tx_timeout(struct net_device *dev)
223 void igb_vmdq_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
225 struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
226 struct igb_adapter *adapter = vadapter->real_adapter;
227 struct e1000_hw *hw = &adapter->hw;
228 int hw_queue = vadapter->rx_ring->queue_index +
229 adapter->vfs_allocated_count;
231 vadapter->vlgrp = grp;
233 igb_enable_vlan_tags(adapter);
234 E1000_WRITE_REG(hw, E1000_VMVIR(hw_queue), 0);
238 void igb_vmdq_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
240 struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
241 struct igb_adapter *adapter = vadapter->real_adapter;
242 #ifndef HAVE_NETDEV_VLAN_FEATURES
243 struct net_device *v_netdev;
245 int hw_queue = vadapter->rx_ring->queue_index +
246 adapter->vfs_allocated_count;
248 /* attempt to add filter to vlvf array */
249 igb_vlvf_set(adapter, vid, TRUE, hw_queue);
251 #ifndef HAVE_NETDEV_VLAN_FEATURES
253 /* Copy feature flags from netdev to the vlan netdev for this vid.
254 * This allows things like TSO to bubble down to our vlan device.
256 v_netdev = vlan_group_get_device(vadapter->vlgrp, vid);
257 v_netdev->features |= adapter->netdev->features;
258 vlan_group_set_device(vadapter->vlgrp, vid, v_netdev);
263 void igb_vmdq_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
265 struct igb_vmdq_adapter *vadapter = netdev_priv(dev);
266 struct igb_adapter *adapter = vadapter->real_adapter;
267 int hw_queue = vadapter->rx_ring->queue_index +
268 adapter->vfs_allocated_count;
270 vlan_group_set_device(vadapter->vlgrp, vid, NULL);
271 /* remove vlan from VLVF table array */
272 igb_vlvf_set(adapter, vid, FALSE, hw_queue);
278 static int igb_vmdq_get_settings(struct net_device *netdev,
279 struct ethtool_cmd *ecmd)
281 struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
282 struct igb_adapter *adapter = vadapter->real_adapter;
283 struct e1000_hw *hw = &adapter->hw;
286 if (hw->phy.media_type == e1000_media_type_copper) {
288 ecmd->supported = (SUPPORTED_10baseT_Half |
289 SUPPORTED_10baseT_Full |
290 SUPPORTED_100baseT_Half |
291 SUPPORTED_100baseT_Full |
292 SUPPORTED_1000baseT_Full|
295 ecmd->advertising = ADVERTISED_TP;
297 if (hw->mac.autoneg == 1) {
298 ecmd->advertising |= ADVERTISED_Autoneg;
299 /* the e1000 autoneg seems to match ethtool nicely */
300 ecmd->advertising |= hw->phy.autoneg_advertised;
303 ecmd->port = PORT_TP;
304 ecmd->phy_address = hw->phy.addr;
306 ecmd->supported = (SUPPORTED_1000baseT_Full |
310 ecmd->advertising = (ADVERTISED_1000baseT_Full |
314 ecmd->port = PORT_FIBRE;
317 ecmd->transceiver = XCVR_INTERNAL;
319 status = E1000_READ_REG(hw, E1000_STATUS);
321 if (status & E1000_STATUS_LU) {
323 if ((status & E1000_STATUS_SPEED_1000) ||
324 hw->phy.media_type != e1000_media_type_copper)
325 ecmd->speed = SPEED_1000;
326 else if (status & E1000_STATUS_SPEED_100)
327 ecmd->speed = SPEED_100;
329 ecmd->speed = SPEED_10;
331 if ((status & E1000_STATUS_FD) ||
332 hw->phy.media_type != e1000_media_type_copper)
333 ecmd->duplex = DUPLEX_FULL;
335 ecmd->duplex = DUPLEX_HALF;
341 ecmd->autoneg = hw->mac.autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
346 static u32 igb_vmdq_get_msglevel(struct net_device *netdev)
348 struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
349 struct igb_adapter *adapter = vadapter->real_adapter;
350 return adapter->msg_enable;
353 static void igb_vmdq_get_drvinfo(struct net_device *netdev,
354 struct ethtool_drvinfo *drvinfo)
356 struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
357 struct igb_adapter *adapter = vadapter->real_adapter;
358 struct net_device *main_netdev = adapter->netdev;
360 strncpy(drvinfo->driver, igb_driver_name, 32);
361 strncpy(drvinfo->version, igb_driver_version, 32);
363 strncpy(drvinfo->fw_version, "N/A", 4);
364 snprintf(drvinfo->bus_info, 32, "%s VMDQ %d", main_netdev->name,
365 vadapter->rx_ring->queue_index);
366 drvinfo->n_stats = 0;
367 drvinfo->testinfo_len = 0;
368 drvinfo->regdump_len = 0;
371 static void igb_vmdq_get_ringparam(struct net_device *netdev,
372 struct ethtool_ringparam *ring)
374 struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
376 struct igb_ring *tx_ring = vadapter->tx_ring;
377 struct igb_ring *rx_ring = vadapter->rx_ring;
379 ring->rx_max_pending = IGB_MAX_RXD;
380 ring->tx_max_pending = IGB_MAX_TXD;
381 ring->rx_mini_max_pending = 0;
382 ring->rx_jumbo_max_pending = 0;
383 ring->rx_pending = rx_ring->count;
384 ring->tx_pending = tx_ring->count;
385 ring->rx_mini_pending = 0;
386 ring->rx_jumbo_pending = 0;
388 static u32 igb_vmdq_get_rx_csum(struct net_device *netdev)
390 struct igb_vmdq_adapter *vadapter = netdev_priv(netdev);
391 struct igb_adapter *adapter = vadapter->real_adapter;
393 return test_bit(IGB_RING_FLAG_RX_CSUM, &adapter->rx_ring[0]->flags);
397 static struct ethtool_ops igb_vmdq_ethtool_ops = {
398 .get_settings = igb_vmdq_get_settings,
399 .get_drvinfo = igb_vmdq_get_drvinfo,
400 .get_link = ethtool_op_get_link,
401 .get_ringparam = igb_vmdq_get_ringparam,
402 .get_rx_csum = igb_vmdq_get_rx_csum,
403 .get_tx_csum = ethtool_op_get_tx_csum,
404 .get_sg = ethtool_op_get_sg,
405 .set_sg = ethtool_op_set_sg,
406 .get_msglevel = igb_vmdq_get_msglevel,
408 .get_tso = ethtool_op_get_tso,
410 #ifdef HAVE_ETHTOOL_GET_PERM_ADDR
411 .get_perm_addr = ethtool_op_get_perm_addr,
415 void igb_vmdq_set_ethtool_ops(struct net_device *netdev)
417 SET_ETHTOOL_OPS(netdev, &igb_vmdq_ethtool_ops);
421 #endif /* CONFIG_IGB_VMDQ_NETDEV */