1 /*******************************************************************************
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2012 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *******************************************************************************/
31 #ifndef LINUX_VERSION_CODE
32 #include <linux/version.h>
34 #define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
36 #include <linux/init.h>
37 #include <linux/types.h>
38 #include <linux/errno.h>
39 #include <linux/module.h>
40 #include <linux/pci.h>
41 #include <linux/netdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/skbuff.h>
44 #include <linux/ioport.h>
45 #include <linux/slab.h>
46 #include <linux/list.h>
47 #include <linux/delay.h>
48 #include <linux/sched.h>
51 #include <linux/udp.h>
52 #include <linux/mii.h>
53 #include <linux/vmalloc.h>
55 #include <linux/ethtool.h>
56 #include <linux/if_vlan.h>
58 /* NAPI enable/disable flags here */
59 /* enable NAPI for ixgbe by default */
60 #undef CONFIG_IXGBE_NAPI
61 #define CONFIG_IXGBE_NAPI
63 #ifdef CONFIG_IXGBE_NAPI
66 #endif /* CONFIG_IXGBE_NAPI */
70 #endif /* IXGBE_NAPI */
73 #endif /* IXGBE_NO_NAPI */
75 #define adapter_struct ixgbe_adapter
76 #define adapter_q_vector ixgbe_q_vector
78 /* and finally set defines so that the code sees the changes */
80 #ifndef CONFIG_IXGBE_NAPI
81 #define CONFIG_IXGBE_NAPI
84 #undef CONFIG_IXGBE_NAPI
87 /* packet split disable/enable */
88 #ifdef DISABLE_PACKET_SPLIT
89 #ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
90 #define CONFIG_IXGBE_DISABLE_PACKET_SPLIT
92 #endif /* DISABLE_PACKET_SPLIT */
94 /* MSI compatibility code for all kernels and drivers */
95 #ifdef DISABLE_PCI_MSI
98 #ifndef CONFIG_PCI_MSI
99 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
101 u16 vector; /* kernel uses to write allocated vector */
102 u16 entry; /* driver uses to specify entry, OS writes */
/* Kernel built without CONFIG_PCI_MSI: stub out the MSI/MSI-X API so the
 * driver compiles unchanged. The enable calls fail with -ENOTSUPP, which
 * pushes the driver onto legacy (INTx) interrupts; the disable/cleanup
 * calls become no-ops. */
105 #undef pci_enable_msi
106 #define pci_enable_msi(a) -ENOTSUPP
107 #undef pci_disable_msi
108 #define pci_disable_msi(a) do {} while (0)
109 #undef pci_enable_msix
110 #define pci_enable_msix(a, b, c) -ENOTSUPP
111 #undef pci_disable_msix
112 #define pci_disable_msix(a) do {} while (0)
/* no-op: nothing was allocated, so nothing to tear down */
113 #define msi_remove_pci_irq_vectors(a) do {} while (0)
114 #endif /* CONFIG_PCI_MSI */
119 #ifdef DISABLE_NET_POLL_CONTROLLER
120 #undef CONFIG_NET_POLL_CONTROLLER
124 #define PMSG_SUSPEND 3
127 /* generic boolean compatibility */
133 #if ( GCC_VERSION < 3000 )
140 /* kernels less than 2.4.14 don't have this */
142 #define ETH_P_8021Q 0x8100
146 #define module_param(v,t,p) MODULE_PARM(v, "i");
149 #ifndef DMA_64BIT_MASK
150 #define DMA_64BIT_MASK 0xffffffffffffffffULL
153 #ifndef DMA_32BIT_MASK
154 #define DMA_32BIT_MASK 0x00000000ffffffffULL
157 #ifndef PCI_CAP_ID_EXP
158 #define PCI_CAP_ID_EXP 0x10
161 #ifndef PCIE_LINK_STATE_L0S
162 #define PCIE_LINK_STATE_L0S 1
164 #ifndef PCIE_LINK_STATE_L1
165 #define PCIE_LINK_STATE_L1 2
170 #define mmiowb() asm volatile ("mf.a" ::: "memory")
176 #ifndef SET_NETDEV_DEV
177 #define SET_NETDEV_DEV(net, pdev)
180 #if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
181 #define free_netdev(x) kfree(x)
184 #ifdef HAVE_POLL_CONTROLLER
185 #define CONFIG_NET_POLL_CONTROLLER
188 #ifndef SKB_DATAREF_SHIFT
189 /* if we do not have the infrastructure to detect if skb_header is cloned
190 just return false in all cases */
191 #define skb_header_cloned(x) 0
195 #define gso_size tso_size
196 #define gso_segs tso_segs
/* Kernel has no GRO: fall back to the direct receive paths. The napi
 * argument is accepted and dropped so call sites stay identical. */
200 #define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \
201 vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan)
202 #define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb)
205 #ifndef NETIF_F_SCTP_CSUM
206 #define NETIF_F_SCTP_CSUM 0
210 #define NETIF_F_LRO (1 << 15)
213 #ifndef NETIF_F_NTUPLE
214 #define NETIF_F_NTUPLE (1 << 27)
218 #define IPPROTO_SCTP 132
221 #ifndef CHECKSUM_PARTIAL
222 #define CHECKSUM_PARTIAL CHECKSUM_HW
223 #define CHECKSUM_COMPLETE CHECKSUM_HW
226 #ifndef __read_mostly
227 #define __read_mostly
231 #define MII_RESV1 0x17 /* Reserved... */
235 #define unlikely(_x) _x
236 #define likely(_x) _x
/* Compat for kernels lacking the PCI_DEVICE() id-table initializer:
 * match on vendor/device and wildcard the subsystem ids. */
244 #define PCI_DEVICE(vend,dev) \
245 .vendor = (vend), .device = (dev), \
246 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
250 #define node_online(node) ((node) == 0)
253 #ifndef num_online_cpus
254 #define num_online_cpus() smp_num_cpus
258 #define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map)
261 #ifndef _LINUX_RANDOM_H
262 #include <linux/random.h>
265 #ifndef DECLARE_BITMAP
266 #ifndef BITS_TO_LONGS
267 #define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
269 #define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
276 #ifndef VLAN_ETH_HLEN
277 #define VLAN_ETH_HLEN 18
280 #ifndef VLAN_ETH_FRAME_LEN
281 #define VLAN_ETH_FRAME_LEN 1518
284 #if !defined(IXGBE_DCA) && !defined(IGB_DCA)
285 #define dca_get_tag(b) 0
286 #define dca_add_requester(a) -1
287 #define dca_remove_requester(b) do { } while(0)
288 #define DCA_PROVIDER_ADD 0x0001
289 #define DCA_PROVIDER_REMOVE 0x0002
292 #ifndef DCA_GET_TAG_TWO_ARGS
293 #define dca3_get_tag(a,b) dca_get_tag(b)
296 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
297 #if defined(__i386__) || defined(__x86_64__)
298 #define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
302 /* taken from 2.6.24 definition in linux/kernel.h */
/* True when x is a multiple of a; the typeof cast keeps the modulo in
 * x's own type so no implicit conversion warnings/truncation occur. */
304 #define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0)
307 #ifndef NETIF_F_HW_VLAN_TX
308 struct _kc_vlan_ethhdr {
309 unsigned char h_dest[ETH_ALEN];
310 unsigned char h_source[ETH_ALEN];
313 __be16 h_vlan_encapsulated_proto;
315 #define vlan_ethhdr _kc_vlan_ethhdr
316 struct _kc_vlan_hdr {
318 __be16 h_vlan_encapsulated_proto;
320 #define vlan_hdr _kc_vlan_hdr
321 #define vlan_tx_tag_present(_skb) 0
322 #define vlan_tx_tag_get(_skb) 0
325 #ifndef VLAN_PRIO_SHIFT
326 #define VLAN_PRIO_SHIFT 13
334 /*****************************************************************************/
335 /* Installations with ethtool version without eeprom, adapter id, or statistics
338 #ifndef ETH_GSTRING_LEN
339 #define ETH_GSTRING_LEN 32
342 #ifndef ETHTOOL_GSTATS
343 #define ETHTOOL_GSTATS 0x1d
344 #undef ethtool_drvinfo
345 #define ethtool_drvinfo k_ethtool_drvinfo
346 struct k_ethtool_drvinfo {
360 struct ethtool_stats {
365 #endif /* ETHTOOL_GSTATS */
367 #ifndef ETHTOOL_PHYS_ID
368 #define ETHTOOL_PHYS_ID 0x1c
369 #endif /* ETHTOOL_PHYS_ID */
371 #ifndef ETHTOOL_GSTRINGS
372 #define ETHTOOL_GSTRINGS 0x1b
373 enum ethtool_stringset {
377 struct ethtool_gstrings {
378 u32 cmd; /* ETHTOOL_GSTRINGS */
379 u32 string_set; /* string set id e.c. ETH_SS_TEST, etc*/
380 u32 len; /* number of strings in the string set */
383 #endif /* ETHTOOL_GSTRINGS */
386 #define ETHTOOL_TEST 0x1a
387 enum ethtool_test_flags {
388 ETH_TEST_FL_OFFLINE = (1 << 0),
389 ETH_TEST_FL_FAILED = (1 << 1),
391 struct ethtool_test {
398 #endif /* ETHTOOL_TEST */
400 #ifndef ETHTOOL_GEEPROM
401 #define ETHTOOL_GEEPROM 0xb
403 struct ethtool_eeprom {
411 struct ethtool_value {
415 #endif /* ETHTOOL_GEEPROM */
417 #ifndef ETHTOOL_GLINK
418 #define ETHTOOL_GLINK 0xa
419 #endif /* ETHTOOL_GLINK */
422 #define ETHTOOL_GWOL 0x5
423 #define ETHTOOL_SWOL 0x6
425 struct ethtool_wolinfo {
429 u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */
431 #endif /* ETHTOOL_GWOL */
433 #ifndef ETHTOOL_GREGS
434 #define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */
435 #define ethtool_regs _kc_ethtool_regs
436 /* for passing big chunks of data */
437 struct _kc_ethtool_regs {
439 u32 version; /* driver-specific, indicates different chips/revs */
443 #endif /* ETHTOOL_GREGS */
445 #ifndef ETHTOOL_GMSGLVL
446 #define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
448 #ifndef ETHTOOL_SMSGLVL
449 #define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */
451 #ifndef ETHTOOL_NWAY_RST
452 #define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */
454 #ifndef ETHTOOL_GLINK
455 #define ETHTOOL_GLINK 0x0000000a /* Get link status */
457 #ifndef ETHTOOL_GEEPROM
458 #define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
460 #ifndef ETHTOOL_SEEPROM
461 #define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */
463 #ifndef ETHTOOL_GCOALESCE
464 #define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
465 /* for configuring coalescing parameters of chip */
466 #define ethtool_coalesce _kc_ethtool_coalesce
467 struct _kc_ethtool_coalesce {
468 u32 cmd; /* ETHTOOL_{G,S}COALESCE */
470 /* How many usecs to delay an RX interrupt after
471 * a packet arrives. If 0, only rx_max_coalesced_frames
474 u32 rx_coalesce_usecs;
476 /* How many packets to delay an RX interrupt after
477 * a packet arrives. If 0, only rx_coalesce_usecs is
478 * used. It is illegal to set both usecs and max frames
479 * to zero as this would cause RX interrupts to never be
482 u32 rx_max_coalesced_frames;
484 /* Same as above two parameters, except that these values
485 * apply while an IRQ is being serviced by the host. Not
486 * all cards support this feature and the values are ignored
489 u32 rx_coalesce_usecs_irq;
490 u32 rx_max_coalesced_frames_irq;
492 /* How many usecs to delay a TX interrupt after
493 * a packet is sent. If 0, only tx_max_coalesced_frames
496 u32 tx_coalesce_usecs;
498 /* How many packets to delay a TX interrupt after
499 * a packet is sent. If 0, only tx_coalesce_usecs is
500 * used. It is illegal to set both usecs and max frames
501 * to zero as this would cause TX interrupts to never be
504 u32 tx_max_coalesced_frames;
506 /* Same as above two parameters, except that these values
507 * apply while an IRQ is being serviced by the host. Not
508 * all cards support this feature and the values are ignored
511 u32 tx_coalesce_usecs_irq;
512 u32 tx_max_coalesced_frames_irq;
514 /* How many usecs to delay in-memory statistics
515 * block updates. Some drivers do not have an in-memory
516 * statistic block, and in such cases this value is ignored.
517 * This value must not be zero.
519 u32 stats_block_coalesce_usecs;
521 /* Adaptive RX/TX coalescing is an algorithm implemented by
522 * some drivers to improve latency under low packet rates and
523 * improve throughput under high packet rates. Some drivers
524 * only implement one of RX or TX adaptive coalescing. Anything
525 * not implemented by the driver causes these values to be
528 u32 use_adaptive_rx_coalesce;
529 u32 use_adaptive_tx_coalesce;
531 /* When the packet rate (measured in packets per second)
532 * is below pkt_rate_low, the {rx,tx}_*_low parameters are
536 u32 rx_coalesce_usecs_low;
537 u32 rx_max_coalesced_frames_low;
538 u32 tx_coalesce_usecs_low;
539 u32 tx_max_coalesced_frames_low;
541 /* When the packet rate is below pkt_rate_high but above
542 * pkt_rate_low (both measured in packets per second) the
543 * normal {rx,tx}_* coalescing parameters are used.
546 /* When the packet rate is (measured in packets per second)
547 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
551 u32 rx_coalesce_usecs_high;
552 u32 rx_max_coalesced_frames_high;
553 u32 tx_coalesce_usecs_high;
554 u32 tx_max_coalesced_frames_high;
556 /* How often to do adaptive coalescing packet rate sampling,
557 * measured in seconds. Must not be zero.
559 u32 rate_sample_interval;
561 #endif /* ETHTOOL_GCOALESCE */
563 #ifndef ETHTOOL_SCOALESCE
564 #define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
566 #ifndef ETHTOOL_GRINGPARAM
567 #define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
568 /* for configuring RX/TX ring parameters */
569 #define ethtool_ringparam _kc_ethtool_ringparam
570 struct _kc_ethtool_ringparam {
571 u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */
573 /* Read only attributes. These indicate the maximum number
574 * of pending RX/TX ring entries the driver will allow the
578 u32 rx_mini_max_pending;
579 u32 rx_jumbo_max_pending;
582 /* Values changeable by the user. The valid values are
583 * in the range 1 to the "*_max_pending" counterpart above.
587 u32 rx_jumbo_pending;
590 #endif /* ETHTOOL_GRINGPARAM */
592 #ifndef ETHTOOL_SRINGPARAM
593 #define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */
595 #ifndef ETHTOOL_GPAUSEPARAM
596 #define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
597 /* for configuring link flow control parameters */
598 #define ethtool_pauseparam _kc_ethtool_pauseparam
599 struct _kc_ethtool_pauseparam {
600 u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
602 /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
603 * being true) the user may set 'autoneg' here non-zero to have the
604 * pause parameters be auto-negotiated too. In such a case, the
605 * {rx,tx}_pause values below determine what capabilities are
608 * If 'autoneg' is zero or the link is not being auto-negotiated,
609 * then {rx,tx}_pause force the driver to use/not-use pause
616 #endif /* ETHTOOL_GPAUSEPARAM */
618 #ifndef ETHTOOL_SPAUSEPARAM
619 #define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */
621 #ifndef ETHTOOL_GRXCSUM
622 #define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
624 #ifndef ETHTOOL_SRXCSUM
625 #define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
627 #ifndef ETHTOOL_GTXCSUM
628 #define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
630 #ifndef ETHTOOL_STXCSUM
631 #define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
634 #define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable
638 #define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable
639 * (ethtool_value). */
642 #define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */
644 #ifndef ETHTOOL_GSTRINGS
645 #define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */
647 #ifndef ETHTOOL_PHYS_ID
648 #define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
650 #ifndef ETHTOOL_GSTATS
651 #define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */
654 #define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */
657 #define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */
660 #ifndef ETHTOOL_BUSINFO_LEN
661 #define ETHTOOL_BUSINFO_LEN 32
664 #ifndef RHEL_RELEASE_CODE
665 /* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */
666 #define RHEL_RELEASE_CODE 0
668 #ifndef RHEL_RELEASE_VERSION
669 #define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))
671 #ifndef AX_RELEASE_CODE
672 #define AX_RELEASE_CODE 0
674 #ifndef AX_RELEASE_VERSION
675 #define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b))
678 /* SuSE version macro is the same as Linux kernel version */
680 #define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c)
682 #ifndef SLE_VERSION_CODE
683 #ifdef CONFIG_SUSE_KERNEL
684 /* SLES11 GA is 2.6.27 based */
685 #if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) )
686 #define SLE_VERSION_CODE SLE_VERSION(11,0,0)
687 #elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) )
688 /* SLES11 SP1 is 2.6.32 based */
689 #define SLE_VERSION_CODE SLE_VERSION(11,1,0)
691 #define SLE_VERSION_CODE 0
693 #else /* CONFIG_SUSE_KERNEL */
694 #define SLE_VERSION_CODE 0
695 #endif /* CONFIG_SUSE_KERNEL */
696 #endif /* SLE_VERSION_CODE */
701 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
703 #endif /* __KLOCWORK__ */
705 /*****************************************************************************/
707 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
709 /**************************************/
712 #ifndef pci_set_dma_mask
713 #define pci_set_dma_mask _kc_pci_set_dma_mask
714 extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
717 #ifndef pci_request_regions
718 #define pci_request_regions _kc_pci_request_regions
719 extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
722 #ifndef pci_release_regions
723 #define pci_release_regions _kc_pci_release_regions
724 extern void _kc_pci_release_regions(struct pci_dev *pdev);
727 /**************************************/
728 /* NETWORK DRIVER API */
730 #ifndef alloc_etherdev
731 #define alloc_etherdev _kc_alloc_etherdev
732 extern struct net_device * _kc_alloc_etherdev(int sizeof_priv);
735 #ifndef is_valid_ether_addr
736 #define is_valid_ether_addr _kc_is_valid_ether_addr
737 extern int _kc_is_valid_ether_addr(u8 *addr);
740 /**************************************/
744 #define INIT_TQUEUE(_tq, _routine, _data) \
746 INIT_LIST_HEAD(&(_tq)->list); \
748 (_tq)->routine = _routine; \
749 (_tq)->data = _data; \
753 #endif /* 2.4.3 => 2.4.0 */
755 /*****************************************************************************/
756 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
/* Kernels older than 2.4.5 do not ship these MII definitions; provide
 * the subset of generic PHY registers and bit masks the driver uses. */
757 /* Generic MII registers. */
758 #define MII_BMCR 0x00 /* Basic mode control register */
759 #define MII_BMSR 0x01 /* Basic mode status register */
760 #define MII_PHYSID1 0x02 /* PHYS ID 1 */
761 #define MII_PHYSID2 0x03 /* PHYS ID 2 */
762 #define MII_ADVERTISE 0x04 /* Advertisement control reg */
763 #define MII_LPA 0x05 /* Link partner ability reg */
764 #define MII_EXPANSION 0x06 /* Expansion register */
765 /* Basic mode control register. */
766 #define BMCR_FULLDPLX 0x0100 /* Full duplex */
767 #define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
768 /* Basic mode status register. */
769 #define BMSR_ERCAP 0x0001 /* Ext-reg capability */
770 #define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
771 #define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
772 #define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
773 #define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
774 #define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
775 /* Advertisement control register. */
776 #define ADVERTISE_CSMA 0x0001 /* Only selector supported */
777 #define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
778 #define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
779 #define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
780 #define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
/* convenience mask: advertise every 10/100 speed/duplex combination */
781 #define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
782 ADVERTISE_100HALF | ADVERTISE_100FULL)
783 /* Expansion register for auto-negotiation. */
784 #define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */
787 /*****************************************************************************/
789 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
791 #ifndef pci_set_power_state
792 #define pci_set_power_state _kc_pci_set_power_state
793 extern int _kc_pci_set_power_state(struct pci_dev *dev, int state);
796 #ifndef pci_enable_wake
797 #define pci_enable_wake _kc_pci_enable_wake
798 extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
801 #ifndef pci_disable_device
802 #define pci_disable_device _kc_pci_disable_device
803 extern void _kc_pci_disable_device(struct pci_dev *pdev);
806 /* PCI PM entry point syntax changed, so don't support suspend/resume */
809 #endif /* 2.4.6 => 2.4.3 */
811 #ifndef HAVE_PCI_SET_MWI
812 #define pci_set_mwi(X) pci_write_config_word(X, \
813 PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
814 PCI_COMMAND_INVALIDATE);
815 #define pci_clear_mwi(X) pci_write_config_word(X, \
816 PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
817 ~PCI_COMMAND_INVALIDATE);
820 /*****************************************************************************/
821 /* 2.4.10 => 2.4.9 */
822 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
824 /**************************************/
827 #ifndef MODULE_LICENSE
828 #define MODULE_LICENSE(X)
831 /**************************************/
/* Single-evaluation min/max using GCC statement expressions: each
 * argument is copied into a local exactly once, so side effects in the
 * arguments are not duplicated. The (void)(&_x == &_y) comparison is a
 * compile-time type check — comparing pointers to different types draws
 * a compiler warning when x and y disagree. */
835 #define min(x,y) ({ \
836 const typeof(x) _x = (x); \
837 const typeof(y) _y = (y); \
838 (void) (&_x == &_y); \
839 _x < _y ? _x : _y; })
842 #define max(x,y) ({ \
843 const typeof(x) _x = (x); \
844 const typeof(y) _y = (y); \
845 (void) (&_x == &_y); \
846 _x > _y ? _x : _y; })
848 #define min_t(type,x,y) ({ \
851 _x < _y ? _x : _y; })
853 #define max_t(type,x,y) ({ \
856 _x > _y ? _x : _y; })
858 #ifndef list_for_each_safe
/* List walk that tolerates removal of the current entry: the successor
 * is cached in n before the loop body runs, so deleting pos is safe. */
859 #define list_for_each_safe(pos, n, head) \
860 for (pos = (head)->next, n = pos->next; pos != (head); \
861 pos = n, n = pos->next)
864 #ifndef ____cacheline_aligned_in_smp
866 #define ____cacheline_aligned_in_smp ____cacheline_aligned
868 #define ____cacheline_aligned_in_smp
869 #endif /* CONFIG_SMP */
872 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
873 extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...);
874 #define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args)
875 extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
876 #define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args)
877 #else /* 2.4.8 => 2.4.9 */
878 extern int snprintf(char * buf, size_t size, const char *fmt, ...);
879 extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
881 #endif /* 2.4.10 -> 2.4.6 */
884 /*****************************************************************************/
885 /* 2.4.12 => 2.4.10 */
886 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) )
887 #ifndef HAVE_NETIF_MSG
888 #define HAVE_NETIF_MSG 1
890 NETIF_MSG_DRV = 0x0001,
891 NETIF_MSG_PROBE = 0x0002,
892 NETIF_MSG_LINK = 0x0004,
893 NETIF_MSG_TIMER = 0x0008,
894 NETIF_MSG_IFDOWN = 0x0010,
895 NETIF_MSG_IFUP = 0x0020,
896 NETIF_MSG_RX_ERR = 0x0040,
897 NETIF_MSG_TX_ERR = 0x0080,
898 NETIF_MSG_TX_QUEUED = 0x0100,
899 NETIF_MSG_INTR = 0x0200,
900 NETIF_MSG_TX_DONE = 0x0400,
901 NETIF_MSG_RX_STATUS = 0x0800,
902 NETIF_MSG_PKTDATA = 0x1000,
903 NETIF_MSG_HW = 0x2000,
904 NETIF_MSG_WOL = 0x4000,
/* Per-category message-level predicates: each tests one NETIF_MSG_*
 * bit in the device private struct's msg_enable bitmask. */
907 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
908 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
909 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
910 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
911 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
912 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
913 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
914 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
915 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
916 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
917 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
918 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
919 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
920 #endif /* !HAVE_NETIF_MSG */
921 #endif /* 2.4.12 => 2.4.10 */
923 /*****************************************************************************/
924 /* 2.4.13 => 2.4.12 */
925 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
927 /**************************************/
928 /* PCI DMA MAPPING */
931 #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
935 #define pci_map_page _kc_pci_map_page
936 extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
939 #ifndef pci_unmap_page
940 #define pci_unmap_page _kc_pci_unmap_page
941 extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
944 /* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
946 #undef DMA_32BIT_MASK
947 #define DMA_32BIT_MASK 0xffffffff
948 #undef DMA_64BIT_MASK
949 #define DMA_64BIT_MASK 0xffffffff
951 /**************************************/
955 #define cpu_relax() rep_nop()
959 unsigned char h_dest[ETH_ALEN];
960 unsigned char h_source[ETH_ALEN];
961 unsigned short h_vlan_proto;
962 unsigned short h_vlan_TCI;
963 unsigned short h_vlan_encapsulated_proto;
965 #endif /* 2.4.13 => 2.4.12 */
967 /*****************************************************************************/
968 /* 2.4.17 => 2.4.12 */
969 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )
972 #define __devexit_p(x) &(x)
975 #endif /* 2.4.17 => 2.4.13 */
977 /*****************************************************************************/
978 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) )
979 #define NETIF_MSG_HW 0x2000
980 #define NETIF_MSG_WOL 0x4000
983 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
985 #ifndef netif_msg_wol
986 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
990 /*****************************************************************************/
992 /*****************************************************************************/
993 /* 2.4.20 => 2.4.19 */
994 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
996 /* we won't support NAPI on less than 2.4.20 */
999 #undef CONFIG_IXGBE_NAPI
1002 #endif /* 2.4.20 => 2.4.19 */
1004 /*****************************************************************************/
1005 /* 2.4.22 => 2.4.17 */
1006 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
1007 #define pci_name(x) ((x)->slot_name)
1010 /*****************************************************************************/
1011 /* 2.4.22 => 2.4.17 */
1013 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
1014 #ifndef IXGBE_NO_LRO
1015 /* Don't enable LRO for these legacy kernels */
1016 #define IXGBE_NO_LRO
1020 /*****************************************************************************/
1021 /*****************************************************************************/
1022 /* 2.4.23 => 2.4.22 */
1023 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
1024 /*****************************************************************************/
1026 #ifndef netif_poll_disable
1027 #define netif_poll_disable(x) _kc_netif_poll_disable(x)
1028 static inline void _kc_netif_poll_disable(struct net_device *netdev)
1030 while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
1032 current->state = TASK_INTERRUPTIBLE;
1033 schedule_timeout(1);
1037 #ifndef netif_poll_enable
1038 #define netif_poll_enable(x) _kc_netif_poll_enable(x)
1039 static inline void _kc_netif_poll_enable(struct net_device *netdev)
1041 clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
1045 #ifndef netif_tx_disable
1046 #define netif_tx_disable(x) _kc_netif_tx_disable(x)
1047 static inline void _kc_netif_tx_disable(struct net_device *dev)
1049 spin_lock_bh(&dev->xmit_lock);
1050 netif_stop_queue(dev);
1051 spin_unlock_bh(&dev->xmit_lock);
1054 #else /* 2.4.23 => 2.4.22 */
1056 #endif /* 2.4.23 => 2.4.22 */
1058 /*****************************************************************************/
1059 /* 2.6.4 => 2.6.0 */
1060 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
1061 ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
1062 LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
1063 #define ETHTOOL_OPS_COMPAT
1064 #endif /* 2.6.4 => 2.6.0 */
1066 /*****************************************************************************/
1067 /* 2.5.71 => 2.4.x */
1068 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
1069 #define sk_protocol protocol
1070 #define pci_get_device pci_find_device
1071 #endif /* 2.5.70 => 2.4.x */
1073 /*****************************************************************************/
1074 /* < 2.4.27 or 2.6.0 <= 2.6.5 */
1075 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
1076 ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
1077 LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
1079 #ifndef netif_msg_init
1080 #define netif_msg_init _kc_netif_msg_init
1081 static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
1084 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
1085 return default_msg_enable_bits;
1086 if (debug_value == 0) /* no output */
1088 /* set low N bits */
1089 return (1 << debug_value) -1;
1093 #endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
1094 /*****************************************************************************/
1095 #if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
1096 (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
1097 ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
1098 #define netdev_priv(x) x->priv
1101 /*****************************************************************************/
1103 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
1104 #include <linux/rtnetlink.h>
1105 #undef pci_register_driver
1106 #define pci_register_driver pci_module_init
1109 * Most of the dma compat code is copied/modifed from the 2.4.37
1110 * /include/linux/libata-compat.h header file
1112 /* These definitions mirror those in pci.h, so they can be used
1113 * interchangeably with their PCI_ counterparts */
1114 enum dma_data_direction {
1115 DMA_BIDIRECTIONAL = 0,
1117 DMA_FROM_DEVICE = 2,
1122 struct pci_dev pdev;
1125 static inline struct pci_dev *to_pci_dev (struct device *dev)
1127 return (struct pci_dev *) dev;
1129 static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
1131 return (struct device *) pdev;
/* Pre-device-model logging shims: route dev_err/dev_info/dev_warn onto
 * plain printk, prefixing each message with the severity string and the
 * PCI device name so output stays attributable to the adapter. */
1134 #define pdev_printk(lvl, pdev, fmt, args...) \
1135 printk("%s %s: " fmt, lvl, pci_name(pdev), ## args)
1136 #define dev_err(dev, fmt, args...) \
1137 pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args)
1138 #define dev_info(dev, fmt, args...) \
1139 pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
1140 #define dev_warn(dev, fmt, args...) \
1141 pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
/* Map the generic dma_* API onto the older pci_* consistent-DMA API by
 * converting the struct device pointer back to its pci_dev. */
1143 /* NOTE: dangerous! we ignore the 'gfp' argument */
1144 #define dma_alloc_coherent(dev,sz,dma,gfp) \
1145 pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
1146 #define dma_free_coherent(dev,sz,addr,dma_addr) \
1147 pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))
1149 #define dma_map_page(dev,a,b,c,d) \
1150 pci_map_page(to_pci_dev(dev),(a),(b),(c),(d))
1151 #define dma_unmap_page(dev,a,b,c) \
1152 pci_unmap_page(to_pci_dev(dev),(a),(b),(c))
1154 #define dma_map_single(dev,a,b,c) \
1155 pci_map_single(to_pci_dev(dev),(a),(b),(c))
1156 #define dma_unmap_single(dev,a,b,c) \
1157 pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
1159 #define dma_sync_single(dev,a,b,c) \
1160 pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
1162 /* for range just sync everything, that's all the pci API can do */
/* syncs [addr, addr+off+sz) — wider than asked, but always safe */
1163 #define dma_sync_single_range(dev,addr,off,sz,dir) \
1164 pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir))
1166 #define dma_set_mask(dev,mask) \
1167 pci_set_dma_mask(to_pci_dev(dev),(mask))
1169 /* hlist_* code - double linked lists */
1171 struct hlist_node *first;
1175 struct hlist_node *next, **pprev;
1178 static inline void __hlist_del(struct hlist_node *n)
1180 struct hlist_node *next = n->next;
1181 struct hlist_node **pprev = n->pprev;
1184 next->pprev = pprev;
1187 static inline void hlist_del(struct hlist_node *n)
1194 static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
1196 struct hlist_node *first = h->first;
1199 first->pprev = &n->next;
1201 n->pprev = &h->first;
1204 static inline int hlist_empty(const struct hlist_head *h)
1208 #define HLIST_HEAD_INIT { .first = NULL }
1209 #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
1210 #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
1211 static inline void INIT_HLIST_NODE(struct hlist_node *h)
1216 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
1218 #define hlist_for_each_entry(tpos, pos, head, member) \
1219 for (pos = (head)->first; \
1220 pos && ({ prefetch(pos->next); 1;}) && \
1221 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
1224 #define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
1225 for (pos = (head)->first; \
1226 pos && ({ n = pos->next; 1; }) && \
1227 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
1231 #define might_sleep()
1234 static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
1238 #endif /* <= 2.5.0 */
/* Backports for kernels older than 2.5.28 and 2.6.0. */
1240 /*****************************************************************************/
1241 /* 2.5.28 => 2.4.23 */
1242 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
/* On very old kernels synchronize_irq() took no argument; wrap it so the
 * driver can always pass the IRQ number. */
1244 static inline void _kc_synchronize_irq(void)
1248 #undef synchronize_irq
1249 #define synchronize_irq(X) _kc_synchronize_irq()
/* Map the 2.5+ workqueue API onto the old task-queue (tqueue) API. */
1251 #include <linux/tqueue.h>
1252 #define work_struct tq_struct
1254 #define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
1256 #define container_of list_entry
1257 #define schedule_work schedule_task
1258 #define flush_scheduled_work flush_scheduled_tasks
1259 #define cancel_work_sync(x) flush_scheduled_work()
1261 #endif /* 2.5.28 => 2.4.23 */
1263 /*****************************************************************************/
1264 /* 2.6.0 => 2.5.28 */
1265 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
/* get_cpu()/put_cpu() degrade to plain smp_processor_id() with no
 * preemption control on these kernels. */
1267 #define get_cpu() smp_processor_id()
1269 #define put_cpu() do { } while(0)
1270 #define MODULE_INFO(version, _version)
1271 #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1272 #define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
1274 #define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
1276 #define dma_set_coherent_mask(dev,mask) 1
1279 #define dev_put(dev) __dev_put(dev)
1281 #ifndef skb_fill_page_desc
1282 #define skb_fill_page_desc _kc_skb_fill_page_desc
1283 extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
1287 #define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
1290 #define page_count(p) atomic_read(&(p)->count)
1296 #define MAX_NUMNODES 1
1298 /* find_first_bit and find_next bit are not defined for most
1299 * 2.4 kernels (except for the redhat 2.4.21 kernels
1301 #include <linux/bitops.h>
1302 #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
1303 #undef find_next_bit
1304 #define find_next_bit _kc_find_next_bit
1305 extern unsigned long _kc_find_next_bit(const unsigned long *addr,
1307 unsigned long offset);
1308 #define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
/* netdev_name() backport: a '%' still in dev->name means the name template
 * was never instantiated, i.e. the netdev is unregistered.
 * NOTE(review): braces and the final "return dev->name;" path are among the
 * lines dropped from this listing. */
1312 static inline const char *_kc_netdev_name(const struct net_device *dev)
1314 if (strchr(dev->name, '%'))
1315 return "(unregistered net_device)";
1318 #define netdev_name(netdev) _kc_netdev_name(netdev)
1319 #endif /* netdev_name */
1322 #define strlcpy _kc_strlcpy
1323 extern size_t _kc_strlcpy(char *dest, const char *src, size_t size);
1324 #endif /* strlcpy */
1326 #endif /* 2.6.0 => 2.5.28 */
/* Backports for kernels older than 2.6.4 through 2.6.8. */
1328 /*****************************************************************************/
1329 /* 2.6.4 => 2.6.0 */
1330 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
1331 #define MODULE_VERSION(_version) MODULE_INFO(version, _version)
1332 #endif /* 2.6.4 => 2.6.0 */
1334 /*****************************************************************************/
1335 /* 2.6.5 => 2.6.0 */
1336 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
/* The directional _for_cpu/_for_device sync variants did not exist yet;
 * both directions fall back to the single plain sync call. */
1337 #define dma_sync_single_for_cpu dma_sync_single
1338 #define dma_sync_single_for_device dma_sync_single
1339 #define dma_sync_single_range_for_cpu dma_sync_single_range
1340 #define dma_sync_single_range_for_device dma_sync_single_range
1341 #ifndef pci_dma_mapping_error
1342 #define pci_dma_mapping_error _kc_pci_dma_mapping_error
1343 static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
1345 return dma_addr == 0;
/* A DMA address of 0 is treated as the mapping-failure sentinel here. */
1348 #endif /* 2.6.5 => 2.6.0 */
1350 /*****************************************************************************/
1351 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
1352 extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...);
1353 #define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args)
1354 #endif /* < 2.6.4 */
1356 /*****************************************************************************/
1357 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
1358 /* taken from 2.6 include/linux/bitmap.h */
1360 #define bitmap_zero _kc_bitmap_zero
1361 static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
1363 if (nbits <= BITS_PER_LONG)
1366 int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
1367 memset(dst, 0, len);
1370 #define random_ether_addr _kc_random_ether_addr
1371 static inline void _kc_random_ether_addr(u8 *addr)
1373 get_random_bytes(addr, ETH_ALEN);
1374 addr[0] &= 0xfe; /* clear multicast */
1375 addr[0] |= 0x02; /* set local assignment */
/* Generates a random MAC that is unicast (bit 0 cleared) and locally
 * administered (bit 1 set), per the IEEE 802 address format. */
1377 #define page_to_nid(x) 0
1379 #endif /* < 2.6.6 */
1381 /*****************************************************************************/
1382 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
1384 #define if_mii _kc_if_mii
1385 static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
1387 return (struct mii_ioctl_data *) &rq->ifr_ifru;
1393 #endif /* < 2.6.7 */
1395 /*****************************************************************************/
1396 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
1397 #ifndef PCI_EXP_DEVCTL
1398 #define PCI_EXP_DEVCTL 8
1400 #ifndef PCI_EXP_DEVCTL_CERE
1401 #define PCI_EXP_DEVCTL_CERE 0x0001
/* msleep() backport: uninterruptible sleep of ~x milliseconds via
 * schedule_timeout().  NOTE(review): the macro's closing "} while" line is
 * among the lines dropped from this listing. */
1403 #define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \
1404 schedule_timeout((x * HZ)/1000 + 2); \
1407 #endif /* < 2.6.8 */
/* Backports for kernels older than 2.6.9 and 2.6.10. */
1409 /*****************************************************************************/
1410 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
1411 #include <net/dsfield.h>
/* kcalloc() backport: no per-element overflow check — the n*size product is
 * computed directly, unlike the real kcalloc(). */
1415 #define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
1416 extern void *_kc_kzalloc(size_t size, int flags);
1418 #define MSEC_PER_SEC 1000L
/* jiffies <-> milliseconds conversions, specialized at preprocessing time
 * on the HZ/MSEC_PER_SEC relationship to avoid 64-bit division. */
1419 static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
1421 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
1422 return (MSEC_PER_SEC / HZ) * j;
1423 #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
1424 return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
1426 return (j * MSEC_PER_SEC) / HZ;
1429 static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
1431 if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
1432 return MAX_JIFFY_OFFSET;
1433 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
1434 return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
1435 #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
1436 return m * (HZ / MSEC_PER_SEC);
1438 return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
/* msleep_interruptible() backport: sleeps until the timeout expires or a
 * signal is pending; returns the remaining time in milliseconds. */
1442 #define msleep_interruptible _kc_msleep_interruptible
1443 static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
1445 unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
1447 while (timeout && !signal_pending(current)) {
1448 __set_current_state(TASK_INTERRUPTIBLE);
1449 timeout = schedule_timeout(timeout);
1451 return _kc_jiffies_to_msecs(timeout);
1454 /* Basic mode control register. */
1455 #define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
1476 static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
1478 return (struct vlan_ethhdr *)skb->mac.raw;
1481 /* Wake-On-Lan options. */
1482 #define WAKE_PHY (1 << 0)
1483 #define WAKE_UCAST (1 << 1)
1484 #define WAKE_MCAST (1 << 2)
1485 #define WAKE_BCAST (1 << 3)
1486 #define WAKE_ARP (1 << 4)
1487 #define WAKE_MAGIC (1 << 5)
1488 #define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */
/* skb_header_pointer() backport: return a direct pointer into the linear
 * skb data when the requested range fits, otherwise copy into the caller's
 * buffer via skb_copy_bits(). */
1490 #define skb_header_pointer _kc_skb_header_pointer
1491 static inline void *_kc_skb_header_pointer(const struct sk_buff *skb,
1492 int offset, int len, void *buffer)
1494 int hlen = skb_headlen(skb);
1496 if (hlen - offset >= len)
1497 return skb->data + offset;
/* NOTE(review): the error-return and "return buffer;" lines of this helper
 * are among the lines dropped from this listing. */
1499 #ifdef MAX_SKB_FRAGS
1500 if (skb_copy_bits(skb, offset, buffer, len) < 0)
1508 #ifndef NETDEV_TX_OK
1509 #define NETDEV_TX_OK 0
1511 #ifndef NETDEV_TX_BUSY
1512 #define NETDEV_TX_BUSY 1
1514 #ifndef NETDEV_TX_LOCKED
1515 #define NETDEV_TX_LOCKED -1
1522 #endif /* < 2.6.9 */
1524 /*****************************************************************************/
1525 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
1526 #ifdef module_param_array_named
1527 #undef module_param_array_named
/* Re-expand module_param_array_named() because the 2.6.10 form changed the
 * kparam_array layout used by module parameter arrays. */
1528 #define module_param_array_named(name, array, type, nump, perm) \
1529 static struct kparam_array __param_arr_##name \
1530 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
1531 sizeof(array[0]), array }; \
1532 module_param_call(name, param_array_set, param_array_get, \
1533 &__param_arr_##name, perm)
1534 #endif /* module_param_array_named */
/* NOTE(review): the opening "/" + "*" line of the comment below (original
 * line 1535) is among the lines dropped from this listing. */
1536 * num_online is broken for all < 2.6.10 kernels. This is needed to support
1537 * Node module parameter of ixgbe.
1539 #undef num_online_nodes
1540 #define num_online_nodes(n) 1
1541 extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
1542 #undef node_online_map
1543 #define node_online_map _kcompat_node_online_map
1544 #endif /* < 2.6.10 */
/* Backports for kernels older than 2.6.11 and 2.6.12. */
1546 /*****************************************************************************/
1547 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
/* Minimal PCI power-management constants for kernels predating pci_power_t. */
1552 #define PCI_D3cold 4
1553 typedef int pci_power_t;
1554 #define pci_choose_state(pdev,state) state
1555 #define PMSG_SUSPEND 3
1556 #define PCI_EXP_LNKCTL 16
1560 #ifndef ARCH_HAS_PREFETCH
1564 #ifndef NET_IP_ALIGN
1565 #define NET_IP_ALIGN 2
/* jiffies <-> microseconds conversions, same specialization scheme as the
 * msecs helpers above. */
1568 #define KC_USEC_PER_SEC 1000000L
1569 #define usecs_to_jiffies _kc_usecs_to_jiffies
1570 static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
1572 #if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
1573 return (KC_USEC_PER_SEC / HZ) * j;
1574 #elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
1575 return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
1577 return (j * KC_USEC_PER_SEC) / HZ;
1580 static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
1582 if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
1583 return MAX_JIFFY_OFFSET;
1584 #if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
1585 return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
1586 #elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
1587 return m * (HZ / KC_USEC_PER_SEC);
1589 return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
1592 #endif /* < 2.6.11 */
1594 /*****************************************************************************/
1595 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
1596 #include <linux/reboot.h>
/* Older kernels need a reboot notifier so the driver can quiesce on reboot. */
1597 #define USE_REBOOT_NOTIFIER
1599 /* Generic MII registers. */
1600 #define MII_CTRL1000 0x09 /* 1000BASE-T control */
1601 #define MII_STAT1000 0x0a /* 1000BASE-T status */
1602 /* Advertisement control register. */
1603 #define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
1604 #define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */
1605 /* 1000BASE-T Control register */
1606 #define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
1607 #ifndef is_zero_ether_addr
1608 #define is_zero_ether_addr _kc_is_zero_ether_addr
1609 static inline int _kc_is_zero_ether_addr(const u8 *addr)
/* True iff all six MAC address bytes are zero (bytes OR'd together). */
1611 return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5])
1613 #endif /* is_zero_ether_addr */
1614 #ifndef is_multicast_ether_addr
1615 #define is_multicast_ether_addr _kc_is_multicast_ether_addr
1616 static inline int _kc_is_multicast_ether_addr(const u8 *addr)
1618 return addr[0] & 0x01;
1620 #endif /* is_multicast_ether_addr */
1621 #endif /* < 2.6.12 */
/* Backports for kernels older than 2.6.13 through 2.6.15. */
1623 /*****************************************************************************/
1624 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
1626 #define kstrdup _kc_kstrdup
1627 extern char *_kc_kstrdup(const char *s, unsigned int gfp);
1629 #endif /* < 2.6.13 */
1631 /*****************************************************************************/
1632 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
/* pm_message_t did not exist yet; a plain u32 stands in for it. */
1633 #define pm_message_t u32
1635 #define kzalloc _kc_kzalloc
1636 extern void *_kc_kzalloc(size_t size, int flags);
1639 /* Generic MII registers. */
1640 #define MII_ESTATUS 0x0f /* Extended Status */
1641 /* Basic mode status register. */
1642 #define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
1643 /* Extended status register. */
1644 #define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
1645 #define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
1647 #define ADVERTISED_Pause (1 << 13)
1648 #define ADVERTISED_Asym_Pause (1 << 14)
1650 #if (!(RHEL_RELEASE_CODE && \
1651 (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \
1652 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))))
/* gfp_t backport: on exactly 2.6.9 a macro is used (gfp_t may already be a
 * macro there); elsewhere a typedef.  RHEL 4.3..5.0 kernels already carry
 * the typedef and are excluded above. */
1653 #if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t))
1654 #define gfp_t unsigned
1656 typedef unsigned gfp_t;
1658 #endif /* !RHEL4.3->RHEL5.0 */
1660 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) )
1661 #ifdef CONFIG_X86_64
/* On x86_64 the ranged sync helpers ignore the offset and sync the whole
 * mapping via the non-ranged calls. */
1662 #define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \
1663 dma_sync_single_for_cpu(dev, dma_handle, size, dir)
1664 #define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \
1665 dma_sync_single_for_device(dev, dma_handle, size, dir)
1668 #endif /* < 2.6.14 */
1670 /*****************************************************************************/
1671 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
1672 #ifndef vmalloc_node
1673 #define vmalloc_node(a,b) vmalloc(a)
1674 #endif /* vmalloc_node*/
/* setup_timer() backport.  NOTE(review): the surrounding "do {" / "} while"
 * lines of this macro are among the lines dropped from this listing. */
1676 #define setup_timer(_timer, _function, _data) \
1678 (_timer)->function = _function; \
1679 (_timer)->data = _data; \
1680 init_timer(_timer); \
1682 #ifndef device_can_wakeup
1683 #define device_can_wakeup(dev) (1)
1685 #ifndef device_set_wakeup_enable
1686 #define device_set_wakeup_enable(dev, val) do{}while(0)
1688 #ifndef device_init_wakeup
1689 #define device_init_wakeup(dev,val) do {} while (0)
/* compare_ether_addr() backport: XORs the address as three u16 words;
 * returns non-zero when the addresses differ (note: requires 2-byte
 * alignment of both addresses, as the u16 casts show). */
1691 static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2)
1693 const u16 *a = (const u16 *) addr1;
1694 const u16 *b = (const u16 *) addr2;
1696 return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
1698 #undef compare_ether_addr
1699 #define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2)
1700 #endif /* < 2.6.15 */
/* Backports for kernels older than 2.6.16 through 2.6.18. */
1702 /*****************************************************************************/
1703 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
1705 #define DEFINE_MUTEX(x) DECLARE_MUTEX(x)
1706 #define mutex_lock(x) down_interruptible(x)
1707 #define mutex_unlock(x) up(x)
/* NOTE(review): mutex_lock() maps to down_interruptible(), whose return
 * value is discarded by callers of mutex_lock(); on a signal the semaphore
 * is NOT held.  This matches the shipped kcompat but is worth verifying
 * against callers. */
1709 #ifndef ____cacheline_internodealigned_in_smp
1711 #define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp
1713 #define ____cacheline_internodealigned_in_smp
1714 #endif /* CONFIG_SMP */
1715 #endif /* ____cacheline_internodealigned_in_smp */
1717 #else /* 2.6.16 and above */
/* PCI error-recovery services (pci_error_handlers) exist from 2.6.16 on. */
1719 #define HAVE_PCI_ERS
1720 #endif /* < 2.6.16 */
1722 /*****************************************************************************/
1723 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) )
1724 #ifndef first_online_node
1725 #define first_online_node 0
1728 #define NET_SKB_PAD 16
1730 #endif /* < 2.6.17 */
1732 /*****************************************************************************/
1733 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
/* Pre-2.6.18 interrupt handlers returned void; IRQF_* flags were SA_*. */
1736 #define irqreturn_t void
1741 #ifndef IRQF_PROBE_SHARED
1743 #define IRQF_PROBE_SHARED SA_PROBEIRQ
1745 #define IRQF_PROBE_SHARED 0
1750 #define IRQF_SHARED SA_SHIRQ
1754 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
1757 #ifndef FIELD_SIZEOF
1758 #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
/* skb_is_gso(): non-zero gso_size marks a GSO skb; stubbed to 0 where the
 * shared-info field does not exist. */
1763 #define skb_is_gso _kc_skb_is_gso
1764 static inline int _kc_skb_is_gso(const struct sk_buff *skb)
1766 return skb_shinfo(skb)->gso_size;
1769 #define skb_is_gso(a) 0
1773 #ifndef resource_size_t
1774 #define resource_size_t unsigned long
/* skb_padto() backport: pad the skb with zeroes up to len bytes; a no-op
 * when the skb is already long enough.  NOTE(review): braces and the
 * "return 0;" of the fast path are among the lines dropped from this
 * listing. */
1780 #define skb_pad(x,y) _kc_skb_pad(x, y)
1781 int _kc_skb_pad(struct sk_buff *skb, int pad);
1785 #define skb_padto(x,y) _kc_skb_padto(x, y)
1786 static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len)
1788 unsigned int size = skb->len;
1789 if(likely(size >= len))
1791 return _kc_skb_pad(skb, len - size);
1794 #ifndef DECLARE_PCI_UNMAP_ADDR
1795 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
1796 dma_addr_t ADDR_NAME
1797 #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
1799 #define pci_unmap_addr(PTR, ADDR_NAME) \
1801 #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
1802 (((PTR)->ADDR_NAME) = (VAL))
1803 #define pci_unmap_len(PTR, LEN_NAME) \
1805 #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
1806 (((PTR)->LEN_NAME) = (VAL))
1807 #endif /* DECLARE_PCI_UNMAP_ADDR */
1808 #endif /* < 2.6.18 */
/* Backports for kernels older than 2.6.19. */
1810 /*****************************************************************************/
1811 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
1813 #ifndef DIV_ROUND_UP
1814 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
/* irq_handler_t backport: 2.6.19 dropped the pt_regs argument from IRQ
 * handlers.  _kc_request_irq wraps the new two-argument handler so it can
 * be registered with the old three-argument request_irq(). */
1816 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
1817 #if (!((RHEL_RELEASE_CODE && \
1818 ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \
1819 RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \
1820 (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))) || \
1821 (AX_RELEASE_CODE && AX_RELEASE_CODE > AX_RELEASE_VERSION(3,0))))
1822 typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
1824 #if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
1825 #undef CONFIG_INET_LRO
1826 #undef CONFIG_INET_LRO_MODULE
1828 #undef CONFIG_FCOE_MODULE
1830 typedef irqreturn_t (*new_handler_t)(int, void*);
1831 static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
1833 typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
1834 typedef void (*new_handler_t)(int, void*);
1835 static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
1836 #endif /* >= 2.5.x */
/* NOTE(review): the cast below relies on calling a two-argument handler
 * through a three-argument pointer type; this matches the shipped kcompat
 * but is technically type-punning of function pointers. */
1838 irq_handler_t new_handler = (irq_handler_t) handler;
1839 return request_irq(irq, new_handler, flags, devname, dev_id);
1843 #define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
1845 #define irq_handler_t new_handler_t
1846 /* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */
1847 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
1848 #define PCIE_CONFIG_SPACE_LEN 256
1849 #define PCI_CONFIG_SPACE_LEN 64
1850 #define PCIE_LINK_STATUS 0x12
1851 #define pci_config_space_ich8lan() do {} while(0)
/* Replace save/restore with _kc_* versions that also cover the extended
 * PCIe config space on kernels whose stock helpers do not. */
1852 #undef pci_save_state
1853 extern int _kc_pci_save_state(struct pci_dev *);
1854 #define pci_save_state(pdev) _kc_pci_save_state(pdev)
1855 #undef pci_restore_state
1856 extern void _kc_pci_restore_state(struct pci_dev *);
1857 #define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
1858 #endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
/* _kc_free_netdev also releases the state the kcompat layer attached to
 * the netdev before freeing it. */
1862 extern void _kc_free_netdev(struct net_device *);
1863 #define free_netdev(netdev) _kc_free_netdev(netdev)
1865 static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
1869 #define pci_disable_pcie_error_reporting(dev) do {} while (0)
1870 #define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
1872 extern void *_kc_kmemdup(const void *src, size_t len, unsigned gfp);
1873 #define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp)
1880 #include <linux/aer.h>
1881 #include <linux/string.h>
1882 #endif /* < 2.6.19 */
/* Backports for kernels older than 2.6.20. */
1884 /*****************************************************************************/
1885 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
1886 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
/* 2.6.20 dropped the void* data argument from work handlers; re-expand
 * INIT_WORK() for the old struct work_struct layout, storing the work
 * pointer itself as the handler data. */
1888 #define INIT_WORK(_work, _func) \
1890 INIT_LIST_HEAD(&(_work)->entry); \
1891 (_work)->pending = 0; \
1892 (_work)->func = (void (*)(void *))_func; \
1893 (_work)->data = _work; \
1894 init_timer(&(_work)->timer); \
1899 #define PCI_VDEVICE(ven, dev) \
1900 PCI_VENDOR_ID_##ven, (dev), \
1901 PCI_ANY_ID, PCI_ANY_ID, 0, 0
/* PCI_VDEVICE() expands to the vendor/device/subsystem initializer fields
 * of a struct pci_device_id entry. */
1904 #ifndef round_jiffies
1905 #define round_jiffies(x) x
1908 #define csum_offset csum
1910 #define HAVE_EARLY_VMALLOC_NODE
1911 #define dev_to_node(dev) -1
1913 /* remove compiler warning with b=b, for unused variable */
1914 #define set_dev_node(a, b) do { (b) = (b); } while(0)
1916 #if (!(RHEL_RELEASE_CODE && \
1917 (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
1918 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
1919 (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \
1920 !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
1921 typedef __u16 __bitwise __sum16;
1922 typedef __u32 __bitwise __wsum;
/* __sum16/__wsum checksum types and csum_unfold() are supplied only on
 * kernels whose distro (RHEL/SLES) backports do not already carry them. */
1925 #if (!(RHEL_RELEASE_CODE && \
1926 (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
1927 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
1928 (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \
1929 !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
1930 static inline __wsum csum_unfold(__sum16 n)
1932 return (__force __wsum)n;
1936 #else /* < 2.6.20 */
1937 #define HAVE_DEVICE_NUMA_NODE
1938 #endif /* < 2.6.20 */
/* Backports for kernels older than 2.6.21. */
1940 /*****************************************************************************/
1941 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
1942 #define to_net_dev(class) container_of(class, struct net_device, class_dev)
1943 #define NETDEV_CLASS_DEV
/* vlan_group accessors: pre-2.6.21 kernels expose the vlan_devices array
 * directly instead of the accessor functions.  RHEL > 5.5 already has the
 * accessors and is excluded. */
1944 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
1945 #define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
1946 #define vlan_group_set_device(vg, id, dev) \
1948 if (vg) vg->vlan_devices[id] = dev; \
1950 #endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
1951 #define pci_channel_offline(pdev) (pdev->error_state && \
1952 pdev->error_state != pci_channel_io_normal)
1953 #define pci_request_selected_regions(pdev, bars, name) \
1954 pci_request_regions(pdev, name)
1955 #define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev);
1956 #endif /* < 2.6.21 */
/* Backports for kernels older than 2.6.22. */
1958 /*****************************************************************************/
1959 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
/* 2.6.22 replaced the skb->h/skb->nh/skb->mac unions with accessor
 * functions; map the accessors back onto the old union members. */
1960 #define tcp_hdr(skb) (skb->h.th)
1961 #define tcp_hdrlen(skb) (skb->h.th->doff << 2)
1962 #define skb_transport_offset(skb) (skb->h.raw - skb->data)
1963 #define skb_transport_header(skb) (skb->h.raw)
1964 #define ipv6_hdr(skb) (skb->nh.ipv6h)
1965 #define ip_hdr(skb) (skb->nh.iph)
1966 #define skb_network_offset(skb) (skb->nh.raw - skb->data)
1967 #define skb_network_header(skb) (skb->nh.raw)
1968 #define skb_tail_pointer(skb) skb->tail
1969 #define skb_reset_tail_pointer(skb) \
1971 skb->tail = skb->data; \
1973 #define skb_copy_to_linear_data(skb, from, len) \
1974 memcpy(skb->data, from, len)
1975 #define skb_copy_to_linear_data_offset(skb, offset, from, len) \
1976 memcpy(skb->data + offset, from, len)
1977 #define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
1978 #define pci_register_driver pci_module_init
1979 #define skb_mac_header(skb) skb->mac.raw
1981 #ifdef NETIF_F_MULTI_QUEUE
1982 #ifndef alloc_etherdev_mq
1983 #define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
1985 #endif /* NETIF_F_MULTI_QUEUE */
1988 #define ETH_FCS_LEN 4
/* cancel_work_sync() did not exist; flushing the whole workqueue is the
 * closest (heavier) equivalent. */
1990 #define cancel_work_sync(x) flush_scheduled_work()
1992 #define udp_hdr _udp_hdr
1993 static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
1995 return (struct udphdr *)skb_transport_header(skb);
2002 #define cpu_to_be16(x) __constant_htons(x)
2004 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)))
2007 DUMP_PREFIX_ADDRESS,
2010 #endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */
2012 #define hex_asc(x) "0123456789abcdef"[x]
2014 #include <linux/ctype.h>
2015 extern void _kc_print_hex_dump(const char *level, const char *prefix_str,
2016 int prefix_type, int rowsize, int groupsize,
2017 const void *buf, size_t len, bool ascii);
2018 #define print_hex_dump(lvl, s, t, r, g, b, l, a) \
2019 _kc_print_hex_dump(lvl, s, t, r, g, b, l, a)
2021 #define ETH_TYPE_TRANS_SETS_DEV
2022 #define HAVE_NETDEV_STATS_IN_NETDEV
2023 #endif /* < 2.6.22 */
2025 /*****************************************************************************/
2026 #if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
2027 #endif /* > 2.6.22 */
/* Backports for kernels older than 2.6.23. */
2029 /*****************************************************************************/
2030 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
2031 #define netif_subqueue_stopped(_a, _b) 0
2033 #define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
/* 2.6.23 split CONFIG_PM_SLEEP out of CONFIG_PM; treat them as equivalent
 * on older kernels. */
2036 #ifndef CONFIG_PM_SLEEP
2037 #define CONFIG_PM_SLEEP CONFIG_PM
2040 #if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) )
2041 #define HAVE_ETHTOOL_GET_PERM_ADDR
2042 #endif /* 2.6.14 through 2.6.22 */
2043 #endif /* < 2.6.23 */
/* Backports for kernels older than 2.6.24 — chiefly the NAPI rework. */
2045 /*****************************************************************************/
2046 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
2047 #ifndef ETH_FLAG_LRO
2048 #define ETH_FLAG_LRO NETIF_F_LRO
2051 /* if GRO is supported then the napi struct must already exist */
2053 /* NAPI API changes in 2.6.24 break everything */
/* Fake struct napi_struct: each instance is bridged to a dedicated dummy
 * net_device (obtained via napi_to_poll_dev()) so the old per-netdev
 * ->poll interface can drive the new per-napi poll routine.
 * NOTE(review): several member lines of this struct are missing from this
 * listing (embedded numbering jumps 2057 -> 2063). */
2054 struct napi_struct {
2055 /* used to look up the real NAPI polling routine */
2056 int (*poll)(struct napi_struct *, int);
2057 struct net_device *dev;
2063 extern int __kc_adapter_clean(struct net_device *, int *);
2064 extern struct net_device *napi_to_poll_dev(struct napi_struct *napi);
/* netif_napi_add(): wire the dummy poll_dev to the shim __kc_adapter_clean,
 * stash the napi struct in poll_dev->priv, and record the real poll routine
 * and weight inside the napi struct itself. */
2065 #define netif_napi_add(_netdev, _napi, _poll, _weight) \
2067 struct napi_struct *__napi = (_napi); \
2068 struct net_device *poll_dev = napi_to_poll_dev(__napi); \
2069 poll_dev->poll = &(__kc_adapter_clean); \
2070 poll_dev->priv = (_napi); \
2071 poll_dev->weight = (_weight); \
2072 set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); \
2073 set_bit(__LINK_STATE_START, &poll_dev->state);\
2074 dev_hold(poll_dev); \
2075 __napi->poll = &(_poll); \
2076 __napi->weight = (_weight); \
2077 __napi->dev = (_netdev); \
2079 #define netif_napi_del(_napi) \
2081 struct net_device *poll_dev = napi_to_poll_dev(_napi); \
2082 WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); \
2083 dev_put(poll_dev); \
2084 memset(poll_dev, 0, sizeof(struct net_device));\
2086 #define napi_schedule_prep(_napi) \
2087 (netif_running((_napi)->dev) && netif_rx_schedule_prep(napi_to_poll_dev(_napi)))
/* The remaining napi_* entry points are routed through the dummy poll_dev
 * so the old netif_rx_* / netif_poll_* machinery does the real work. */
2088 #define napi_schedule(_napi) \
2090 if (napi_schedule_prep(_napi)) \
2091 __netif_rx_schedule(napi_to_poll_dev(_napi)); \
2093 #define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
2094 #define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
2095 #define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
2097 #define napi_complete(_napi) netif_rx_complete(napi_to_poll_dev(_napi))
2099 #define napi_complete(_napi) \
2101 napi_gro_flush(_napi); \
2102 netif_rx_complete(napi_to_poll_dev(_napi)); \
2104 #endif /* NETIF_F_GRO */
/* Non-NAPI-shim variant: hook the poll routine straight onto the netdev. */
2106 #define netif_napi_add(_netdev, _napi, _poll, _weight) \
2108 struct napi_struct *__napi = _napi; \
2109 _netdev->poll = &(_poll); \
2110 _netdev->weight = (_weight); \
2111 __napi->poll = &(_poll); \
2112 __napi->weight = (_weight); \
2113 __napi->dev = (_netdev); \
2115 #define netif_napi_del(_a) do {} while (0)
2118 #undef dev_get_by_name
2119 #define dev_get_by_name(_a, _b) dev_get_by_name(_b)
2120 #define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
2121 #ifndef DMA_BIT_MASK
2122 #define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1))
/* n == 64 is special-cased because 1ULL << 64 is undefined behavior. */
2126 #define skb_is_gso_v6 _kc_skb_is_gso_v6
2127 static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
2129 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
2131 #endif /* NETIF_F_TSO6 */
2134 #define KERN_CONT ""
2136 #else /* < 2.6.24 */
2137 #define HAVE_ETHTOOL_GET_SSET_COUNT
2138 #define HAVE_NETDEV_NAPI_LIST
2139 #endif /* < 2.6.24 */
/* The PM QoS header was renamed in 3.2: pm_qos_params.h -> pm_qos.h. */
2141 /*****************************************************************************/
2142 #if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
2143 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
2144 #include <linux/pm_qos_params.h>
2145 #else /* >= 3.2.0 */
2146 #include <linux/pm_qos.h>
2147 #endif /* else >= 3.2.0 */
2148 #endif /* > 2.6.24 */
/* Backports for kernels older than 2.6.25. */
2150 /*****************************************************************************/
2151 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
2152 #define PM_QOS_CPU_DMA_LATENCY 1
/* On 2.6.19-2.6.24 the "acceptable latency" API stands in for PM QoS; on
 * anything older the pm_qos_* calls become no-ops (with a printk when a
 * non-default value is requested). */
2154 #if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
2155 #include <linux/latency.h>
2156 #define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY
2157 #define pm_qos_add_requirement(pm_qos_class, name, value) \
2158 set_acceptable_latency(name, value)
2159 #define pm_qos_remove_requirement(pm_qos_class, name) \
2160 remove_acceptable_latency(name)
2161 #define pm_qos_update_requirement(pm_qos_class, name, value) \
2162 modify_acceptable_latency(name, value)
2164 #define PM_QOS_DEFAULT_VALUE -1
2165 #define pm_qos_add_requirement(pm_qos_class, name, value)
2166 #define pm_qos_remove_requirement(pm_qos_class, name)
2167 #define pm_qos_update_requirement(pm_qos_class, name, value) { \
2168 if (value != PM_QOS_DEFAULT_VALUE) { \
2169 printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
2170 pci_name(adapter->pdev)); \
2174 #endif /* > 2.6.18 */
2176 #define pci_enable_device_mem(pdev) pci_enable_device(pdev)
/* pci_enable_device_mem() (MEM-BARs only) degrades to full
 * pci_enable_device() on older kernels. */
2178 #ifndef DEFINE_PCI_DEVICE_TABLE
2179 #define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[]
2180 #endif /* DEFINE_PCI_DEVICE_TABLE */
2182 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
2183 #ifndef IXGBE_PROCFS
2184 #define IXGBE_PROCFS
2185 #endif /* IXGBE_PROCFS */
2186 #endif /* >= 2.6.0 */
2189 #else /* < 2.6.25 */
2193 #endif /* IXGBE_SYSFS */
2196 #endif /* < 2.6.25 */
/* Backports for kernels older than 2.6.26. */
2198 /*****************************************************************************/
2199 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
2201 #define clamp_t(type, val, min, max) ({ \
2202 type __val = (val); \
2203 type __min = (min); \
2204 type __max = (max); \
2205 __val = __val < __min ? __min : __val; \
2206 __val > __max ? __max : __val; })
2207 #endif /* clamp_t */
/* netif_set_gso_max_size() backport: the size cannot be limited on these
 * kernels, so when DCB is enabled TSO is turned off entirely and otherwise
 * turned (back) on.  NOTE(review): this expansion references a local
 * variable named "adapter" at the macro's use site. */
2210 #define netif_set_gso_max_size(_netdev, size) \
2212 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { \
2213 _netdev->features &= ~NETIF_F_TSO; \
2214 _netdev->features &= ~NETIF_F_TSO6; \
2216 _netdev->features |= NETIF_F_TSO; \
2217 _netdev->features |= NETIF_F_TSO6; \
2220 #else /* NETIF_F_TSO6 */
2221 #define netif_set_gso_max_size(_netdev, size) \
2223 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
2224 _netdev->features &= ~NETIF_F_TSO; \
2226 _netdev->features |= NETIF_F_TSO; \
2228 #endif /* NETIF_F_TSO6 */
2230 #define netif_set_gso_max_size(_netdev, size) do {} while (0)
2231 #endif /* NETIF_F_TSO */
2233 #define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags)
2235 extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
2236 #define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
2237 #else /* < 2.6.26 */
2238 #include <linux/pci-aspm.h>
2239 #define HAVE_NETDEV_VLAN_FEATURES
2240 #endif /* < 2.6.26 */
/* Backports for kernels older than 2.6.27.  NOTE(review): this section's
 * closing #endif lies beyond the end of this chunk — the block below is
 * truncated. */
2241 /*****************************************************************************/
2242 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
/* ethtool_cmd speed accessors: before 2.6.27 there is no speed_hi field,
 * so only the low 16 bits of the speed are stored/read. */
2243 static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
2246 ep->speed = (__u16)speed;
2247 /* ep->speed_hi = (__u16)(speed >> 16); */
2249 #define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
2251 static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep)
2253 /* no speed_hi before 2.6.27, and probably no need for it yet */
2254 return (__u32)ep->speed;
2256 #define ethtool_cmd_speed _kc_ethtool_cmd_speed
/* device_set_wakeup_enable() replacement for the PM-configuration windows
 * selected below: it derives wake capability from the PCI PM capability's
 * PMC register (bits 15:11 = PME support). */
2258 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
2259 #if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM))
2260 #define ANCIENT_PM 1
2261 #elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \
2262 (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \
2263 defined(CONFIG_PM_SLEEP))
2266 #if defined(ANCIENT_PM) || defined(NEWER_PM)
2267 #undef device_set_wakeup_enable
2268 #define device_set_wakeup_enable(dev, val) \
2271 int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
2273 pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
2276 (dev)->power.can_wakeup = !!(pmc >> 11); \
2277 (dev)->power.should_wakeup = (val && (pmc >> 11)); \
2279 #endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
2280 #endif /* 2.6.15 through 2.6.27 */
2281 #ifndef netif_napi_del
2282 #define netif_napi_del(_a) do {} while (0)
2284 #ifdef CONFIG_NETPOLL
2285 #undef netif_napi_del
2286 #define netif_napi_del(_a) list_del(&(_a)->dev_list);
2289 #endif /* netif_napi_del */
2290 #ifdef dma_mapping_error
2291 #undef dma_mapping_error
2293 #define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr)
/* Multiqueue TX helpers: with real multiqueue support (HAVE_TX_MQ path)
 * the all-queue operations are provided as _kc_ out-of-line helpers and
 * the per-subqueue ops fall back to whole-device start/stop when the
 * netdev is not multiqueue. */
2295 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
2300 extern void _kc_netif_tx_stop_all_queues(struct net_device *);
2301 extern void _kc_netif_tx_wake_all_queues(struct net_device *);
2302 extern void _kc_netif_tx_start_all_queues(struct net_device *);
2303 #define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
2304 #define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
2305 #define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
2306 #undef netif_stop_subqueue
2307 #define netif_stop_subqueue(_ndev,_qi) do { \
2308 if (netif_is_multiqueue((_ndev))) \
2309 netif_stop_subqueue((_ndev), (_qi)); \
2311 netif_stop_queue((_ndev)); \
2313 #undef netif_start_subqueue
2314 #define netif_start_subqueue(_ndev,_qi) do { \
2315 if (netif_is_multiqueue((_ndev))) \
2316 netif_start_subqueue((_ndev), (_qi)); \
2318 netif_start_queue((_ndev)); \
2320 #else /* HAVE_TX_MQ */
2321 #define netif_tx_stop_all_queues(a) netif_stop_queue(a)
2322 #define netif_tx_wake_all_queues(a) netif_wake_queue(a)
2323 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
2324 #define netif_tx_start_all_queues(a) netif_start_queue(a)
2326 #define netif_tx_start_all_queues(a) do {} while (0)
2328 #define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev))
2329 #define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev))
2330 #endif /* HAVE_TX_MQ */
2331 #ifndef NETIF_F_MULTI_QUEUE
2332 #define NETIF_F_MULTI_QUEUE 0
2333 #define netif_is_multiqueue(a) 0
/* NOTE(review): the < 2.6.27 compat section continues past the end of this
 * chunk; its closing #endif is not visible here. */
2334 #define netif_wake_subqueue(a, b)
2335 #endif /* NETIF_F_MULTI_QUEUE */
2337 #ifndef __WARN_printf
2338 extern void __kc_warn_slowpath(const char *file, const int line,
2339 const char *fmt, ...) __attribute__((format(printf, 3, 4)));
2340 #define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg)
2341 #endif /* __WARN_printf */
2344 #define WARN(condition, format...) ({ \
2345 int __ret_warn_on = !!(condition); \
2346 if (unlikely(__ret_warn_on)) \
2347 __WARN_printf(format); \
2348 unlikely(__ret_warn_on); \
2351 #else /* < 2.6.27 */
2353 #define HAVE_NETDEV_SELECT_QUEUE
2354 #endif /* < 2.6.27 */
2356 /*****************************************************************************/
2357 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
/* Kernels < 2.6.28 lack pci_ioremap_bar(): emulate it by mapping the BAR's
 * entire resource range with ioremap(). */
#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \
					   pci_resource_len(pdev, bar))
/* pci_wake_from_d3()/pci_prepare_to_sleep() also appeared in 2.6.28; route
 * them to local back-ports (implemented out-of-line in kcompat.c). */
#define pci_wake_from_d3 _kc_pci_wake_from_d3
#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep
extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable);
extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev);
2364 #define netdev_alloc_page(a) alloc_page(GFP_ATOMIC)
2365 #ifndef __skb_queue_head_init
2366 static inline void __kc_skb_queue_head_init(struct sk_buff_head *list)
2368 list->prev = list->next = (struct sk_buff *)list;
2371 #define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
2373 #ifndef skb_add_rx_frag
2374 #define skb_add_rx_frag _kc_skb_add_rx_frag
2375 extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int);
2377 #endif /* < 2.6.28 */
2379 /*****************************************************************************/
2380 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
/* Generic value swap (added to the kernel in 2.6.29).  NOTE: arguments are
 * evaluated more than once — do not pass expressions with side effects. */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
/* Exclusive region requests do not exist before 2.6.29; fall back to the
 * plain (non-exclusive) request, which is the closest available behavior. */
#define pci_request_selected_regions_exclusive(pdev, bars, name) \
			 pci_request_selected_regions(pdev, bars, name)
/* Ensure a CPU count is always defined for array sizing on old kernels. */
#ifndef CONFIG_NR_CPUS
#define CONFIG_NR_CPUS 1
#endif /* CONFIG_NR_CPUS */
/* No global ASPM-enabled query before 2.6.29; assume ASPM is enabled so the
 * driver takes the conservative (quirk-applying) path. */
#ifndef pcie_aspm_enabled
#define pcie_aspm_enabled() (1)
#endif /* pcie_aspm_enabled */
2393 #else /* < 2.6.29 */
2394 #ifndef HAVE_NET_DEVICE_OPS
2395 #define HAVE_NET_DEVICE_OPS
2398 #define HAVE_PFC_MODE_ENABLE
2399 #endif /* CONFIG_DCB */
2400 #endif /* < 2.6.29 */
2402 /*****************************************************************************/
2403 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
/* Kernels < 2.6.30 do not record an RX queue in the skb; stub the queries
 * so callers compile and always see "no queue recorded". */
#define skb_rx_queue_recorded(a) false
#define skb_get_rx_queue(a) 0
/* NOTE(review): FCoE appears to be force-disabled on these old kernels —
 * presumably because required support is missing pre-2.6.30; confirm. */
#undef CONFIG_FCOE_MODULE
/* skb_tx_hash() back-port lives in kcompat.c; recording the RX queue is a
 * no-op since nothing can read it back (see stubs above). */
extern u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb);
#define skb_tx_hash(n, s) _kc_skb_tx_hash(n, s)
#define skb_record_rx_queue(a, b) do {} while (0)
/* Without CONFIG_PCI_IOV, SR-IOV cannot work: enabling reports "not
 * supported" and disabling is a no-op. */
#ifndef CONFIG_PCI_IOV
#undef pci_enable_sriov
#define pci_enable_sriov(a, b) -ENOTSUPP
#undef pci_disable_sriov
#define pci_disable_sriov(a) do {} while (0)
#endif /* CONFIG_PCI_IOV */
2418 #define pr_cont(fmt, ...) \
2419 printk(KERN_CONT fmt, ##__VA_ARGS__)
2420 #endif /* pr_cont */
2422 #define HAVE_ASPM_QUIRKS
2423 #endif /* < 2.6.30 */
2425 /*****************************************************************************/
2426 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
/* EtherTypes missing from headers before 2.6.31:
 * 0x88F7 = IEEE 1588 PTP over Ethernet, 0x8914 = FCoE Initialization
 * Protocol (FIP). */
#define ETH_P_1588 0x88F7
#define ETH_P_FIP  0x8914
2429 #ifndef netdev_uc_count
2430 #define netdev_uc_count(dev) ((dev)->uc_count)
2432 #ifndef netdev_for_each_uc_addr
2433 #define netdev_for_each_uc_addr(uclist, dev) \
2434 for (uclist = dev->uc_list; uclist; uclist = uclist->next)
2437 #ifndef HAVE_NETDEV_STORAGE_ADDRESS
2438 #define HAVE_NETDEV_STORAGE_ADDRESS
2440 #ifndef HAVE_NETDEV_HW_ADDR
2441 #define HAVE_NETDEV_HW_ADDR
2443 #ifndef HAVE_TRANS_START_IN_QUEUE
2444 #define HAVE_TRANS_START_IN_QUEUE
2446 #endif /* < 2.6.31 */
2448 /*****************************************************************************/
2449 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) )
2451 #define netdev_tx_t int
2452 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
2453 #ifndef NETIF_F_FCOE_MTU
2454 #define NETIF_F_FCOE_MTU (1 << 26)
2456 #endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
2458 #ifndef pm_runtime_get_sync
2459 #define pm_runtime_get_sync(dev) do {} while (0)
2461 #ifndef pm_runtime_put
2462 #define pm_runtime_put(dev) do {} while (0)
2464 #ifndef pm_runtime_put_sync
2465 #define pm_runtime_put_sync(dev) do {} while (0)
2467 #ifndef pm_runtime_resume
2468 #define pm_runtime_resume(dev) do {} while (0)
2470 #ifndef pm_schedule_suspend
2471 #define pm_schedule_suspend(dev, t) do {} while (0)
2473 #ifndef pm_runtime_set_suspended
2474 #define pm_runtime_set_suspended(dev) do {} while (0)
2476 #ifndef pm_runtime_disable
2477 #define pm_runtime_disable(dev) do {} while (0)
2479 #ifndef pm_runtime_put_noidle
2480 #define pm_runtime_put_noidle(dev) do {} while (0)
2482 #ifndef pm_runtime_set_active
2483 #define pm_runtime_set_active(dev) do {} while (0)
2485 #ifndef pm_runtime_enable
2486 #define pm_runtime_enable(dev) do {} while (0)
2488 #ifndef pm_runtime_get_noresume
2489 #define pm_runtime_get_noresume(dev) do {} while (0)
2491 #else /* < 2.6.32 */
2492 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
2493 #ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
2494 #define HAVE_NETDEV_OPS_FCOE_ENABLE
2496 #endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
2498 #ifndef HAVE_DCBNL_OPS_GETAPP
2499 #define HAVE_DCBNL_OPS_GETAPP
2501 #endif /* CONFIG_DCB */
2502 #include <linux/pm_runtime.h>
2503 /* IOV bad DMA target work arounds require at least this kernel rev support */
2504 #define HAVE_PCIE_TYPE
2505 #endif /* < 2.6.32 */
2507 /*****************************************************************************/
2508 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
2509 #ifndef pci_pcie_cap
2510 #define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP)
2513 #define IPV4_FLOW 0x10
2514 #endif /* IPV4_FLOW */
2516 #define IPV6_FLOW 0x11
2517 #endif /* IPV6_FLOW */
2518 /* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */
2519 #if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \
2520 (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) )
2521 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
2522 #ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
2523 #define HAVE_NETDEV_OPS_FCOE_GETWWN
2525 #endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
2526 #endif /* RHEL6 or SLES11 SP1 */
2529 #endif /* __percpu */
2530 #else /* < 2.6.33 */
2531 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
2532 #ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
2533 #define HAVE_NETDEV_OPS_FCOE_GETWWN
2535 #endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
2536 #define HAVE_ETHTOOL_SFP_DISPLAY_PORT
2537 #endif /* < 2.6.33 */
2539 /*****************************************************************************/
2540 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
2541 #ifndef ETH_FLAG_NTUPLE
2542 #define ETH_FLAG_NTUPLE NETIF_F_NTUPLE
2545 #ifndef netdev_mc_count
2546 #define netdev_mc_count(dev) ((dev)->mc_count)
2548 #ifndef netdev_mc_empty
2549 #define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
2551 #ifndef netdev_for_each_mc_addr
2552 #define netdev_for_each_mc_addr(mclist, dev) \
2553 for (mclist = dev->mc_list; mclist; mclist = mclist->next)
2555 #ifndef netdev_uc_count
2556 #define netdev_uc_count(dev) ((dev)->uc.count)
2558 #ifndef netdev_uc_empty
2559 #define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0)
2561 #ifndef netdev_for_each_uc_addr
2562 #define netdev_for_each_uc_addr(ha, dev) \
2563 list_for_each_entry(ha, &dev->uc.list, list)
2565 #ifndef dma_set_coherent_mask
2566 #define dma_set_coherent_mask(dev,mask) \
2567 pci_set_consistent_dma_mask(to_pci_dev(dev),(mask))
2569 #ifndef pci_dev_run_wake
2570 #define pci_dev_run_wake(pdev) (0)
2573 /* netdev logging taken from include/linux/netdevice.h */
2575 static inline const char *_kc_netdev_name(const struct net_device *dev)
2577 if (dev->reg_state != NETREG_REGISTERED)
2578 return "(unregistered net_device)";
2581 #define netdev_name(netdev) _kc_netdev_name(netdev)
2582 #endif /* netdev_name */
2584 #undef netdev_printk
2585 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
2586 #define netdev_printk(level, netdev, format, args...) \
2588 struct adapter_struct *kc_adapter = netdev_priv(netdev);\
2589 struct pci_dev *pdev = kc_adapter->pdev; \
2590 printk("%s %s: " format, level, pci_name(pdev), \
2593 #elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
2594 #define netdev_printk(level, netdev, format, args...) \
2596 struct adapter_struct *kc_adapter = netdev_priv(netdev);\
2597 struct pci_dev *pdev = kc_adapter->pdev; \
2598 struct device *dev = pci_dev_to_dev(pdev); \
2599 dev_printk(level, dev, "%s: " format, \
2600 netdev_name(netdev), ##args); \
2602 #else /* 2.6.21 => 2.6.34 */
2603 #define netdev_printk(level, netdev, format, args...) \
2604 dev_printk(level, (netdev)->dev.parent, \
2606 netdev_name(netdev), ##args)
2607 #endif /* <2.6.0 <2.6.21 <2.6.34 */
/* Kernels < 2.6.34: per-severity netdev message helpers, layered on the
 * netdev_printk() back-port selected above.  Same call signature as the
 * upstream helpers so driver code is unchanged. */
#define netdev_emerg(dev, format, args...) \
	netdev_printk(KERN_EMERG, dev, format, ##args)
#define netdev_alert(dev, format, args...) \
	netdev_printk(KERN_ALERT, dev, format, ##args)
#define netdev_crit(dev, format, args...) \
	netdev_printk(KERN_CRIT, dev, format, ##args)
#define netdev_err(dev, format, args...) \
	netdev_printk(KERN_ERR, dev, format, ##args)
#define netdev_warn(dev, format, args...) \
	netdev_printk(KERN_WARNING, dev, format, ##args)
/* Some trees already define netdev_notice; replace it for consistency. */
#undef netdev_notice
#define netdev_notice(dev, format, args...) \
	netdev_printk(KERN_NOTICE, dev, format, ##args)
#define netdev_info(dev, format, args...) \
	netdev_printk(KERN_INFO, dev, format, ##args)
2631 #define netdev_dbg(__dev, format, args...) \
2632 netdev_printk(KERN_DEBUG, __dev, format, ##args)
2633 #elif defined(CONFIG_DYNAMIC_DEBUG)
2634 #define netdev_dbg(__dev, format, args...) \
2636 dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
2637 netdev_name(__dev), ##args); \
2640 #define netdev_dbg(__dev, format, args...) \
2643 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
2649 #define netif_printk(priv, type, level, dev, fmt, args...) \
2651 if (netif_msg_##type(priv)) \
2652 netdev_printk(level, (dev), fmt, ##args); \
/* Kernels < 2.6.34: msg_enable-gated message helpers.  Each expands through
 * netif_level(), which checks netif_msg_<type>(priv) before printing. */
#define netif_emerg(priv, type, dev, fmt, args...) \
	netif_level(emerg, priv, type, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...) \
	netif_level(alert, priv, type, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...) \
	netif_level(crit, priv, type, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...) \
	netif_level(err, priv, type, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...) \
	netif_level(warn, priv, type, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...) \
	netif_level(notice, priv, type, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...) \
	netif_level(info, priv, type, dev, fmt, ##args)
2677 #ifdef SET_SYSTEM_SLEEP_PM_OPS
2678 #define HAVE_SYSTEM_SLEEP_PM_OPS
2681 #ifndef for_each_set_bit
2682 #define for_each_set_bit(bit, addr, size) \
2683 for ((bit) = find_first_bit((addr), (size)); \
2685 (bit) = find_next_bit((addr), (size), (bit) + 1))
2686 #endif /* for_each_set_bit */
/* The generic dma_unmap_* bookkeeping API appeared in 2.6.34; on older
 * kernels map it 1:1 onto the equivalent pci_unmap_* macros. */
#ifndef DEFINE_DMA_UNMAP_ADDR
#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR
#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN
#define dma_unmap_addr pci_unmap_addr
#define dma_unmap_addr_set pci_unmap_addr_set
#define dma_unmap_len pci_unmap_len
#define dma_unmap_len_set pci_unmap_len_set
#endif /* DEFINE_DMA_UNMAP_ADDR */
2696 #else /* < 2.6.34 */
2697 #define HAVE_SYSTEM_SLEEP_PM_OPS
2698 #ifndef HAVE_SET_RX_MODE
2699 #define HAVE_SET_RX_MODE
2702 #endif /* < 2.6.34 */
2704 /*****************************************************************************/
2705 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
2706 #ifndef numa_node_id
2707 #define numa_node_id() 0
2710 #include <net/sch_generic.h>
2711 #ifndef CONFIG_NETDEVICES_MULTIQUEUE
2712 void _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
2713 #define netif_set_real_num_tx_queues _kc_netif_set_real_num_tx_queues
2714 #else /* CONFIG_NETDEVICES_MULTI_QUEUE */
2715 #define netif_set_real_num_tx_queues(_netdev, _count) \
2717 (_netdev)->egress_subqueue_count = _count; \
2719 #endif /* CONFIG_NETDEVICES_MULTI_QUEUE */
2721 #define netif_set_real_num_tx_queues(_netdev, _count) do {} while(0)
2722 #endif /* HAVE_TX_MQ */
/* ethtool RX-hash offload flag was added in 2.6.35; supply the upstream bit
 * value when the header lacks it. */
#ifndef ETH_FLAG_RXHASH
#define ETH_FLAG_RXHASH (1<<28)
#endif /* ETH_FLAG_RXHASH */
#else /* < 2.6.35 */
/* 2.6.35+ kernels natively provide the PM-QoS request-list API and IRQ
 * affinity hints. */
#define HAVE_PM_QOS_REQUEST_LIST
#define HAVE_IRQ_AFFINITY_HINT
#endif /* < 2.6.35 */
2731 /*****************************************************************************/
2732 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
/* Kernels < 2.6.36: back-ported ethtool flag get/set operations
 * (implemented out-of-line in kcompat.c). */
extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32);
#define ethtool_op_set_flags _kc_ethtool_op_set_flags
extern u32 _kc_ethtool_op_get_flags(struct net_device *);
#define ethtool_op_get_flags _kc_ethtool_op_get_flags
2738 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2742 #define NET_IP_ALIGN 0
2743 #endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
2749 #if (L1_CACHE_BYTES > 32)
2750 #define NET_SKB_PAD L1_CACHE_BYTES
2752 #define NET_SKB_PAD 32
2755 static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev,
2756 unsigned int length)
2758 struct sk_buff *skb;
2760 skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
2762 #if (NET_IP_ALIGN + NET_SKB_PAD)
2763 skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
2770 #ifdef netdev_alloc_skb_ip_align
2771 #undef netdev_alloc_skb_ip_align
2773 #define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l)
2776 #define netif_level(level, priv, type, dev, fmt, args...) \
2778 if (netif_msg_##type(priv)) \
2779 netdev_##level(dev, fmt, ##args); \
2783 #define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
2785 #else /* < 2.6.36 */
2786 #define HAVE_PM_QOS_REQUEST_ACTIVE
2787 #define HAVE_8021P_SUPPORT
2788 #define HAVE_NDO_GET_STATS64
2789 #endif /* < 2.6.36 */
2791 /*****************************************************************************/
2792 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) )
2793 #ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR
2794 #define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2)
2797 #define VLAN_N_VID VLAN_GROUP_ARRAY_LEN
2798 #endif /* VLAN_N_VID */
/* ethtool VLAN offload flags were added in 2.6.37; supply the upstream bit
 * values when the header lacks them. */
#ifndef ETH_FLAG_TXVLAN
#define ETH_FLAG_TXVLAN (1 << 7)
#endif /* ETH_FLAG_TXVLAN */
#ifndef ETH_FLAG_RXVLAN
#define ETH_FLAG_RXVLAN (1 << 8)
#endif /* ETH_FLAG_RXVLAN */
2806 static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb)
2808 WARN_ON(skb->ip_summed != CHECKSUM_NONE);
2810 #define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb)
2812 static inline void *_kc_vzalloc_node(unsigned long size, int node)
2814 void *addr = vmalloc_node(size, node);
2816 memset(addr, 0, size);
2819 #define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node)
2821 static inline void *_kc_vzalloc(unsigned long size)
2823 void *addr = vmalloc(size);
2825 memset(addr, 0, size);
2828 #define vzalloc(_size) _kc_vzalloc(_size)
2830 #ifndef vlan_get_protocol
2831 static inline __be16 __kc_vlan_get_protocol(const struct sk_buff *skb)
2833 if (vlan_tx_tag_present(skb) ||
2834 skb->protocol != cpu_to_be16(ETH_P_8021Q))
2835 return skb->protocol;
2837 if (skb_headlen(skb) < sizeof(struct vlan_ethhdr))
2840 return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto;
2842 #define vlan_get_protocol(_skb) __kc_vlan_get_protocol(_skb)
2844 #ifdef HAVE_HW_TIME_STAMP
2845 #define SKBTX_HW_TSTAMP (1 << 0)
2846 #define SKBTX_IN_PROGRESS (1 << 2)
2847 #define SKB_SHARED_TX_IS_UNION
2849 #if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) )
2850 #ifndef HAVE_VLAN_RX_REGISTER
2851 #define HAVE_VLAN_RX_REGISTER
2853 #endif /* > 2.4.18 */
2854 #endif /* < 2.6.37 */
2856 /*****************************************************************************/
2857 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
2858 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
2859 #define skb_checksum_start_offset(skb) skb_transport_offset(skb)
2860 #else /* 2.6.22 -> 2.6.37 */
2861 static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb)
2863 return skb->csum_start - skb_headroom(skb);
2865 #define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb)
2866 #endif /* 2.6.22 -> 2.6.37 */
2868 #ifndef IEEE_8021QAZ_MAX_TCS
2869 #define IEEE_8021QAZ_MAX_TCS 8
2871 #ifndef DCB_CAP_DCBX_HOST
2872 #define DCB_CAP_DCBX_HOST 0x01
2874 #ifndef DCB_CAP_DCBX_LLD_MANAGED
2875 #define DCB_CAP_DCBX_LLD_MANAGED 0x02
2877 #ifndef DCB_CAP_DCBX_VER_CEE
2878 #define DCB_CAP_DCBX_VER_CEE 0x04
2880 #ifndef DCB_CAP_DCBX_VER_IEEE
2881 #define DCB_CAP_DCBX_VER_IEEE 0x08
2883 #ifndef DCB_CAP_DCBX_STATIC
2884 #define DCB_CAP_DCBX_STATIC 0x10
2886 #endif /* CONFIG_DCB */
2887 #else /* < 2.6.38 */
2888 #endif /* < 2.6.38 */
2890 /*****************************************************************************/
2891 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
#ifndef skb_queue_reverse_walk_safe
/* Iterate an skb queue tail-to-head; 'tmp' caches the previous element so
 * the current skb may be unlinked/freed during the walk. */
#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
		for (skb = (queue)->prev, tmp = skb->prev; \
		     skb != (struct sk_buff *)(queue); \
		     skb = tmp, tmp = skb->prev)
2898 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
2899 extern u8 _kc_netdev_get_num_tc(struct net_device *dev);
2900 #define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev)
2901 extern u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up);
2902 #define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up)
2903 #define netdev_set_prio_tc_map(dev, up, tc) do {} while (0)
2904 #else /* RHEL6.1 or greater */
2907 #endif /* HAVE_MQPRIO */
2909 #ifndef HAVE_DCBNL_IEEE
2910 #define HAVE_DCBNL_IEEE
2911 #ifndef IEEE_8021QAZ_TSA_STRICT
2912 #define IEEE_8021QAZ_TSA_STRICT 0
2914 #ifndef IEEE_8021QAZ_TSA_ETS
2915 #define IEEE_8021QAZ_TSA_ETS 2
2917 #ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE
2918 #define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1
2921 #endif /* CONFIG_DCB */
2922 #endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
2923 #else /* < 2.6.39 */
2924 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
2925 #ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
2926 #define HAVE_NETDEV_OPS_FCOE_DDP_TARGET
2928 #endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
2932 #ifndef HAVE_SETUP_TC
2933 #define HAVE_SETUP_TC
2936 #ifndef HAVE_DCBNL_IEEE
2937 #define HAVE_DCBNL_IEEE
2939 #endif /* CONFIG_DCB */
2940 #ifndef HAVE_NDO_SET_FEATURES
2941 #define HAVE_NDO_SET_FEATURES
2943 #endif /* < 2.6.39 */
2945 /*****************************************************************************/
/* Use < 2.6.40 because a Fedora 15 kernel update renumbered the kernel to
 * 2.6.40.x and back-ported 3.0 features such as ethtool set_phys_id.
 */
2950 #undef ETHTOOL_GRXRINGS
2951 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) )
2952 #ifdef ETHTOOL_GRXRINGS
2954 #define FLOW_EXT 0x80000000
2955 union _kc_ethtool_flow_union {
2956 struct ethtool_tcpip4_spec tcp_ip4_spec;
2957 struct ethtool_usrip4_spec usr_ip4_spec;
2960 struct _kc_ethtool_flow_ext {
2965 struct _kc_ethtool_rx_flow_spec {
2967 union _kc_ethtool_flow_union h_u;
2968 struct _kc_ethtool_flow_ext h_ext;
2969 union _kc_ethtool_flow_union m_u;
2970 struct _kc_ethtool_flow_ext m_ext;
2974 #define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec
2975 #endif /* FLOW_EXT */
2978 #define pci_disable_link_state_locked pci_disable_link_state
2980 #ifndef PCI_LTR_VALUE_MASK
2981 #define PCI_LTR_VALUE_MASK 0x000003ff
2983 #ifndef PCI_LTR_SCALE_MASK
2984 #define PCI_LTR_SCALE_MASK 0x00001c00
2986 #ifndef PCI_LTR_SCALE_SHIFT
2987 #define PCI_LTR_SCALE_SHIFT 10
2990 #else /* < 2.6.40 */
2991 #define HAVE_ETHTOOL_SET_PHYS_ID
2992 #endif /* < 2.6.40 */
2994 /*****************************************************************************/
2995 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
/* Pre-3.1 kernels have no gfp-taking variant; drop the gfp argument and use
 * the plain allocator (which uses GFP_ATOMIC internally). */
#ifndef __netdev_alloc_skb_ip_align
#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l)
#endif /* __netdev_alloc_skb_ip_align */
/* IEEE (802.1Qaz) DCB app-table API appeared in 3.1: map setapp onto the
 * CEE dcb_setapp(), make delapp a successful no-op, and derive the app
 * priority mask directly from the entry's priority field. */
#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app)
#define dcb_ieee_delapp(dev, app) 0
#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority)
3003 #ifndef HAVE_DCBNL_IEEE_DELAPP
3004 #define HAVE_DCBNL_IEEE_DELAPP
3006 #endif /* < 3.1.0 */
3008 /*****************************************************************************/
3009 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
3010 #ifdef ETHTOOL_GRXRINGS
3011 #define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
3012 #endif /* ETHTOOL_GRXRINGS */
3014 #ifndef skb_frag_size
3015 #define skb_frag_size(frag) _kc_skb_frag_size(frag)
3016 static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag)
3020 #endif /* skb_frag_size */
3022 #ifndef skb_frag_size_sub
3023 #define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta)
3024 static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta)
3026 frag->size -= delta;
3028 #endif /* skb_frag_size_sub */
3030 #ifndef skb_frag_page
3031 #define skb_frag_page(frag) _kc_skb_frag_page(frag)
3032 static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag)
3036 #endif /* skb_frag_page */
3038 #ifndef skb_frag_address
3039 #define skb_frag_address(frag) _kc_skb_frag_address(frag)
3040 static inline void *_kc_skb_frag_address(const skb_frag_t *frag)
3042 return page_address(skb_frag_page(frag)) + frag->page_offset;
3044 #endif /* skb_frag_address */
3046 #ifndef skb_frag_dma_map
3047 #define skb_frag_dma_map(dev,frag,offset,size,dir) \
3048 _kc_skb_frag_dma_map(dev,frag,offset,size,dir)
3049 static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev,
3050 const skb_frag_t *frag,
3051 size_t offset, size_t size,
3052 enum dma_data_direction dir)
3054 return dma_map_page(dev, skb_frag_page(frag),
3055 frag->page_offset + offset, size, dir);
3057 #endif /* skb_frag_dma_map */
3059 #ifndef __skb_frag_unref
3060 #define __skb_frag_unref(frag) __kc_skb_frag_unref(frag)
3061 static inline void __kc_skb_frag_unref(skb_frag_t *frag)
3063 put_page(skb_frag_page(frag));
3065 #endif /* __skb_frag_unref */
3067 #ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
3068 #define HAVE_PCI_DEV_FLAGS_ASSIGNED
3069 #define HAVE_VF_SPOOFCHK_CONFIGURE
3071 #endif /* < 3.2.0 */
3073 #if (RHEL_RELEASE_CODE && \
3074 (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \
3075 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
3076 #undef ixgbe_get_netdev_tc_txq
3077 #define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc])
3080 /*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) )
/* netdev_features_t was introduced in 3.3; older kernels keep the NETIF_F_*
 * feature bits in a plain u32 netdev->features field. */
typedef u32 netdev_features_t;
#else /* ! < 3.3.0 */
3084 #define HAVE_INT_NDO_VLAN_RX_ADD_VID
3085 #ifdef ETHTOOL_SRXNTUPLE
3086 #undef ETHTOOL_SRXNTUPLE
3088 #endif /* < 3.3.0 */
3090 /*****************************************************************************/
3091 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
/* RX-FCS and RX-ALL feature bits appeared in 3.4; define them to 0 on older
 * kernels so feature-mask tests compile and always evaluate false. */
#ifndef NETIF_F_RXFCS
#define NETIF_F_RXFCS	0
#endif /* NETIF_F_RXFCS */
#ifndef NETIF_F_RXALL
#define NETIF_F_RXALL	0
#endif /* NETIF_F_RXALL */
3099 #define NUMTCS_RETURNS_U8
3102 #endif /* < 3.4.0 */
3104 /*****************************************************************************/
3105 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) )
3107 #define HAVE_FDB_OPS
3108 #endif /* < 3.5.0 */
3109 #endif /* _KCOMPAT_H_ */