1 /*******************************************************************************
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "LICENSE.GPL".
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *******************************************************************************/
31 #ifndef LINUX_VERSION_CODE
32 #include <linux/version.h>
34 #define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
36 #include <linux/init.h>
37 #include <linux/types.h>
38 #include <linux/errno.h>
39 #include <linux/module.h>
40 #include <linux/pci.h>
41 #include <linux/netdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/skbuff.h>
44 #include <linux/ioport.h>
45 #include <linux/slab.h>
46 #include <linux/list.h>
47 #include <linux/delay.h>
48 #include <linux/sched.h>
51 #include <linux/udp.h>
52 #include <linux/mii.h>
53 #include <linux/vmalloc.h>
55 #include <linux/ethtool.h>
56 #include <linux/if_vlan.h>
58 /* NAPI enable/disable flags here */
61 #define adapter_struct igb_adapter
62 #define adapter_q_vector igb_q_vector
65 /* and finally set defines so that the code sees the changes */
70 /* packet split disable/enable */
71 #ifdef DISABLE_PACKET_SPLIT
72 #ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
73 #define CONFIG_IGB_DISABLE_PACKET_SPLIT
75 #endif /* DISABLE_PACKET_SPLIT */
77 /* MSI compatibility code for all kernels and drivers */
78 #ifdef DISABLE_PCI_MSI
81 #ifndef CONFIG_PCI_MSI
82 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
84 u16 vector; /* kernel uses to write allocated vector */
85 u16 entry; /* driver uses to specify entry, OS writes */
89 #define pci_enable_msi(a) -ENOTSUPP
90 #undef pci_disable_msi
91 #define pci_disable_msi(a) do {} while (0)
92 #undef pci_enable_msix
93 #define pci_enable_msix(a, b, c) -ENOTSUPP
94 #undef pci_disable_msix
95 #define pci_disable_msix(a) do {} while (0)
96 #define msi_remove_pci_irq_vectors(a) do {} while (0)
97 #endif /* CONFIG_PCI_MSI */
102 #ifdef DISABLE_NET_POLL_CONTROLLER
103 #undef CONFIG_NET_POLL_CONTROLLER
107 #define PMSG_SUSPEND 3
110 /* generic boolean compatibility */
116 #if ( GCC_VERSION < 3000 )
123 /* kernels less than 2.4.14 don't have this */
125 #define ETH_P_8021Q 0x8100
129 #define module_param(v,t,p) MODULE_PARM(v, "i");
132 #ifndef DMA_64BIT_MASK
133 #define DMA_64BIT_MASK 0xffffffffffffffffULL
136 #ifndef DMA_32BIT_MASK
137 #define DMA_32BIT_MASK 0x00000000ffffffffULL
140 #ifndef PCI_CAP_ID_EXP
141 #define PCI_CAP_ID_EXP 0x10
144 #ifndef PCIE_LINK_STATE_L0S
145 #define PCIE_LINK_STATE_L0S 1
147 #ifndef PCIE_LINK_STATE_L1
148 #define PCIE_LINK_STATE_L1 2
153 #define mmiowb() asm volatile ("mf.a" ::: "memory")
159 #ifndef SET_NETDEV_DEV
160 #define SET_NETDEV_DEV(net, pdev)
163 #if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
164 #define free_netdev(x) kfree(x)
167 #ifdef HAVE_POLL_CONTROLLER
168 #define CONFIG_NET_POLL_CONTROLLER
171 #ifndef SKB_DATAREF_SHIFT
172 /* if we do not have the infrastructure to detect if skb_header is cloned
173 just return false in all cases */
174 #define skb_header_cloned(x) 0
178 #define gso_size tso_size
179 #define gso_segs tso_segs
183 #define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \
184 vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan)
185 #define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb)
188 #ifndef NETIF_F_SCTP_CSUM
189 #define NETIF_F_SCTP_CSUM 0
193 #define NETIF_F_LRO (1 << 15)
196 #ifndef NETIF_F_NTUPLE
197 #define NETIF_F_NTUPLE (1 << 27)
201 #define IPPROTO_SCTP 132
204 #ifndef CHECKSUM_PARTIAL
205 #define CHECKSUM_PARTIAL CHECKSUM_HW
206 #define CHECKSUM_COMPLETE CHECKSUM_HW
209 #ifndef __read_mostly
210 #define __read_mostly
214 #define MII_RESV1 0x17 /* Reserved... */
/* Branch-prediction hints do not exist on these old kernels, so expand to
 * the bare expression.  The argument is parenthesized so that expansion
 * inside a larger expression (e.g. "!unlikely(a || b)") keeps the caller's
 * intended grouping — the previous unparenthesized form changed precedence.
 */
#define unlikely(_x) (_x)
#define likely(_x) (_x)
227 #define PCI_DEVICE(vend,dev) \
228 .vendor = (vend), .device = (dev), \
229 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
233 #define node_online(node) ((node) == 0)
236 #ifndef num_online_cpus
237 #define num_online_cpus() smp_num_cpus
241 #define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map)
244 #ifndef _LINUX_RANDOM_H
245 #include <linux/random.h>
248 #ifndef DECLARE_BITMAP
249 #ifndef BITS_TO_LONGS
250 #define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
252 #define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
259 #ifndef VLAN_ETH_HLEN
260 #define VLAN_ETH_HLEN 18
263 #ifndef VLAN_ETH_FRAME_LEN
264 #define VLAN_ETH_FRAME_LEN 1518
267 #if !defined(IXGBE_DCA) && !defined(IGB_DCA)
268 #define dca_get_tag(b) 0
269 #define dca_add_requester(a) -1
270 #define dca_remove_requester(b) do { } while(0)
271 #define DCA_PROVIDER_ADD 0x0001
272 #define DCA_PROVIDER_REMOVE 0x0002
275 #ifndef DCA_GET_TAG_TWO_ARGS
276 #define dca3_get_tag(a,b) dca_get_tag(b)
279 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
280 #if defined(__i386__) || defined(__x86_64__)
281 #define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
285 /* taken from 2.6.24 definition in linux/kernel.h */
287 #define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0)
292 #undef __ARG_PLACEHOLDER_1
293 #undef config_enabled
294 #undef _config_enabled
295 #undef __config_enabled
296 #undef ___config_enabled
/* Compile-time CONFIG_* test, back-ported from upstream
 * include/linux/kconfig.h.  The trick: when "cfg" is defined to 1, the
 * token paste below yields __ARG_PLACEHOLDER_1, which expands to "0," and
 * shifts an extra argument into ___config_enabled(), selecting 1; any
 * other (or undefined) value leaves a junk first argument and selects 0.
 */
#define __ARG_PLACEHOLDER_1 0,
#define config_enabled(cfg) _config_enabled(cfg)
#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
#define ___config_enabled(__ignored, val, ...) val
/* True when the option is built in (=y) or built as a module (=m). */
#define IS_ENABLED(option) \
	(config_enabled(option) || config_enabled(option##_MODULE))
308 #if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX)
309 struct _kc_vlan_ethhdr {
310 unsigned char h_dest[ETH_ALEN];
311 unsigned char h_source[ETH_ALEN];
314 __be16 h_vlan_encapsulated_proto;
316 #define vlan_ethhdr _kc_vlan_ethhdr
317 struct _kc_vlan_hdr {
319 __be16 h_vlan_encapsulated_proto;
321 #define vlan_hdr _kc_vlan_hdr
322 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
323 #define vlan_tx_tag_present(_skb) 0
324 #define vlan_tx_tag_get(_skb) 0
326 #endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */
328 #ifndef VLAN_PRIO_SHIFT
329 #define VLAN_PRIO_SHIFT 13
341 /*****************************************************************************/
342 /* Installations with ethtool version without eeprom, adapter id, or statistics
345 #ifndef ETH_GSTRING_LEN
346 #define ETH_GSTRING_LEN 32
349 #ifndef ETHTOOL_GSTATS
350 #define ETHTOOL_GSTATS 0x1d
351 #undef ethtool_drvinfo
352 #define ethtool_drvinfo k_ethtool_drvinfo
353 struct k_ethtool_drvinfo {
367 struct ethtool_stats {
372 #endif /* ETHTOOL_GSTATS */
374 #ifndef ETHTOOL_PHYS_ID
375 #define ETHTOOL_PHYS_ID 0x1c
376 #endif /* ETHTOOL_PHYS_ID */
378 #ifndef ETHTOOL_GSTRINGS
379 #define ETHTOOL_GSTRINGS 0x1b
380 enum ethtool_stringset {
384 struct ethtool_gstrings {
385 u32 cmd; /* ETHTOOL_GSTRINGS */
386 u32 string_set; /* string set id e.c. ETH_SS_TEST, etc*/
387 u32 len; /* number of strings in the string set */
390 #endif /* ETHTOOL_GSTRINGS */
393 #define ETHTOOL_TEST 0x1a
394 enum ethtool_test_flags {
395 ETH_TEST_FL_OFFLINE = (1 << 0),
396 ETH_TEST_FL_FAILED = (1 << 1),
398 struct ethtool_test {
405 #endif /* ETHTOOL_TEST */
407 #ifndef ETHTOOL_GEEPROM
408 #define ETHTOOL_GEEPROM 0xb
410 struct ethtool_eeprom {
418 struct ethtool_value {
422 #endif /* ETHTOOL_GEEPROM */
424 #ifndef ETHTOOL_GLINK
425 #define ETHTOOL_GLINK 0xa
426 #endif /* ETHTOOL_GLINK */
429 #define ETHTOOL_GWOL 0x5
430 #define ETHTOOL_SWOL 0x6
432 struct ethtool_wolinfo {
436 u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */
438 #endif /* ETHTOOL_GWOL */
440 #ifndef ETHTOOL_GREGS
441 #define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */
442 #define ethtool_regs _kc_ethtool_regs
443 /* for passing big chunks of data */
444 struct _kc_ethtool_regs {
446 u32 version; /* driver-specific, indicates different chips/revs */
450 #endif /* ETHTOOL_GREGS */
452 #ifndef ETHTOOL_GMSGLVL
453 #define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
455 #ifndef ETHTOOL_SMSGLVL
456 #define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */
458 #ifndef ETHTOOL_NWAY_RST
459 #define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */
461 #ifndef ETHTOOL_GLINK
462 #define ETHTOOL_GLINK 0x0000000a /* Get link status */
464 #ifndef ETHTOOL_GEEPROM
465 #define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
467 #ifndef ETHTOOL_SEEPROM
468 #define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */
470 #ifndef ETHTOOL_GCOALESCE
471 #define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
472 /* for configuring coalescing parameters of chip */
473 #define ethtool_coalesce _kc_ethtool_coalesce
474 struct _kc_ethtool_coalesce {
475 u32 cmd; /* ETHTOOL_{G,S}COALESCE */
477 /* How many usecs to delay an RX interrupt after
478 * a packet arrives. If 0, only rx_max_coalesced_frames
481 u32 rx_coalesce_usecs;
483 /* How many packets to delay an RX interrupt after
484 * a packet arrives. If 0, only rx_coalesce_usecs is
485 * used. It is illegal to set both usecs and max frames
486 * to zero as this would cause RX interrupts to never be
489 u32 rx_max_coalesced_frames;
491 /* Same as above two parameters, except that these values
492 * apply while an IRQ is being serviced by the host. Not
493 * all cards support this feature and the values are ignored
496 u32 rx_coalesce_usecs_irq;
497 u32 rx_max_coalesced_frames_irq;
499 /* How many usecs to delay a TX interrupt after
500 * a packet is sent. If 0, only tx_max_coalesced_frames
503 u32 tx_coalesce_usecs;
505 /* How many packets to delay a TX interrupt after
506 * a packet is sent. If 0, only tx_coalesce_usecs is
507 * used. It is illegal to set both usecs and max frames
508 * to zero as this would cause TX interrupts to never be
511 u32 tx_max_coalesced_frames;
513 /* Same as above two parameters, except that these values
514 * apply while an IRQ is being serviced by the host. Not
515 * all cards support this feature and the values are ignored
518 u32 tx_coalesce_usecs_irq;
519 u32 tx_max_coalesced_frames_irq;
521 /* How many usecs to delay in-memory statistics
522 * block updates. Some drivers do not have an in-memory
523 * statistic block, and in such cases this value is ignored.
524 * This value must not be zero.
526 u32 stats_block_coalesce_usecs;
528 /* Adaptive RX/TX coalescing is an algorithm implemented by
529 * some drivers to improve latency under low packet rates and
530 * improve throughput under high packet rates. Some drivers
531 * only implement one of RX or TX adaptive coalescing. Anything
532 * not implemented by the driver causes these values to be
535 u32 use_adaptive_rx_coalesce;
536 u32 use_adaptive_tx_coalesce;
538 /* When the packet rate (measured in packets per second)
539 * is below pkt_rate_low, the {rx,tx}_*_low parameters are
543 u32 rx_coalesce_usecs_low;
544 u32 rx_max_coalesced_frames_low;
545 u32 tx_coalesce_usecs_low;
546 u32 tx_max_coalesced_frames_low;
548 /* When the packet rate is below pkt_rate_high but above
549 * pkt_rate_low (both measured in packets per second) the
550 * normal {rx,tx}_* coalescing parameters are used.
553 /* When the packet rate is (measured in packets per second)
554 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
558 u32 rx_coalesce_usecs_high;
559 u32 rx_max_coalesced_frames_high;
560 u32 tx_coalesce_usecs_high;
561 u32 tx_max_coalesced_frames_high;
563 /* How often to do adaptive coalescing packet rate sampling,
564 * measured in seconds. Must not be zero.
566 u32 rate_sample_interval;
568 #endif /* ETHTOOL_GCOALESCE */
570 #ifndef ETHTOOL_SCOALESCE
571 #define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
573 #ifndef ETHTOOL_GRINGPARAM
574 #define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
575 /* for configuring RX/TX ring parameters */
576 #define ethtool_ringparam _kc_ethtool_ringparam
577 struct _kc_ethtool_ringparam {
578 u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */
580 /* Read only attributes. These indicate the maximum number
581 * of pending RX/TX ring entries the driver will allow the
585 u32 rx_mini_max_pending;
586 u32 rx_jumbo_max_pending;
589 /* Values changeable by the user. The valid values are
590 * in the range 1 to the "*_max_pending" counterpart above.
594 u32 rx_jumbo_pending;
597 #endif /* ETHTOOL_GRINGPARAM */
599 #ifndef ETHTOOL_SRINGPARAM
600 #define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */
602 #ifndef ETHTOOL_GPAUSEPARAM
603 #define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
604 /* for configuring link flow control parameters */
605 #define ethtool_pauseparam _kc_ethtool_pauseparam
606 struct _kc_ethtool_pauseparam {
607 u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
609 /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
610 * being true) the user may set 'autoneg' here non-zero to have the
611 * pause parameters be auto-negotiated too. In such a case, the
612 * {rx,tx}_pause values below determine what capabilities are
615 * If 'autoneg' is zero or the link is not being auto-negotiated,
616 * then {rx,tx}_pause force the driver to use/not-use pause
623 #endif /* ETHTOOL_GPAUSEPARAM */
625 #ifndef ETHTOOL_SPAUSEPARAM
626 #define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */
628 #ifndef ETHTOOL_GRXCSUM
629 #define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
631 #ifndef ETHTOOL_SRXCSUM
632 #define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
634 #ifndef ETHTOOL_GTXCSUM
635 #define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
637 #ifndef ETHTOOL_STXCSUM
638 #define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
641 #define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable
645 #define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable
646 * (ethtool_value). */
649 #define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */
651 #ifndef ETHTOOL_GSTRINGS
652 #define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */
654 #ifndef ETHTOOL_PHYS_ID
655 #define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
657 #ifndef ETHTOOL_GSTATS
658 #define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */
661 #define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */
664 #define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */
667 #ifndef ETHTOOL_BUSINFO_LEN
668 #define ETHTOOL_BUSINFO_LEN 32
671 #ifndef RHEL_RELEASE_VERSION
672 #define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))
674 #ifndef AX_RELEASE_VERSION
675 #define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b))
678 #ifndef AX_RELEASE_CODE
679 #define AX_RELEASE_CODE 0
682 #if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0))
683 #define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0)
684 #elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1))
685 #define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1)
686 #elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2))
687 #define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3)
690 #ifndef RHEL_RELEASE_CODE
691 /* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */
692 #define RHEL_RELEASE_CODE 0
695 /* SuSE version macro is the same as Linux kernel version */
697 #define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c)
699 #ifdef CONFIG_SUSE_KERNEL
700 #if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) )
701 /* SLES11 GA is 2.6.27 based */
702 #define SLE_VERSION_CODE SLE_VERSION(11,0,0)
703 #elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) )
704 /* SLES11 SP1 is 2.6.32 based */
705 #define SLE_VERSION_CODE SLE_VERSION(11,1,0)
706 #elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,61)) && \
707 (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)))
708 /* SLES11 SP3 is at least 3.0.61+ based */
709 #define SLE_VERSION_CODE SLE_VERSION(11,3,0)
710 #elif ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,28) )
711 /* SLES12 is at least 3.12.28+ based */
712 #define SLE_VERSION_CODE SLE_VERSION(12,0,0)
713 #endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */
714 #endif /* CONFIG_SUSE_KERNEL */
715 #ifndef SLE_VERSION_CODE
716 #define SLE_VERSION_CODE 0
717 #endif /* SLE_VERSION_CODE */
719 /* Ubuntu release and kernel codes must be specified from Makefile */
720 #ifndef UBUNTU_RELEASE_VERSION
721 #define UBUNTU_RELEASE_VERSION(a,b) (((a) * 100) + (b))
723 #ifndef UBUNTU_KERNEL_VERSION
724 #define UBUNTU_KERNEL_VERSION(a,b,c,abi,upload) (((a) << 40) + ((b) << 32) + ((c) << 24) + ((abi) << 8) + (upload))
726 #ifndef UBUNTU_RELEASE_CODE
727 #define UBUNTU_RELEASE_CODE 0
729 #ifndef UBUNTU_KERNEL_CODE
730 #define UBUNTU_KERNEL_CODE 0
736 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
738 #endif /* __KLOCWORK__ */
740 /*****************************************************************************/
742 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
744 /**************************************/
747 #ifndef pci_set_dma_mask
748 #define pci_set_dma_mask _kc_pci_set_dma_mask
749 extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
752 #ifndef pci_request_regions
753 #define pci_request_regions _kc_pci_request_regions
754 extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
757 #ifndef pci_release_regions
758 #define pci_release_regions _kc_pci_release_regions
759 extern void _kc_pci_release_regions(struct pci_dev *pdev);
762 /**************************************/
763 /* NETWORK DRIVER API */
765 #ifndef alloc_etherdev
766 #define alloc_etherdev _kc_alloc_etherdev
767 extern struct net_device * _kc_alloc_etherdev(int sizeof_priv);
770 #ifndef is_valid_ether_addr
771 #define is_valid_ether_addr _kc_is_valid_ether_addr
772 extern int _kc_is_valid_ether_addr(u8 *addr);
775 /**************************************/
779 #define INIT_TQUEUE(_tq, _routine, _data) \
781 INIT_LIST_HEAD(&(_tq)->list); \
783 (_tq)->routine = _routine; \
784 (_tq)->data = _data; \
788 #endif /* 2.4.3 => 2.4.0 */
790 /*****************************************************************************/
791 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
792 /* Generic MII registers. */
793 #define MII_BMCR 0x00 /* Basic mode control register */
794 #define MII_BMSR 0x01 /* Basic mode status register */
795 #define MII_PHYSID1 0x02 /* PHYS ID 1 */
796 #define MII_PHYSID2 0x03 /* PHYS ID 2 */
797 #define MII_ADVERTISE 0x04 /* Advertisement control reg */
798 #define MII_LPA 0x05 /* Link partner ability reg */
799 #define MII_EXPANSION 0x06 /* Expansion register */
800 /* Basic mode control register. */
801 #define BMCR_FULLDPLX 0x0100 /* Full duplex */
802 #define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
803 /* Basic mode status register. */
804 #define BMSR_ERCAP 0x0001 /* Ext-reg capability */
805 #define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */
806 #define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */
807 #define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */
808 #define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */
809 #define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */
810 /* Advertisement control register. */
811 #define ADVERTISE_CSMA 0x0001 /* Only selector supported */
812 #define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */
813 #define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */
814 #define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */
815 #define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */
816 #define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
817 ADVERTISE_100HALF | ADVERTISE_100FULL)
818 /* Expansion register for auto-negotiation. */
819 #define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */
822 /*****************************************************************************/
824 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
826 #ifndef pci_set_power_state
827 #define pci_set_power_state _kc_pci_set_power_state
828 extern int _kc_pci_set_power_state(struct pci_dev *dev, int state);
831 #ifndef pci_enable_wake
832 #define pci_enable_wake _kc_pci_enable_wake
833 extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
836 #ifndef pci_disable_device
837 #define pci_disable_device _kc_pci_disable_device
838 extern void _kc_pci_disable_device(struct pci_dev *pdev);
841 /* PCI PM entry point syntax changed, so don't support suspend/resume */
844 #endif /* 2.4.6 => 2.4.3 */
846 #ifndef HAVE_PCI_SET_MWI
847 #define pci_set_mwi(X) pci_write_config_word(X, \
848 PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
849 PCI_COMMAND_INVALIDATE);
850 #define pci_clear_mwi(X) pci_write_config_word(X, \
851 PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
852 ~PCI_COMMAND_INVALIDATE);
855 /*****************************************************************************/
856 /* 2.4.10 => 2.4.9 */
857 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
859 /**************************************/
862 #ifndef MODULE_LICENSE
863 #define MODULE_LICENSE(X)
866 /**************************************/
/* Type-safe min()/max() using GCC statement expressions (back-port of the
 * kernel versions).  Each argument is evaluated exactly once, and the
 * dummy pointer comparison "(void) (&_x == &_y)" provokes a compiler
 * warning when x and y have incompatible types.
 */
#define min(x,y) ({ \
	const typeof(x) _x = (x);	\
	const typeof(y) _y = (y);	\
	(void) (&_x == &_y);		\
	_x < _y ? _x : _y; })
#define max(x,y) ({ \
	const typeof(x) _x = (x);	\
	const typeof(y) _y = (y);	\
	(void) (&_x == &_y);		\
	_x > _y ? _x : _y; })
883 #define min_t(type,x,y) ({ \
886 _x < _y ? _x : _y; })
888 #define max_t(type,x,y) ({ \
891 _x > _y ? _x : _y; })
893 #ifndef list_for_each_safe
894 #define list_for_each_safe(pos, n, head) \
895 for (pos = (head)->next, n = pos->next; pos != (head); \
896 pos = n, n = pos->next)
899 #ifndef ____cacheline_aligned_in_smp
901 #define ____cacheline_aligned_in_smp ____cacheline_aligned
903 #define ____cacheline_aligned_in_smp
904 #endif /* CONFIG_SMP */
907 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
908 extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...);
909 #define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args)
910 extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
911 #define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args)
912 #else /* 2.4.8 => 2.4.9 */
913 extern int snprintf(char * buf, size_t size, const char *fmt, ...);
914 extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
916 #endif /* 2.4.10 -> 2.4.6 */
919 /*****************************************************************************/
920 /* 2.4.12 => 2.4.10 */
921 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) )
922 #ifndef HAVE_NETIF_MSG
923 #define HAVE_NETIF_MSG 1
925 NETIF_MSG_DRV = 0x0001,
926 NETIF_MSG_PROBE = 0x0002,
927 NETIF_MSG_LINK = 0x0004,
928 NETIF_MSG_TIMER = 0x0008,
929 NETIF_MSG_IFDOWN = 0x0010,
930 NETIF_MSG_IFUP = 0x0020,
931 NETIF_MSG_RX_ERR = 0x0040,
932 NETIF_MSG_TX_ERR = 0x0080,
933 NETIF_MSG_TX_QUEUED = 0x0100,
934 NETIF_MSG_INTR = 0x0200,
935 NETIF_MSG_TX_DONE = 0x0400,
936 NETIF_MSG_RX_STATUS = 0x0800,
937 NETIF_MSG_PKTDATA = 0x1000,
938 NETIF_MSG_HW = 0x2000,
939 NETIF_MSG_WOL = 0x4000,
/* Message-level predicates: test one NETIF_MSG_* category bit against the
 * driver's msg_enable bitmask (same helpers modern <linux/netdevice.h>
 * provides; "p" is any struct with a msg_enable member). */
#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
955 #endif /* !HAVE_NETIF_MSG */
956 #endif /* 2.4.12 => 2.4.10 */
958 /*****************************************************************************/
959 /* 2.4.13 => 2.4.12 */
960 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
962 /**************************************/
963 /* PCI DMA MAPPING */
966 #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
970 #define pci_map_page _kc_pci_map_page
971 extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
974 #ifndef pci_unmap_page
975 #define pci_unmap_page _kc_pci_unmap_page
976 extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
979 /* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
981 #undef DMA_32BIT_MASK
982 #define DMA_32BIT_MASK 0xffffffff
983 #undef DMA_64BIT_MASK
984 #define DMA_64BIT_MASK 0xffffffff
986 /**************************************/
990 #define cpu_relax() rep_nop()
994 unsigned char h_dest[ETH_ALEN];
995 unsigned char h_source[ETH_ALEN];
996 unsigned short h_vlan_proto;
997 unsigned short h_vlan_TCI;
998 unsigned short h_vlan_encapsulated_proto;
1000 #endif /* 2.4.13 => 2.4.12 */
1002 /*****************************************************************************/
1003 /* 2.4.17 => 2.4.12 */
1004 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )
1007 #define __devexit_p(x) &(x)
1011 /* For Kernel 3.8 these are not defined - so undefine all */
1015 #undef __devinitdata
1016 #define __devexit_p(x) &(x)
1019 #define __devinitdata
1021 #endif /* 2.4.17 => 2.4.13 */
1023 /*****************************************************************************/
1024 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) )
1025 #define NETIF_MSG_HW 0x2000
1026 #define NETIF_MSG_WOL 0x4000
1028 #ifndef netif_msg_hw
1029 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
1031 #ifndef netif_msg_wol
1032 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
1036 /*****************************************************************************/
1038 /*****************************************************************************/
1039 /* 2.4.20 => 2.4.19 */
1040 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
1042 /* we won't support NAPI on less than 2.4.20 */
1047 #endif /* 2.4.20 => 2.4.19 */
1049 /*****************************************************************************/
1050 /* 2.4.22 => 2.4.17 */
1051 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
1052 #define pci_name(x) ((x)->slot_name)
1054 #ifndef SUPPORTED_10000baseT_Full
1055 #define SUPPORTED_10000baseT_Full (1 << 12)
1057 #ifndef ADVERTISED_10000baseT_Full
1058 #define ADVERTISED_10000baseT_Full (1 << 12)
1062 /*****************************************************************************/
1063 /* 2.4.22 => 2.4.17 */
1065 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
1071 /*****************************************************************************/
1072 /*****************************************************************************/
1073 /* 2.4.23 => 2.4.22 */
1074 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
1075 /*****************************************************************************/
1077 #ifndef netif_poll_disable
1078 #define netif_poll_disable(x) _kc_netif_poll_disable(x)
1079 static inline void _kc_netif_poll_disable(struct net_device *netdev)
1081 while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
1083 current->state = TASK_INTERRUPTIBLE;
1084 schedule_timeout(1);
1088 #ifndef netif_poll_enable
1089 #define netif_poll_enable(x) _kc_netif_poll_enable(x)
1090 static inline void _kc_netif_poll_enable(struct net_device *netdev)
1092 clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
1096 #ifndef netif_tx_disable
1097 #define netif_tx_disable(x) _kc_netif_tx_disable(x)
1098 static inline void _kc_netif_tx_disable(struct net_device *dev)
1100 spin_lock_bh(&dev->xmit_lock);
1101 netif_stop_queue(dev);
1102 spin_unlock_bh(&dev->xmit_lock);
1105 #else /* 2.4.23 => 2.4.22 */
1107 #endif /* 2.4.23 => 2.4.22 */
1109 /*****************************************************************************/
1110 /* 2.6.4 => 2.6.0 */
1111 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
1112 ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
1113 LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
1114 #define ETHTOOL_OPS_COMPAT
1115 #endif /* 2.6.4 => 2.6.0 */
1117 /*****************************************************************************/
1118 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) )
1120 #endif /* < 2.4.27 */
1122 /*****************************************************************************/
1123 /* 2.5.71 => 2.4.x */
1124 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
1125 #define sk_protocol protocol
1126 #define pci_get_device pci_find_device
1127 #endif /* 2.5.70 => 2.4.x */
1129 /*****************************************************************************/
1130 /* < 2.4.27 or 2.6.0 <= 2.6.5 */
1131 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
1132 ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
1133 LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
1135 #ifndef netif_msg_init
1136 #define netif_msg_init _kc_netif_msg_init
1137 static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
1140 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
1141 return default_msg_enable_bits;
1142 if (debug_value == 0) /* no output */
1144 /* set low N bits */
1145 return (1 << debug_value) -1;
1149 #endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
1150 /*****************************************************************************/
1151 #if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
1152 (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
1153 ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
1154 #define netdev_priv(x) x->priv
1157 /*****************************************************************************/
1159 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
1160 #include <linux/rtnetlink.h>
1161 #undef pci_register_driver
1162 #define pci_register_driver pci_module_init
1165 * Most of the dma compat code is copied/modifed from the 2.4.37
1166 * /include/linux/libata-compat.h header file
1168 /* These definitions mirror those in pci.h, so they can be used
1169 * interchangeably with their PCI_ counterparts */
1170 enum dma_data_direction {
1171 DMA_BIDIRECTIONAL = 0,
1173 DMA_FROM_DEVICE = 2,
1178 struct pci_dev pdev;
1181 static inline struct pci_dev *to_pci_dev (struct device *dev)
1183 return (struct pci_dev *) dev;
1185 static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
1187 return (struct device *) pdev;
/* Pre-2.5 kernels have no struct device / dev_printk(); in this compat
 * layer a "struct device *" is really a pci_dev (see to_pci_dev above),
 * so route the dev_*() message macros through printk() with the severity
 * and PCI slot name as the message prefix. */
#define pdev_printk(lvl, pdev, fmt, args...) \
	printk("%s %s: " fmt, lvl, pci_name(pdev), ## args)
#define dev_err(dev, fmt, args...) \
	pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args)
#define dev_info(dev, fmt, args...) \
	pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
#define dev_warn(dev, fmt, args...) \
	pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
#define dev_notice(dev, fmt, args...) \
	pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args)
#define dev_dbg(dev, fmt, args...) \
	pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args)
1203 /* NOTE: dangerous! we ignore the 'gfp' argument */
1204 #define dma_alloc_coherent(dev,sz,dma,gfp) \
1205 pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
1206 #define dma_free_coherent(dev,sz,addr,dma_addr) \
1207 pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))
1209 #define dma_map_page(dev,a,b,c,d) \
1210 pci_map_page(to_pci_dev(dev),(a),(b),(c),(d))
1211 #define dma_unmap_page(dev,a,b,c) \
1212 pci_unmap_page(to_pci_dev(dev),(a),(b),(c))
1214 #define dma_map_single(dev,a,b,c) \
1215 pci_map_single(to_pci_dev(dev),(a),(b),(c))
1216 #define dma_unmap_single(dev,a,b,c) \
1217 pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
/* Map the generic DMA scatterlist API onto the old pci_* helpers.
 * Fix: the original expansions were missing the closing ')', which made
 * every call site a hard compile error. */
#define dma_map_sg(dev, sg, nents, dir) \
	pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
#define dma_unmap_sg(dev, sg, nents, dir) \
	pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
1224 #define dma_sync_single(dev,a,b,c) \
1225 pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
1227 /* for range just sync everything, that's all the pci API can do */
1228 #define dma_sync_single_range(dev,addr,off,sz,dir) \
1229 pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir))
1231 #define dma_set_mask(dev,mask) \
1232 pci_set_dma_mask(to_pci_dev(dev),(mask))
1234 /* hlist_* code - double linked lists */
1236 struct hlist_node *first;
1240 struct hlist_node *next, **pprev;
1243 static inline void __hlist_del(struct hlist_node *n)
1245 struct hlist_node *next = n->next;
1246 struct hlist_node **pprev = n->pprev;
1249 next->pprev = pprev;
1252 static inline void hlist_del(struct hlist_node *n)
1259 static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
1261 struct hlist_node *first = h->first;
1264 first->pprev = &n->next;
1266 n->pprev = &h->first;
1269 static inline int hlist_empty(const struct hlist_head *h)
1273 #define HLIST_HEAD_INIT { .first = NULL }
1274 #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
1275 #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
1276 static inline void INIT_HLIST_NODE(struct hlist_node *h)
1283 #define might_sleep()
1286 static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
1290 #endif /* <= 2.5.0 */
1292 /*****************************************************************************/
1293 /* 2.5.28 => 2.4.23 */
1294 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
1296 #include <linux/tqueue.h>
1297 #define work_struct tq_struct
1299 #define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
1301 #define container_of list_entry
1302 #define schedule_work schedule_task
1303 #define flush_scheduled_work flush_scheduled_tasks
1304 #define cancel_work_sync(x) flush_scheduled_work()
1306 #endif /* 2.5.28 => 2.4.17 */
1308 /*****************************************************************************/
1309 /* 2.6.0 => 2.5.28 */
1310 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
1311 #ifndef read_barrier_depends
1312 #define read_barrier_depends() rmb()
1316 #define get_cpu() smp_processor_id()
1318 #define put_cpu() do { } while(0)
1319 #define MODULE_INFO(version, _version)
1320 #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1321 #define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
1323 #ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
1324 #define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
1327 #define dma_set_coherent_mask(dev,mask) 1
1330 #define dev_put(dev) __dev_put(dev)
1332 #ifndef skb_fill_page_desc
1333 #define skb_fill_page_desc _kc_skb_fill_page_desc
1334 extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
1338 #define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
1341 #define page_count(p) atomic_read(&(p)->count)
1347 #define MAX_NUMNODES 1
1349 /* find_first_bit and find_next_bit are not defined for most
1350 * 2.4 kernels (except for the redhat 2.4.21 kernels
1352 #include <linux/bitops.h>
1353 #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
1354 #undef find_next_bit
1355 #define find_next_bit _kc_find_next_bit
1356 extern unsigned long _kc_find_next_bit(const unsigned long *addr,
1358 unsigned long offset);
1359 #define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
1363 static inline const char *_kc_netdev_name(const struct net_device *dev)
1365 if (strchr(dev->name, '%'))
1366 return "(unregistered net_device)";
1369 #define netdev_name(netdev) _kc_netdev_name(netdev)
1370 #endif /* netdev_name */
1373 #define strlcpy _kc_strlcpy
1374 extern size_t _kc_strlcpy(char *dest, const char *src, size_t size);
1375 #endif /* strlcpy */
1378 #if BITS_PER_LONG == 64
1379 # define do_div(n,base) ({ \
1380 uint32_t __base = (base); \
1382 __rem = ((uint64_t)(n)) % __base; \
1383 (n) = ((uint64_t)(n)) / __base; \
1386 #elif BITS_PER_LONG == 32
1387 extern uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor);
1388 # define do_div(n,base) ({ \
1389 uint32_t __base = (base); \
1391 if (likely(((n) >> 32) == 0)) { \
1392 __rem = (uint32_t)(n) % __base; \
1393 (n) = (uint32_t)(n) / __base; \
1395 __rem = _kc__div64_32(&(n), __base); \
1398 #else /* BITS_PER_LONG == ?? */
1399 # error do_div() does not yet support the C64
1400 #endif /* BITS_PER_LONG */
1403 #ifndef NSEC_PER_SEC
1404 #define NSEC_PER_SEC 1000000000L
1407 #undef HAVE_I2C_SUPPORT
1409 #if IS_ENABLED(CONFIG_I2C_ALGOBIT) && \
1410 (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,9)))
1411 #define HAVE_I2C_SUPPORT
1412 #endif /* IS_ENABLED(CONFIG_I2C_ALGOBIT) */
1414 #endif /* 2.6.0 => 2.5.28 */
1415 /*****************************************************************************/
1416 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )
1417 #define dma_pool pci_pool
1418 #define dma_pool_destroy pci_pool_destroy
1419 #define dma_pool_alloc pci_pool_alloc
1420 #define dma_pool_free pci_pool_free
1422 #define dma_pool_create(name,dev,size,align,allocation) \
1423 pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation))
1424 #endif /* < 2.6.3 */
1426 /*****************************************************************************/
1427 /* 2.6.4 => 2.6.0 */
1428 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
1429 #define MODULE_VERSION(_version) MODULE_INFO(version, _version)
1430 #endif /* 2.6.4 => 2.6.0 */
1432 /*****************************************************************************/
1433 /* 2.6.5 => 2.6.0 */
1434 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
1435 #define dma_sync_single_for_cpu dma_sync_single
1436 #define dma_sync_single_for_device dma_sync_single
1437 #define dma_sync_single_range_for_cpu dma_sync_single_range
1438 #define dma_sync_single_range_for_device dma_sync_single_range
1439 #ifndef pci_dma_mapping_error
1440 #define pci_dma_mapping_error _kc_pci_dma_mapping_error
1441 static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
1443 return dma_addr == 0;
1446 #endif /* 2.6.5 => 2.6.0 */
1448 /*****************************************************************************/
1449 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
1450 extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...);
1451 #define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args)
1452 #endif /* < 2.6.4 */
1454 /*****************************************************************************/
1455 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
1456 /* taken from 2.6 include/linux/bitmap.h */
1458 #define bitmap_zero _kc_bitmap_zero
1459 static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
1461 if (nbits <= BITS_PER_LONG)
1464 int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
1465 memset(dst, 0, len);
1468 #define random_ether_addr _kc_random_ether_addr
1469 static inline void _kc_random_ether_addr(u8 *addr)
1471 get_random_bytes(addr, ETH_ALEN);
1472 addr[0] &= 0xfe; /* clear multicast */
1473 addr[0] |= 0x02; /* set local assignment */
1475 #define page_to_nid(x) 0
1477 #endif /* < 2.6.6 */
1479 /*****************************************************************************/
1480 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
1482 #define if_mii _kc_if_mii
1483 static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
1485 return (struct mii_ioctl_data *) &rq->ifr_ifru;
1491 #endif /* < 2.6.7 */
1493 /*****************************************************************************/
1494 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
1495 #ifndef PCI_EXP_DEVCTL
1496 #define PCI_EXP_DEVCTL 8
1498 #ifndef PCI_EXP_DEVCTL_CERE
1499 #define PCI_EXP_DEVCTL_CERE 0x0001
1501 #define PCI_EXP_FLAGS 2 /* Capabilities register */
1502 #define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
1503 #define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
1504 #define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
1505 #define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */
1506 #define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
1507 #define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
1508 #define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
1509 #define PCI_EXP_DEVCAP 4 /* Device capabilities */
1510 #define PCI_EXP_DEVSTA 10 /* Device Status */
1511 #define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \
1512 schedule_timeout((x * HZ)/1000 + 2); \
1515 #endif /* < 2.6.8 */
1517 /*****************************************************************************/
1518 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
1519 #include <net/dsfield.h>
1523 #define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
1524 extern void *_kc_kzalloc(size_t size, int flags);
1526 #define MSEC_PER_SEC 1000L
1527 static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
1529 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
1530 return (MSEC_PER_SEC / HZ) * j;
1531 #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
1532 return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
1534 return (j * MSEC_PER_SEC) / HZ;
1537 static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
1539 if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
1540 return MAX_JIFFY_OFFSET;
1541 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
1542 return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
1543 #elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
1544 return m * (HZ / MSEC_PER_SEC);
1546 return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
1550 #define msleep_interruptible _kc_msleep_interruptible
1551 static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
1553 unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;
1555 while (timeout && !signal_pending(current)) {
1556 __set_current_state(TASK_INTERRUPTIBLE);
1557 timeout = schedule_timeout(timeout);
1559 return _kc_jiffies_to_msecs(timeout);
1562 /* Basic mode control register. */
1563 #define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
1584 static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
1586 return (struct vlan_ethhdr *)skb->mac.raw;
1589 /* Wake-On-Lan options. */
1590 #define WAKE_PHY (1 << 0)
1591 #define WAKE_UCAST (1 << 1)
1592 #define WAKE_MCAST (1 << 2)
1593 #define WAKE_BCAST (1 << 3)
1594 #define WAKE_ARP (1 << 4)
1595 #define WAKE_MAGIC (1 << 5)
1596 #define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */
1598 #define skb_header_pointer _kc_skb_header_pointer
1599 static inline void *_kc_skb_header_pointer(const struct sk_buff *skb,
1600 int offset, int len, void *buffer)
1602 int hlen = skb_headlen(skb);
1604 if (hlen - offset >= len)
1605 return skb->data + offset;
1607 #ifdef MAX_SKB_FRAGS
1608 if (skb_copy_bits(skb, offset, buffer, len) < 0)
1616 #ifndef NETDEV_TX_OK
1617 #define NETDEV_TX_OK 0
1619 #ifndef NETDEV_TX_BUSY
1620 #define NETDEV_TX_BUSY 1
1622 #ifndef NETDEV_TX_LOCKED
1623 #define NETDEV_TX_LOCKED -1
1630 #endif /* < 2.6.9 */
1632 /*****************************************************************************/
1633 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
1634 #ifdef module_param_array_named
1635 #undef module_param_array_named
1636 #define module_param_array_named(name, array, type, nump, perm) \
1637 static struct kparam_array __param_arr_##name \
1638 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
1639 sizeof(array[0]), array }; \
1640 module_param_call(name, param_array_set, param_array_get, \
1641 &__param_arr_##name, perm)
1642 #endif /* module_param_array_named */
1644 * num_online is broken for all < 2.6.10 kernels. This is needed to support
1645 * Node module parameter of ixgbe.
1647 #undef num_online_nodes
1648 #define num_online_nodes(n) 1
1649 extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
1650 #undef node_online_map
1651 #define node_online_map _kcompat_node_online_map
1652 #define pci_get_class pci_find_class
1653 #endif /* < 2.6.10 */
1655 /*****************************************************************************/
1656 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
1661 #define PCI_D3cold 4
1662 typedef int pci_power_t;
/* Pre-2.6.11 kernels have no PM-state translation; just hand the requested
 * state straight back.  Expansion parenthesized for expression safety. */
#define pci_choose_state(pdev,state) (state)
1664 #define PMSG_SUSPEND 3
1665 #define PCI_EXP_LNKCTL 16
1669 #ifndef ARCH_HAS_PREFETCH
1673 #ifndef NET_IP_ALIGN
1674 #define NET_IP_ALIGN 2
1677 #define KC_USEC_PER_SEC 1000000L
1678 #define usecs_to_jiffies _kc_usecs_to_jiffies
1679 static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
1681 #if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
1682 return (KC_USEC_PER_SEC / HZ) * j;
1683 #elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
1684 return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
1686 return (j * KC_USEC_PER_SEC) / HZ;
1689 static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
1691 if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
1692 return MAX_JIFFY_OFFSET;
1693 #if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
1694 return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
1695 #elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
1696 return m * (HZ / KC_USEC_PER_SEC);
1698 return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
1702 #define PCI_EXP_LNKCAP 12 /* Link Capabilities */
1703 #define PCI_EXP_LNKSTA 18 /* Link Status */
1704 #define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
1705 #define PCI_EXP_SLTCTL 24 /* Slot Control */
1706 #define PCI_EXP_SLTSTA 26 /* Slot Status */
1707 #define PCI_EXP_RTCTL 28 /* Root Control */
1708 #define PCI_EXP_RTCAP 30 /* Root Capabilities */
1709 #define PCI_EXP_RTSTA 32 /* Root Status */
1710 #endif /* < 2.6.11 */
1712 /*****************************************************************************/
1713 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
1714 #include <linux/reboot.h>
1715 #define USE_REBOOT_NOTIFIER
1717 /* Generic MII registers. */
1718 #define MII_CTRL1000 0x09 /* 1000BASE-T control */
1719 #define MII_STAT1000 0x0a /* 1000BASE-T status */
1720 /* Advertisement control register. */
1721 #define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
1722 #define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */
1723 /* Link partner ability register. */
1724 #define LPA_PAUSE_CAP 0x0400 /* Can pause */
1725 #define LPA_PAUSE_ASYM          0x0800  /* Can pause asymmetrically    */
1726 /* 1000BASE-T Control register */
1727 #define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
1728 #define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */
1729 /* 1000BASE-T Status register */
1730 #define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */
1731 #define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */
1733 #ifndef is_zero_ether_addr
1734 #define is_zero_ether_addr _kc_is_zero_ether_addr
1735 static inline int _kc_is_zero_ether_addr(const u8 *addr)
1737 return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
1739 #endif /* is_zero_ether_addr */
1740 #ifndef is_multicast_ether_addr
1741 #define is_multicast_ether_addr _kc_is_multicast_ether_addr
1742 static inline int _kc_is_multicast_ether_addr(const u8 *addr)
1744 return addr[0] & 0x01;
1746 #endif /* is_multicast_ether_addr */
1747 #endif /* < 2.6.12 */
1749 /*****************************************************************************/
1750 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
1752 #define kstrdup _kc_kstrdup
1753 extern char *_kc_kstrdup(const char *s, unsigned int gfp);
1755 #endif /* < 2.6.13 */
1757 /*****************************************************************************/
1758 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
1759 #define pm_message_t u32
1761 #define kzalloc _kc_kzalloc
1762 extern void *_kc_kzalloc(size_t size, int flags);
1765 /* Generic MII registers. */
1766 #define MII_ESTATUS 0x0f /* Extended Status */
1767 /* Basic mode status register. */
1768 #define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */
1769 /* Extended status register. */
1770 #define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */
1771 #define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */
1773 #define SUPPORTED_Pause (1 << 13)
1774 #define SUPPORTED_Asym_Pause (1 << 14)
1775 #define ADVERTISED_Pause (1 << 13)
1776 #define ADVERTISED_Asym_Pause (1 << 14)
1778 #if (!(RHEL_RELEASE_CODE && \
1779 (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \
1780 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))))
1781 #if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t))
1782 #define gfp_t unsigned
1784 typedef unsigned gfp_t;
1786 #endif /* !RHEL4.3->RHEL5.0 */
1788 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) )
1789 #ifdef CONFIG_X86_64
1790 #define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \
1791 dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir))
1792 #define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \
1793 dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir))
1796 #endif /* < 2.6.14 */
1798 /*****************************************************************************/
1799 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
1800 #ifndef vmalloc_node
1801 #define vmalloc_node(a,b) vmalloc(a)
1802 #endif /* vmalloc_node*/
1804 #define setup_timer(_timer, _function, _data) \
1806 (_timer)->function = _function; \
1807 (_timer)->data = _data; \
1808 init_timer(_timer); \
1810 #ifndef device_can_wakeup
1811 #define device_can_wakeup(dev) (1)
1813 #ifndef device_set_wakeup_enable
1814 #define device_set_wakeup_enable(dev, val) do{}while(0)
1816 #ifndef device_init_wakeup
1817 #define device_init_wakeup(dev,val) do {} while (0)
1819 static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2)
1821 const u16 *a = (const u16 *) addr1;
1822 const u16 *b = (const u16 *) addr2;
1824 return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
1826 #undef compare_ether_addr
1827 #define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2)
1828 #endif /* < 2.6.15 */
1830 /*****************************************************************************/
1831 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
1833 #define DEFINE_MUTEX(x) DECLARE_MUTEX(x)
1834 #define mutex_lock(x) down_interruptible(x)
1835 #define mutex_unlock(x) up(x)
1837 #ifndef ____cacheline_internodealigned_in_smp
1839 #define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp
1841 #define ____cacheline_internodealigned_in_smp
1842 #endif /* CONFIG_SMP */
1843 #endif /* ____cacheline_internodealigned_in_smp */
1845 #else /* 2.6.16 and above */
1847 #define HAVE_PCI_ERS
1848 #if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) )
1849 #ifdef device_can_wakeup
1850 #undef device_can_wakeup
1851 #endif /* device_can_wakeup */
1852 #define device_can_wakeup(dev) 1
1853 #endif /* SLE_VERSION(10,4,0) */
1854 #endif /* < 2.6.16 */
1856 /*****************************************************************************/
1857 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) )
1859 #define dev_notice(dev, fmt, args...) \
1860 dev_printk(KERN_NOTICE, dev, fmt, ## args)
1863 #ifndef first_online_node
1864 #define first_online_node 0
1867 #define NET_SKB_PAD 16
1869 #endif /* < 2.6.17 */
1871 /*****************************************************************************/
1872 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
1875 #define irqreturn_t void
1880 #ifndef IRQF_PROBE_SHARED
1882 #define IRQF_PROBE_SHARED SA_PROBEIRQ
1884 #define IRQF_PROBE_SHARED 0
1889 #define IRQF_SHARED SA_SHIRQ
1893 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
1896 #ifndef FIELD_SIZEOF
1897 #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
1902 #define skb_is_gso _kc_skb_is_gso
1903 static inline int _kc_skb_is_gso(const struct sk_buff *skb)
1905 return skb_shinfo(skb)->gso_size;
1908 #define skb_is_gso(a) 0
1912 #ifndef resource_size_t
1913 #define resource_size_t unsigned long
1919 #define skb_pad(x,y) _kc_skb_pad(x, y)
1920 int _kc_skb_pad(struct sk_buff *skb, int pad);
1924 #define skb_padto(x,y) _kc_skb_padto(x, y)
1925 static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len)
1927 unsigned int size = skb->len;
1928 if(likely(size >= len))
1930 return _kc_skb_pad(skb, len - size);
1933 #ifndef DECLARE_PCI_UNMAP_ADDR
1934 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
1935 dma_addr_t ADDR_NAME
1936 #define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
1938 #define pci_unmap_addr(PTR, ADDR_NAME) \
1940 #define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
1941 (((PTR)->ADDR_NAME) = (VAL))
1942 #define pci_unmap_len(PTR, LEN_NAME) \
1944 #define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
1945 (((PTR)->LEN_NAME) = (VAL))
1946 #endif /* DECLARE_PCI_UNMAP_ADDR */
1947 #endif /* < 2.6.18 */
1949 /*****************************************************************************/
1950 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
1952 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0)))
1953 #define i_private u.generic_ip
1954 #endif /* >= RHEL 5.0 */
1956 #ifndef DIV_ROUND_UP
1957 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
1959 #ifndef __ALIGN_MASK
1960 #define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
1962 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
1963 #if (!((RHEL_RELEASE_CODE && \
1964 ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \
1965 RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \
1966 (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0))))))
1967 typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
1969 #if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
1970 #undef CONFIG_INET_LRO
1971 #undef CONFIG_INET_LRO_MODULE
1974 #undef CONFIG_FCOE_MODULE
1975 #endif /* IXGBE_FCOE */
1977 typedef irqreturn_t (*new_handler_t)(int, void*);
1978 static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
1980 typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
1981 typedef void (*new_handler_t)(int, void*);
1982 static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
1983 #endif /* >= 2.5.x */
1985 irq_handler_t new_handler = (irq_handler_t) handler;
1986 return request_irq(irq, new_handler, flags, devname, dev_id);
1990 #define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
1992 #define irq_handler_t new_handler_t
1993 /* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */
1994 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
1995 #define PCIE_CONFIG_SPACE_LEN 256
1996 #define PCI_CONFIG_SPACE_LEN 64
1997 #define PCIE_LINK_STATUS 0x12
1998 #define pci_config_space_ich8lan() do {} while(0)
1999 #undef pci_save_state
2000 extern int _kc_pci_save_state(struct pci_dev *);
2001 #define pci_save_state(pdev) _kc_pci_save_state(pdev)
2002 #undef pci_restore_state
2003 extern void _kc_pci_restore_state(struct pci_dev *);
2004 #define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
2005 #endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
2009 extern void _kc_free_netdev(struct net_device *);
2010 #define free_netdev(netdev) _kc_free_netdev(netdev)
2012 static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
2016 #define pci_disable_pcie_error_reporting(dev) do {} while (0)
2017 #define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
2019 extern void *_kc_kmemdup(const void *src, size_t len, unsigned gfp);
2020 #define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp)
2027 #include <linux/aer.h>
2028 #include <linux/string.h>
2029 #endif /* < 2.6.19 */
2031 /*****************************************************************************/
2032 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
2033 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
2035 #define INIT_WORK(_work, _func) \
2037 INIT_LIST_HEAD(&(_work)->entry); \
2038 (_work)->pending = 0; \
2039 (_work)->func = (void (*)(void *))_func; \
2040 (_work)->data = _work; \
2041 init_timer(&(_work)->timer); \
2046 #define PCI_VDEVICE(ven, dev) \
2047 PCI_VENDOR_ID_##ven, (dev), \
2048 PCI_ANY_ID, PCI_ANY_ID, 0, 0
2051 #ifndef PCI_VENDOR_ID_INTEL
2052 #define PCI_VENDOR_ID_INTEL 0x8086
2055 #ifndef round_jiffies
/* No timer-coalescing support before 2.6.20: round_jiffies() is an identity
 * mapping.  Expansion parenthesized so e.g. round_jiffies(a + b) * 2 groups
 * as expected. */
#define round_jiffies(x) (x)
2059 #define csum_offset csum
2061 #define HAVE_EARLY_VMALLOC_NODE
2062 #define dev_to_node(dev) -1
2064 /* remove compiler warning with b=b, for unused variable */
2065 #define set_dev_node(a, b) do { (b) = (b); } while(0)
2067 #if (!(RHEL_RELEASE_CODE && \
2068 (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
2069 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
2070 (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \
2071 !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
2072 typedef __u16 __bitwise __sum16;
2073 typedef __u32 __bitwise __wsum;
2076 #if (!(RHEL_RELEASE_CODE && \
2077 (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
2078 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
2079 (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \
2080 !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
2081 static inline __wsum csum_unfold(__sum16 n)
2083 return (__force __wsum)n;
2087 #else /* < 2.6.20 */
2088 #define HAVE_DEVICE_NUMA_NODE
2089 #endif /* < 2.6.20 */
2091 /*****************************************************************************/
2092 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
2093 #define to_net_dev(class) container_of(class, struct net_device, class_dev)
2094 #define NETDEV_CLASS_DEV
2095 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
2096 #define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
2097 #define vlan_group_set_device(vg, id, dev) \
2099 if (vg) vg->vlan_devices[id] = dev; \
2101 #endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
2102 #define pci_channel_offline(pdev) (pdev->error_state && \
2103 pdev->error_state != pci_channel_io_normal)
2104 #define pci_request_selected_regions(pdev, bars, name) \
2105 pci_request_regions(pdev, name)
/* No selective-region support before 2.6.21; release everything.
 * Fix: dropped the trailing ';' from the expansion — the caller supplies
 * it, and an embedded semicolon breaks "if (x) macro(...); else ..." */
#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev)
2109 #define __aligned(x) __attribute__((aligned(x)))
2112 extern struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev);
2113 #define netdev_to_dev(netdev) \
2114 pci_dev_to_dev(_kc_netdev_to_pdev(netdev))
2116 static inline struct device *netdev_to_dev(struct net_device *netdev)
2118 return &netdev->dev;
2121 #endif /* < 2.6.21 */
2123 /*****************************************************************************/
2124 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
2125 #define tcp_hdr(skb) (skb->h.th)
2126 #define tcp_hdrlen(skb) (skb->h.th->doff << 2)
2127 #define skb_transport_offset(skb) (skb->h.raw - skb->data)
2128 #define skb_transport_header(skb) (skb->h.raw)
2129 #define ipv6_hdr(skb) (skb->nh.ipv6h)
2130 #define ip_hdr(skb) (skb->nh.iph)
2131 #define skb_network_offset(skb) (skb->nh.raw - skb->data)
2132 #define skb_network_header(skb) (skb->nh.raw)
2133 #define skb_tail_pointer(skb) skb->tail
2134 #define skb_reset_tail_pointer(skb) \
2136 skb->tail = skb->data; \
2138 #define skb_set_tail_pointer(skb, offset) \
2140 skb->tail = skb->data + offset; \
2142 #define skb_copy_to_linear_data(skb, from, len) \
2143 memcpy(skb->data, from, len)
2144 #define skb_copy_to_linear_data_offset(skb, offset, from, len) \
2145 memcpy(skb->data + offset, from, len)
2146 #define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
2147 #define pci_register_driver pci_module_init
2148 #define skb_mac_header(skb) skb->mac.raw
2150 #ifdef NETIF_F_MULTI_QUEUE
2151 #ifndef alloc_etherdev_mq
2152 #define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
2154 #endif /* NETIF_F_MULTI_QUEUE */
2157 #define ETH_FCS_LEN 4
2159 #define cancel_work_sync(x) flush_scheduled_work()
2161 #define udp_hdr _udp_hdr
2162 static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
2164 return (struct udphdr *)skb_transport_header(skb);
2171 #define cpu_to_be16(x) __constant_htons(x)
2173 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)))
2176 DUMP_PREFIX_ADDRESS,
2179 #endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */
2181 #define hex_asc(x) "0123456789abcdef"[x]
2183 #include <linux/ctype.h>
2184 extern void _kc_print_hex_dump(const char *level, const char *prefix_str,
2185 int prefix_type, int rowsize, int groupsize,
2186 const void *buf, size_t len, bool ascii);
2187 #define print_hex_dump(lvl, s, t, r, g, b, l, a) \
2188 _kc_print_hex_dump(lvl, s, t, r, g, b, l, a)
2189 #ifndef ADVERTISED_2500baseX_Full
2190 #define ADVERTISED_2500baseX_Full (1 << 15)
2192 #ifndef SUPPORTED_2500baseX_Full
2193 #define SUPPORTED_2500baseX_Full (1 << 15)
2196 #ifdef HAVE_I2C_SUPPORT
2197 #include <linux/i2c.h>
2198 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
2199 struct i2c_board_info {
2200 char driver_name[KOBJ_NAME_LEN];
2201 char type[I2C_NAME_SIZE];
2202 unsigned short flags;
2203 unsigned short addr;
2204 void *platform_data;
2206 #define I2C_BOARD_INFO(driver, dev_addr) .driver_name = (driver),\
2208 #endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
2209 #define i2c_new_device(adap, info) _kc_i2c_new_device(adap, info)
2210 extern struct i2c_client *
2211 _kc_i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
2212 #endif /* HAVE_I2C_SUPPORT */
2215 #define ETH_TYPE_TRANS_SETS_DEV
2216 #define HAVE_NETDEV_STATS_IN_NETDEV
2217 #endif /* < 2.6.22 */
2219 /*****************************************************************************/
2220 #if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
2221 #undef SET_MODULE_OWNER
2222 #define SET_MODULE_OWNER(dev) do { } while (0)
2223 #endif /* > 2.6.22 */
2225 /*****************************************************************************/
2226 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
2227 #define netif_subqueue_stopped(_a, _b) 0
2229 #define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
2232 #ifndef CONFIG_PM_SLEEP
2233 #define CONFIG_PM_SLEEP CONFIG_PM
2236 #if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) )
2237 #define HAVE_ETHTOOL_GET_PERM_ADDR
2238 #endif /* 2.6.14 through 2.6.22 */
2239 #endif /* < 2.6.23 */
2241 /*****************************************************************************/
/*
 * < 2.6.24: emulate the NAPI API on top of the old net_device->poll
 * interface.  A fake napi_struct is mapped to a backing poll net_device
 * via napi_to_poll_dev(), and the napi_* calls are translated to the
 * legacy netif_poll_* / netif_rx_* primitives.
 */
2242 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
2243 #ifndef ETH_FLAG_LRO
2244 #define ETH_FLAG_LRO NETIF_F_LRO
2247 /* if GRO is supported then the napi struct must already exist */
2249 /* NAPI API changes in 2.6.24 break everything */
/*
 * NOTE(review): the remaining napi_struct members and the closing brace
 * (originals 2254-2258) are absent from this dump.
 */
2250 struct napi_struct {
2251 /* used to look up the real NAPI polling routine */
2252 int (*poll)(struct napi_struct *, int);
2253 struct net_device *dev;
/* Helpers that bridge the fake napi_struct to the legacy ->poll netdev. */
2259 extern int __kc_adapter_clean(struct net_device *, int *);
2260 extern struct net_device *napi_to_poll_dev(const struct napi_struct *napi);
2261 #define netif_napi_add(_netdev, _napi, _poll, _weight) \
2263 struct napi_struct *__napi = (_napi); \
2264 struct net_device *poll_dev = napi_to_poll_dev(__napi); \
2265 poll_dev->poll = &(__kc_adapter_clean); \
2266 poll_dev->priv = (_napi); \
2267 poll_dev->weight = (_weight); \
2268 set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); \
2269 set_bit(__LINK_STATE_START, &poll_dev->state);\
2270 dev_hold(poll_dev); \
2271 __napi->poll = &(_poll); \
2272 __napi->weight = (_weight); \
2273 __napi->dev = (_netdev); \
2275 #define netif_napi_del(_napi) \
2277 struct net_device *poll_dev = napi_to_poll_dev(_napi); \
2278 WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); \
2279 dev_put(poll_dev); \
2280 memset(poll_dev, 0, sizeof(struct net_device));\
2282 #define napi_schedule_prep(_napi) \
2283 (netif_running((_napi)->dev) && netif_rx_schedule_prep(napi_to_poll_dev(_napi)))
/* Schedule polling only when prep succeeds (interface running). */
2284 #define napi_schedule(_napi) \
2286 if (napi_schedule_prep(_napi)) \
2287 __netif_rx_schedule(napi_to_poll_dev(_napi)); \
2289 #define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
2290 #define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
/*
 * SMP variant spins until the RX_SCHED bit clears; the function body lines
 * (originals 2293-2301) are missing from this dump - TODO confirm.
 */
2292 static inline void napi_synchronize(const struct napi_struct *n)
2294 struct net_device *dev = napi_to_poll_dev(n);
2296 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
2302 #define napi_synchronize(n) barrier()
2303 #endif /* CONFIG_SMP */
2304 #define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
2306 #define napi_complete(_napi) netif_rx_complete(napi_to_poll_dev(_napi))
/* GRO-aware variant: flush pending GRO packets before completing. */
2308 #define napi_complete(_napi) \
2310 napi_gro_flush(_napi); \
2311 netif_rx_complete(napi_to_poll_dev(_napi)); \
2313 #endif /* NETIF_F_GRO */
/*
 * Simpler netif_napi_add/del pair that drives the netdev's own ->poll;
 * presumably the alternate (non-multiqueue) branch - the guarding
 * conditional line is missing from this dump.
 */
2315 #define netif_napi_add(_netdev, _napi, _poll, _weight) \
2317 struct napi_struct *__napi = _napi; \
2318 _netdev->poll = &(_poll); \
2319 _netdev->weight = (_weight); \
2320 __napi->poll = &(_poll); \
2321 __napi->weight = (_weight); \
2322 __napi->dev = (_netdev); \
2324 #define netif_napi_del(_a) do {} while (0)
2327 #undef dev_get_by_name
2328 #define dev_get_by_name(_a, _b) dev_get_by_name(_b)
2329 #define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
2330 #ifndef DMA_BIT_MASK
2331 #define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1))
/* skb_is_gso_v6 back-port: true when the skb carries TCPv6 GSO type. */
2335 #define skb_is_gso_v6 _kc_skb_is_gso_v6
2336 static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
2338 return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
2340 #endif /* NETIF_F_TSO6 */
2343 #define KERN_CONT ""
2346 #define pr_err(fmt, arg...) \
2347 printk(KERN_ERR fmt, ##arg)
2349 #else /* < 2.6.24 */
2350 #define HAVE_ETHTOOL_GET_SSET_COUNT
2351 #define HAVE_NETDEV_NAPI_LIST
2352 #endif /* < 2.6.24 */
2354 /*****************************************************************************/
/* Select the PM QoS header: pm_qos_params.h before 3.2, pm_qos.h after. */
2355 #if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
2356 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
2357 #include <linux/pm_qos_params.h>
2358 #else /* >= 3.2.0 */
2359 #include <linux/pm_qos.h>
2360 #endif /* else >= 3.2.0 */
2361 #endif /* > 2.6.24 */
2363 /*****************************************************************************/
/*
 * < 2.6.25: emulate the PM QoS requirement API.  On 2.6.19-2.6.24 it maps
 * onto the old acceptable-latency interface; on older kernels the calls
 * become no-ops (update logs a warning).
 */
2364 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
2365 #define PM_QOS_CPU_DMA_LATENCY 1
2367 #if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
2368 #include <linux/latency.h>
2369 #define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY
2370 #define pm_qos_add_requirement(pm_qos_class, name, value) \
2371 set_acceptable_latency(name, value)
2372 #define pm_qos_remove_requirement(pm_qos_class, name) \
2373 remove_acceptable_latency(name)
2374 #define pm_qos_update_requirement(pm_qos_class, name, value) \
2375 modify_acceptable_latency(name, value)
/*
 * NOTE(review): the #else introducing this <= 2.6.18 stub branch
 * (original 2376) is missing from this dump.
 */
2377 #define PM_QOS_DEFAULT_VALUE -1
2378 #define pm_qos_add_requirement(pm_qos_class, name, value)
2379 #define pm_qos_remove_requirement(pm_qos_class, name)
2380 #define pm_qos_update_requirement(pm_qos_class, name, value) { \
2381 if (value != PM_QOS_DEFAULT_VALUE) { \
2382 printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
2383 pci_name(adapter->pdev)); \
2387 #endif /* > 2.6.18 */
/* pci_enable_device_mem() did not exist yet; plain enable is the shim. */
2389 #define pci_enable_device_mem(pdev) pci_enable_device(pdev)
2391 #ifndef DEFINE_PCI_DEVICE_TABLE
2392 #define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[]
2393 #endif /* DEFINE_PCI_DEVICE_TABLE */
2396 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
2399 #endif /* IGB_PROCFS */
2400 #endif /* >= 2.6.0 */
2402 #else /* < 2.6.25 */
2405 #if IS_ENABLED(CONFIG_HWMON)
2408 #endif /* IGB_HWMON */
2409 #endif /* CONFIG_HWMON */
2411 #endif /* < 2.6.25 */
2413 /*****************************************************************************/
2414 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
/* clamp_t back-port: cast all three to 'type', then bound val to [min, max]. */
2416 #define clamp_t(type, val, min, max) ({ \
2417 type __val = (val); \
2418 type __min = (min); \
2419 type __max = (max); \
2420 __val = __val < __min ? __min : __val; \
2421 __val > __max ? __max : __val; })
2422 #endif /* clamp_t */
/* NUMA-aware kzalloc did not exist; fall back to kzalloc, node hint dropped. */
2424 #define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags)
2426 extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
2427 #define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
2428 #else /* < 2.6.26 */
2429 #include <linux/pci-aspm.h>
2430 #define HAVE_NETDEV_VLAN_FEATURES
2431 #ifndef PCI_EXP_LNKCAP_ASPMS
2432 #define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
2433 #endif /* PCI_EXP_LNKCAP_ASPMS */
2434 #endif /* < 2.6.26 */
2435 /*****************************************************************************/
/*
 * < 2.6.27: ethtool speed accessors (only the low 16 bits exist before
 * speed_hi was added), wakeup-enable emulation, netif_napi_del, and
 * multiqueue TX wrappers.
 */
2436 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
/* NOTE(review): function bodies in this span are missing lines (braces). */
2437 static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
2440 ep->speed = (__u16)speed;
2441 /* ep->speed_hi = (__u16)(speed >> 16); */
2443 #define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
2445 static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep)
2447 /* no speed_hi before 2.6.27, and probably no need for it yet */
2448 return (__u32)ep->speed;
2450 #define ethtool_cmd_speed _kc_ethtool_cmd_speed
2452 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
2453 #if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM))
2454 #define ANCIENT_PM 1
2455 #elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \
2456 (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \
2457 defined(CONFIG_PM_SLEEP))
/* Derive wakeup capability from the PCI PM capability's PMC register. */
2460 #if defined(ANCIENT_PM) || defined(NEWER_PM)
2461 #undef device_set_wakeup_enable
2462 #define device_set_wakeup_enable(dev, val) \
2465 int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
2467 pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
2470 (dev)->power.can_wakeup = !!(pmc >> 11); \
2471 (dev)->power.should_wakeup = (val && (pmc >> 11)); \
2473 #endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
2474 #endif /* 2.6.15 through 2.6.27 */
2475 #ifndef netif_napi_del
2476 #define netif_napi_del(_a) do {} while (0)
2478 #ifdef CONFIG_NETPOLL
2479 #undef netif_napi_del
2480 #define netif_napi_del(_a) list_del(&(_a)->dev_list);
2483 #endif /* netif_napi_del */
2484 #ifdef dma_mapping_error
2485 #undef dma_mapping_error
2487 #define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr)
/* Multiqueue TX path: per-queue wrappers routed through _kc_ helpers. */
2489 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
2494 extern void _kc_netif_tx_stop_all_queues(struct net_device *);
2495 extern void _kc_netif_tx_wake_all_queues(struct net_device *);
2496 extern void _kc_netif_tx_start_all_queues(struct net_device *);
2497 #define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
2498 #define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
2499 #define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
2500 #undef netif_stop_subqueue
2501 #define netif_stop_subqueue(_ndev,_qi) do { \
2502 if (netif_is_multiqueue((_ndev))) \
2503 netif_stop_subqueue((_ndev), (_qi)); \
2505 netif_stop_queue((_ndev)); \
2507 #undef netif_start_subqueue
2508 #define netif_start_subqueue(_ndev,_qi) do { \
2509 if (netif_is_multiqueue((_ndev))) \
2510 netif_start_subqueue((_ndev), (_qi)); \
2512 netif_start_queue((_ndev)); \
2514 #else /* HAVE_TX_MQ */
/* Single-queue fallback: all "per-queue" operations hit the one queue. */
2515 #define netif_tx_stop_all_queues(a) netif_stop_queue(a)
2516 #define netif_tx_wake_all_queues(a) netif_wake_queue(a)
2517 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
2518 #define netif_tx_start_all_queues(a) netif_start_queue(a)
2520 #define netif_tx_start_all_queues(a) do {} while (0)
2522 #define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev))
2523 #define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev))
2524 #endif /* HAVE_TX_MQ */
2525 #ifndef NETIF_F_MULTI_QUEUE
2526 #define NETIF_F_MULTI_QUEUE 0
2527 #define netif_is_multiqueue(a) 0
2528 #define netif_wake_subqueue(a, b)
2529 #endif /* NETIF_F_MULTI_QUEUE */
/* WARN() back-port: report via __kc_warn_slowpath, yield the condition. */
2531 #ifndef __WARN_printf
2532 extern void __kc_warn_slowpath(const char *file, const int line,
2533 const char *fmt, ...) __attribute__((format(printf, 3, 4)));
2534 #define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg)
2535 #endif /* __WARN_printf */
2538 #define WARN(condition, format...) ({ \
2539 int __ret_warn_on = !!(condition); \
2540 if (unlikely(__ret_warn_on)) \
2541 __WARN_printf(format); \
2542 unlikely(__ret_warn_on); \
2545 #undef HAVE_IXGBE_DEBUG_FS
2546 #undef HAVE_IGB_DEBUG_FS
2547 #else /* < 2.6.27 */
2549 #define HAVE_NETDEV_SELECT_QUEUE
2550 #ifdef CONFIG_DEBUG_FS
2551 #define HAVE_IXGBE_DEBUG_FS
2552 #define HAVE_IGB_DEBUG_FS
2553 #endif /* CONFIG_DEBUG_FS */
2554 #endif /* < 2.6.27 */
2556 /*****************************************************************************/
/* < 2.6.28: pci_ioremap_bar, D3 wake helpers, skb queue-head init. */
2557 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
2558 #define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \
2559 pci_resource_len(pdev, bar))
2560 #define pci_wake_from_d3 _kc_pci_wake_from_d3
2561 #define pci_prepare_to_sleep _kc_pci_prepare_to_sleep
2562 extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable);
2563 extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev);
2564 #define netdev_alloc_page(a) alloc_page(GFP_ATOMIC)
2565 #ifndef __skb_queue_head_init
/* Empty list: prev and next both point back at the head sentinel. */
2566 static inline void __kc_skb_queue_head_init(struct sk_buff_head *list)
2568 list->prev = list->next = (struct sk_buff *)list;
2571 #define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
2574 #define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
2575 #define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
2577 #endif /* < 2.6.28 */
2579 /*****************************************************************************/
/* < 2.6.29: swap(), exclusive region requests, PCIe/ASPM constants. */
2580 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
2582 #define swap(a, b) \
2583 do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
/* "Exclusive" variant not available; plain request is the closest match. */
2585 #define pci_request_selected_regions_exclusive(pdev, bars, name) \
2586 pci_request_selected_regions(pdev, bars, name)
2587 #ifndef CONFIG_NR_CPUS
2588 #define CONFIG_NR_CPUS 1
2589 #endif /* CONFIG_NR_CPUS */
2590 #ifndef pcie_aspm_enabled
2591 #define pcie_aspm_enabled() (1)
2592 #endif /* pcie_aspm_enabled */
2594 #define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */
2596 #ifndef pci_clear_master
2597 extern void _kc_pci_clear_master(struct pci_dev *dev);
2598 #define pci_clear_master(dev) _kc_pci_clear_master(dev)
2601 #ifndef PCI_EXP_LNKCTL_ASPMC
2602 #define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */
2604 #else /* < 2.6.29 */
2605 #ifndef HAVE_NET_DEVICE_OPS
2606 #define HAVE_NET_DEVICE_OPS
2609 #define HAVE_PFC_MODE_ENABLE
2610 #endif /* CONFIG_DCB */
2611 #endif /* < 2.6.29 */
2613 /*****************************************************************************/
/* < 2.6.30: no RX queue recording, no SR-IOV, synchronize_irq shim. */
2614 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
2615 #define skb_rx_queue_recorded(a) false
2616 #define skb_get_rx_queue(a) 0
2617 #define skb_record_rx_queue(a, b) do {} while (0)
2618 #define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues)
2619 #ifndef CONFIG_PCI_IOV
/* Without CONFIG_PCI_IOV, enabling SR-IOV must fail; disabling is a no-op. */
2620 #undef pci_enable_sriov
2621 #define pci_enable_sriov(a, b) -ENOTSUPP
2622 #undef pci_disable_sriov
2623 #define pci_disable_sriov(a) do {} while (0)
2624 #endif /* CONFIG_PCI_IOV */
2626 #define pr_cont(fmt, ...) \
2627 printk(KERN_CONT fmt, ##__VA_ARGS__)
2628 #endif /* pr_cont */
/*
 * NOTE(review): body of _kc_synchronize_irq (braces and per-version calls,
 * originals 2630/2632/2634/2636) is missing from this dump.
 */
2629 static inline void _kc_synchronize_irq(unsigned int a)
2631 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
2633 #else /* < 2.5.28 */
2635 #endif /* < 2.5.28 */
2637 #undef synchronize_irq
2638 #define synchronize_irq(a) _kc_synchronize_irq(a)
2640 #define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
2642 #else /* < 2.6.30 */
2643 #define HAVE_ASPM_QUIRKS
2644 #endif /* < 2.6.30 */
2646 /*****************************************************************************/
/* < 2.6.31: ethertypes, unicast list walkers, MDIO and KX link mode bits. */
2647 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
2648 #define ETH_P_1588 0x88F7
2649 #define ETH_P_FIP 0x8914
2650 #ifndef netdev_uc_count
2651 #define netdev_uc_count(dev) ((dev)->uc_count)
2653 #ifndef netdev_for_each_uc_addr
/* Old kernels keep unicast addresses in a hand-linked uc_list. */
2654 #define netdev_for_each_uc_addr(uclist, dev) \
2655 for (uclist = dev->uc_list; uclist; uclist = uclist->next)
2658 #define PORT_OTHER 0xff
2660 #ifndef MDIO_PHY_ID_PRTAD
2661 #define MDIO_PHY_ID_PRTAD 0x03e0
2663 #ifndef MDIO_PHY_ID_DEVAD
2664 #define MDIO_PHY_ID_DEVAD 0x001f
2667 #define skb_dst(s) ((s)->dst)
2670 #ifndef SUPPORTED_1000baseKX_Full
2671 #define SUPPORTED_1000baseKX_Full (1 << 17)
2673 #ifndef SUPPORTED_10000baseKX4_Full
2674 #define SUPPORTED_10000baseKX4_Full (1 << 18)
2676 #ifndef SUPPORTED_10000baseKR_Full
2677 #define SUPPORTED_10000baseKR_Full (1 << 19)
2680 #ifndef ADVERTISED_1000baseKX_Full
2681 #define ADVERTISED_1000baseKX_Full (1 << 17)
2683 #ifndef ADVERTISED_10000baseKX4_Full
2684 #define ADVERTISED_10000baseKX4_Full (1 << 18)
2686 #ifndef ADVERTISED_10000baseKR_Full
2687 #define ADVERTISED_10000baseKR_Full (1 << 19)
2690 #else /* < 2.6.31 */
2691 #ifndef HAVE_NETDEV_STORAGE_ADDRESS
2692 #define HAVE_NETDEV_STORAGE_ADDRESS
2694 #ifndef HAVE_NETDEV_HW_ADDR
2695 #define HAVE_NETDEV_HW_ADDR
2697 #ifndef HAVE_TRANS_START_IN_QUEUE
2698 #define HAVE_TRANS_START_IN_QUEUE
2700 #ifndef HAVE_INCLUDE_LINUX_MDIO_H
2701 #define HAVE_INCLUDE_LINUX_MDIO_H
2703 #endif /* < 2.6.31 */
2705 /*****************************************************************************/
/* < 2.6.32: netdev_tx_t and a full set of pm_runtime no-op stubs. */
2706 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) )
2708 #define netdev_tx_t int
2709 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
2710 #ifndef NETIF_F_FCOE_MTU
2711 #define NETIF_F_FCOE_MTU (1 << 26)
2713 #endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
/*
 * NOTE(review): the _kc_pm_runtime_get_sync stub bodies (braces/returns)
 * are missing from this dump.
 */
2715 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
2716 static inline int _kc_pm_runtime_get_sync()
2720 #define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync()
2721 #else /* 2.6.0 => 2.6.32 */
2722 static inline int _kc_pm_runtime_get_sync(struct device *dev)
2726 #ifndef pm_runtime_get_sync
2727 #define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev)
2729 #endif /* 2.6.0 => 2.6.32 */
/* Runtime PM did not exist yet: all remaining entry points are no-ops. */
2730 #ifndef pm_runtime_put
2731 #define pm_runtime_put(dev) do {} while (0)
2733 #ifndef pm_runtime_put_sync
2734 #define pm_runtime_put_sync(dev) do {} while (0)
2736 #ifndef pm_runtime_resume
2737 #define pm_runtime_resume(dev) do {} while (0)
2739 #ifndef pm_schedule_suspend
2740 #define pm_schedule_suspend(dev, t) do {} while (0)
2742 #ifndef pm_runtime_set_suspended
2743 #define pm_runtime_set_suspended(dev) do {} while (0)
2745 #ifndef pm_runtime_disable
2746 #define pm_runtime_disable(dev) do {} while (0)
2748 #ifndef pm_runtime_put_noidle
2749 #define pm_runtime_put_noidle(dev) do {} while (0)
2751 #ifndef pm_runtime_set_active
2752 #define pm_runtime_set_active(dev) do {} while (0)
2754 #ifndef pm_runtime_enable
2755 #define pm_runtime_enable(dev) do {} while (0)
2757 #ifndef pm_runtime_get_noresume
2758 #define pm_runtime_get_noresume(dev) do {} while (0)
2760 #else /* < 2.6.32 */
2761 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
2762 #ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
2763 #define HAVE_NETDEV_OPS_FCOE_ENABLE
2765 #endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
2767 #ifndef HAVE_DCBNL_OPS_GETAPP
2768 #define HAVE_DCBNL_OPS_GETAPP
2770 #endif /* CONFIG_DCB */
2771 #include <linux/pm_runtime.h>
2772 /* IOV bad DMA target work arounds require at least this kernel rev support */
2773 #define HAVE_PCIE_TYPE
2774 #endif /* < 2.6.32 */
2776 /*****************************************************************************/
/*
 * < 2.6.33: pci_pcie_cap/pci_is_pcie shims plus feature flags for RHEL6
 * and SLES11 SP1 back-ports of newer APIs.
 */
2777 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
2778 #ifndef pci_pcie_cap
2779 #define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP)
2782 #define IPV4_FLOW 0x10
2783 #endif /* IPV4_FLOW */
2785 #define IPV6_FLOW 0x11
2786 #endif /* IPV6_FLOW */
2787 /* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */
2788 #if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \
2789 (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) )
2790 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
2791 #ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
2792 #define HAVE_NETDEV_OPS_FCOE_GETWWN
2794 #endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
2795 #endif /* RHEL6 or SLES11 SP1 */
2798 #endif /* __percpu */
2800 #define PORT_DA PORT_OTHER
2803 #define PORT_NONE PORT_OTHER
/* RHEL 6.3-6.x: re-establish DMA unmap bookkeeping macros. */
2806 #if ((RHEL_RELEASE_CODE && \
2807 (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \
2808 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))
2809 #if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE)
2810 #undef DEFINE_DMA_UNMAP_ADDR
2811 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
2812 #undef DEFINE_DMA_UNMAP_LEN
2813 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
2814 #undef dma_unmap_addr
2815 #define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
2816 #undef dma_unmap_addr_set
2817 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
2818 #undef dma_unmap_len
2819 #define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
2820 #undef dma_unmap_len_set
2821 #define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
2822 #endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */
2823 #endif /* RHEL_RELEASE_CODE */
/*
 * pci_is_pcie(): a device is PCIe when it has a PCIe capability.
 * NOTE(review): the function's braces (originals 2831/2833) are missing
 * from this dump.
 */
2825 #if (!(RHEL_RELEASE_CODE && \
2826 (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \
2827 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \
2828 ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \
2829 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))))
2830 static inline bool pci_is_pcie(struct pci_dev *dev)
2832 return !!pci_pcie_cap(dev);
2834 #endif /* RHEL_RELEASE_CODE */
2836 #ifndef __always_unused
2837 #define __always_unused __attribute__((__unused__))
2839 #ifndef __maybe_unused
2840 #define __maybe_unused __attribute__((__unused__))
2843 #if (!(RHEL_RELEASE_CODE && \
2844 (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2))))
2845 #define sk_tx_queue_get(_sk) (-1)
2846 #define sk_tx_queue_set(_sk, _tx_queue) do {} while(0)
2847 #endif /* !(RHEL >= 6.2) */
2849 #if (RHEL_RELEASE_CODE && \
2850 (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
2851 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
2852 #define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
2853 #define HAVE_ETHTOOL_SET_PHYS_ID
2854 #define HAVE_ETHTOOL_GET_TS_INFO
2855 #endif /* RHEL >= 6.4 && RHEL < 7.0 */
2857 #if (RHEL_RELEASE_CODE && \
2858 (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) && \
2859 (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
2860 #define HAVE_RHEL6_NETDEV_OPS_EXT_FDB
2861 #endif /* RHEL >= 6.5 && RHEL < 7.0 */
2863 #else /* < 2.6.33 */
2864 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
2865 #ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
2866 #define HAVE_NETDEV_OPS_FCOE_GETWWN
2868 #endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
2869 #endif /* < 2.6.33 */
2871 /*****************************************************************************/
/*
 * < 2.6.34: pci_num_vf shim, netdev address-list accessors, and the whole
 * netdev_*/netif_* logging macro family layered on the era-appropriate
 * printk/dev_printk primitive.
 */
2872 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
2873 #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
2875 #define pci_num_vf(pdev) _kc_pci_num_vf(pdev)
2876 extern int _kc_pci_num_vf(struct pci_dev *dev);
2878 #endif /* RHEL_RELEASE_CODE */
2880 #ifndef ETH_FLAG_NTUPLE
2881 #define ETH_FLAG_NTUPLE NETIF_F_NTUPLE
/* Address-list accessors over the old mc_list / uc list representations. */
2884 #ifndef netdev_mc_count
2885 #define netdev_mc_count(dev) ((dev)->mc_count)
2887 #ifndef netdev_mc_empty
2888 #define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
2890 #ifndef netdev_for_each_mc_addr
2891 #define netdev_for_each_mc_addr(mclist, dev) \
2892 for (mclist = dev->mc_list; mclist; mclist = mclist->next)
2894 #ifndef netdev_uc_count
2895 #define netdev_uc_count(dev) ((dev)->uc.count)
2897 #ifndef netdev_uc_empty
2898 #define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0)
2900 #ifndef netdev_for_each_uc_addr
2901 #define netdev_for_each_uc_addr(ha, dev) \
2902 list_for_each_entry(ha, &dev->uc.list, list)
2904 #ifndef dma_set_coherent_mask
2905 #define dma_set_coherent_mask(dev,mask) \
2906 pci_set_consistent_dma_mask(to_pci_dev(dev),(mask))
2908 #ifndef pci_dev_run_wake
2909 #define pci_dev_run_wake(pdev) (0)
2912 /* netdev logging taken from include/linux/netdevice.h */
2914 static inline const char *_kc_netdev_name(const struct net_device *dev)
2916 if (dev->reg_state != NETREG_REGISTERED)
2917 return "(unregistered net_device)";
2920 #define netdev_name(netdev) _kc_netdev_name(netdev)
2921 #endif /* netdev_name */
/* Pick the netdev_printk backend by kernel era. */
2923 #undef netdev_printk
2924 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
2925 #define netdev_printk(level, netdev, format, args...) \
2927 struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \
2928 printk(level "%s: " format, pci_name(pdev), ##args); \
2930 #elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
2931 #define netdev_printk(level, netdev, format, args...) \
2933 struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \
2934 struct device *dev = pci_dev_to_dev(pdev); \
2935 dev_printk(level, dev, "%s: " format, \
2936 netdev_name(netdev), ##args); \
2938 #else /* 2.6.21 => 2.6.34 */
2939 #define netdev_printk(level, netdev, format, args...) \
2940 dev_printk(level, (netdev)->dev.parent, \
2942 netdev_name(netdev), ##args)
2943 #endif /* <2.6.0 <2.6.21 <2.6.34 */
2945 #define netdev_emerg(dev, format, args...) \
2946 netdev_printk(KERN_EMERG, dev, format, ##args)
2948 #define netdev_alert(dev, format, args...) \
2949 netdev_printk(KERN_ALERT, dev, format, ##args)
2951 #define netdev_crit(dev, format, args...) \
2952 netdev_printk(KERN_CRIT, dev, format, ##args)
2954 #define netdev_err(dev, format, args...) \
2955 netdev_printk(KERN_ERR, dev, format, ##args)
2957 #define netdev_warn(dev, format, args...) \
2958 netdev_printk(KERN_WARNING, dev, format, ##args)
2959 #undef netdev_notice
2960 #define netdev_notice(dev, format, args...) \
2961 netdev_printk(KERN_NOTICE, dev, format, ##args)
2963 #define netdev_info(dev, format, args...) \
2964 netdev_printk(KERN_INFO, dev, format, ##args)
2967 #define netdev_dbg(__dev, format, args...) \
2968 netdev_printk(KERN_DEBUG, __dev, format, ##args)
2969 #elif defined(CONFIG_DYNAMIC_DEBUG)
2970 #define netdev_dbg(__dev, format, args...) \
2972 dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
2973 netdev_name(__dev), ##args); \
2976 #define netdev_dbg(__dev, format, args...) \
2979 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
/* netif_* variants additionally honor the driver's msg_enable bits. */
2985 #define netif_printk(priv, type, level, dev, fmt, args...) \
2987 if (netif_msg_##type(priv)) \
2988 netdev_printk(level, (dev), fmt, ##args); \
2992 #define netif_emerg(priv, type, dev, fmt, args...) \
2993 netif_level(emerg, priv, type, dev, fmt, ##args)
2995 #define netif_alert(priv, type, dev, fmt, args...) \
2996 netif_level(alert, priv, type, dev, fmt, ##args)
2998 #define netif_crit(priv, type, dev, fmt, args...) \
2999 netif_level(crit, priv, type, dev, fmt, ##args)
3001 #define netif_err(priv, type, dev, fmt, args...) \
3002 netif_level(err, priv, type, dev, fmt, ##args)
3004 #define netif_warn(priv, type, dev, fmt, args...) \
3005 netif_level(warn, priv, type, dev, fmt, ##args)
3007 #define netif_notice(priv, type, dev, fmt, args...) \
3008 netif_level(notice, priv, type, dev, fmt, ##args)
3010 #define netif_info(priv, type, dev, fmt, args...) \
3011 netif_level(info, priv, type, dev, fmt, ##args)
3013 #define netif_dbg(priv, type, dev, fmt, args...) \
3014 netif_level(dbg, priv, type, dev, fmt, ##args)
3016 #ifdef SET_SYSTEM_SLEEP_PM_OPS
3017 #define HAVE_SYSTEM_SLEEP_PM_OPS
3020 #ifndef for_each_set_bit
3021 #define for_each_set_bit(bit, addr, size) \
3022 for ((bit) = find_first_bit((addr), (size)); \
3024 (bit) = find_next_bit((addr), (size), (bit) + 1))
3025 #endif /* for_each_set_bit */
/* Map the dma_unmap_* bookkeeping API onto the old pci_unmap_* macros. */
3027 #ifndef DEFINE_DMA_UNMAP_ADDR
3028 #define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR
3029 #define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN
3030 #define dma_unmap_addr pci_unmap_addr
3031 #define dma_unmap_addr_set pci_unmap_addr_set
3032 #define dma_unmap_len pci_unmap_len
3033 #define dma_unmap_len_set pci_unmap_len_set
3034 #endif /* DEFINE_DMA_UNMAP_ADDR */
3036 #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3))
3038 #ifdef CONFIG_DEBUG_LOCK_ALLOC
3039 #define sysfs_attr_init(attr) \
3041 static struct lock_class_key __key; \
3042 (attr)->key = &__key; \
3045 #define sysfs_attr_init(attr) do {} while (0)
3046 #endif /* CONFIG_DEBUG_LOCK_ALLOC */
3047 #endif /* IGB_HWMON */
3048 #endif /* RHEL_RELEASE_CODE */
/*
 * NOTE(review): _kc_pm_runtime_suspended stub bodies (braces/returns)
 * are missing from this dump.
 */
3050 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
3051 static inline bool _kc_pm_runtime_suspended()
3055 #define pm_runtime_suspended(dev) _kc_pm_runtime_suspended()
3056 #else /* 2.6.0 => 2.6.34 */
3057 static inline bool _kc_pm_runtime_suspended(struct device *dev)
3061 #ifndef pm_runtime_suspended
3062 #define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev)
3064 #endif /* 2.6.0 => 2.6.34 */
3066 #else /* < 2.6.34 */
3067 #define HAVE_SYSTEM_SLEEP_PM_OPS
3068 #ifndef HAVE_SET_RX_MODE
3069 #define HAVE_SET_RX_MODE
3072 #endif /* < 2.6.34 */
3074 /*****************************************************************************/
/* < 2.6.35: simple_write_to_buffer and real_num_tx_queues handling. */
3075 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
3077 ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
3078 const void __user *from, size_t count);
3079 #define simple_write_to_buffer _kc_simple_write_to_buffer
3081 #ifndef numa_node_id
3082 #define numa_node_id() 0
3085 #include <net/sch_generic.h>
3086 #ifndef CONFIG_NETDEVICES_MULTIQUEUE
3087 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
3088 void _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
3089 #define netif_set_real_num_tx_queues _kc_netif_set_real_num_tx_queues
3090 #endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
3091 #else /* CONFIG_NETDEVICES_MULTI_QUEUE */
3092 #define netif_set_real_num_tx_queues(_netdev, _count) \
3094 (_netdev)->egress_subqueue_count = _count; \
3096 #endif /* CONFIG_NETDEVICES_MULTI_QUEUE */
3097 #else /* HAVE_TX_MQ */
3098 #define netif_set_real_num_tx_queues(_netdev, _count) do {} while(0)
3099 #endif /* HAVE_TX_MQ */
3100 #ifndef ETH_FLAG_RXHASH
3101 #define ETH_FLAG_RXHASH (1<<28)
3102 #endif /* ETH_FLAG_RXHASH */
3103 #if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))
3104 #define HAVE_IRQ_AFFINITY_HINT
3106 #else /* < 2.6.35 */
3107 #define HAVE_PM_QOS_REQUEST_LIST
3108 #define HAVE_IRQ_AFFINITY_HINT
3109 #endif /* < 2.6.35 */
3111 /*****************************************************************************/
/*
 * < 2.6.36: ethtool flag ops, netdev_alloc_skb_ip_align back-port,
 * usleep_range fallback, and u64_stats no-op stubs (no per-cpu stats sync).
 */
3112 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
3113 extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32);
3114 #define ethtool_op_set_flags _kc_ethtool_op_set_flags
3115 extern u32 _kc_ethtool_op_get_flags(struct net_device *);
3116 #define ethtool_op_get_flags _kc_ethtool_op_get_flags
3118 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
3122 #define NET_IP_ALIGN 0
3123 #endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
3129 #if (L1_CACHE_BYTES > 32)
3130 #define NET_SKB_PAD L1_CACHE_BYTES
3132 #define NET_SKB_PAD 32
/*
 * Allocate an skb with headroom for NET_SKB_PAD + IP alignment.
 * NOTE(review): parts of the function body (braces, skb->dev assignment,
 * return) are missing from this dump.
 */
3135 static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev,
3136 unsigned int length)
3138 struct sk_buff *skb;
3140 skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
3142 #if (NET_IP_ALIGN + NET_SKB_PAD)
3143 skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
3150 #ifdef netdev_alloc_skb_ip_align
3151 #undef netdev_alloc_skb_ip_align
3153 #define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l)
3156 #define netif_level(level, priv, type, dev, fmt, args...) \
3158 if (netif_msg_##type(priv)) \
3159 netdev_##level(dev, fmt, ##args); \
/* usleep_range back-port: msleep with the minimum, rounded up to ms. */
3163 #define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
3165 #define u64_stats_update_begin(a) do { } while(0)
3166 #define u64_stats_update_end(a) do { } while(0)
3167 #define u64_stats_fetch_begin(a) do { } while(0)
3168 #define u64_stats_fetch_retry_bh(a) (0)
3169 #define u64_stats_fetch_begin_bh(a) (0)
3171 #if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1))
3172 #define HAVE_8021P_SUPPORT
3175 #else /* < 2.6.36 */
3178 #define HAVE_PM_QOS_REQUEST_ACTIVE
3179 #define HAVE_8021P_SUPPORT
3180 #define HAVE_NDO_GET_STATS64
3181 #endif /* < 2.6.36 */
3183 /*****************************************************************************/
/*
 * < 2.6.37: RX-queue count setter stub, vzalloc family, and
 * vlan_get_protocol back-ports.
 * NOTE(review): the static-inline bodies in this span are missing lines
 * (braces / return statements) in this dump.
 */
3184 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) )
3185 #ifndef netif_set_real_num_rx_queues
3186 static inline int __kc_netif_set_real_num_rx_queues(struct net_device *dev,
3191 #define netif_set_real_num_rx_queues(dev, rxq) \
3192 __kc_netif_set_real_num_rx_queues((dev), (rxq))
3194 #ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR
3195 #define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2)
3198 #define VLAN_N_VID VLAN_GROUP_ARRAY_LEN
3199 #endif /* VLAN_N_VID */
3200 #ifndef ETH_FLAG_TXVLAN
3201 #define ETH_FLAG_TXVLAN (1 << 7)
3202 #endif /* ETH_FLAG_TXVLAN */
3203 #ifndef ETH_FLAG_RXVLAN
3204 #define ETH_FLAG_RXVLAN (1 << 8)
3205 #endif /* ETH_FLAG_RXVLAN */
3207 static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb)
3209 WARN_ON(skb->ip_summed != CHECKSUM_NONE);
3211 #define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb)
/* vzalloc back-ports: vmalloc then zero the buffer. */
3213 static inline void *_kc_vzalloc_node(unsigned long size, int node)
3215 void *addr = vmalloc_node(size, node);
3217 memset(addr, 0, size);
3220 #define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node)
3222 static inline void *_kc_vzalloc(unsigned long size)
3224 void *addr = vmalloc(size);
3226 memset(addr, 0, size);
3229 #define vzalloc(_size) _kc_vzalloc(_size)
/*
 * vlan_get_protocol: report the encapsulated protocol for 802.1Q frames,
 * otherwise the skb's own protocol.
 */
3231 #ifndef vlan_get_protocol
3232 static inline __be16 __kc_vlan_get_protocol(const struct sk_buff *skb)
3234 if (vlan_tx_tag_present(skb) ||
3235 skb->protocol != cpu_to_be16(ETH_P_8021Q))
3236 return skb->protocol;
3238 if (skb_headlen(skb) < sizeof(struct vlan_ethhdr))
3241 return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto;
3243 #define vlan_get_protocol(_skb) __kc_vlan_get_protocol(_skb)
3245 #ifdef HAVE_HW_TIME_STAMP
3246 #define SKBTX_HW_TSTAMP (1 << 0)
3247 #define SKBTX_IN_PROGRESS (1 << 2)
3248 #define SKB_SHARED_TX_IS_UNION
3251 #ifndef device_wakeup_enable
3252 #define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true)
3255 #if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) )
3256 #ifndef HAVE_VLAN_RX_REGISTER
3257 #define HAVE_VLAN_RX_REGISTER
3259 #endif /* > 2.4.18 */
3260 #endif /* < 2.6.37 */
3262 /*****************************************************************************/
/* < 2.6.38: skb_checksum_start_offset and DCBX capability constants. */
3263 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
3264 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
3265 #define skb_checksum_start_offset(skb) skb_transport_offset(skb)
3266 #else /* 2.6.22 -> 2.6.37 */
/* Offset of csum_start relative to the skb data head. */
3267 static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb)
3269 return skb->csum_start - skb_headroom(skb);
3271 #define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb)
3272 #endif /* 2.6.22 -> 2.6.37 */
3274 #ifndef IEEE_8021QAZ_MAX_TCS
3275 #define IEEE_8021QAZ_MAX_TCS 8
3277 #ifndef DCB_CAP_DCBX_HOST
3278 #define DCB_CAP_DCBX_HOST 0x01
3280 #ifndef DCB_CAP_DCBX_LLD_MANAGED
3281 #define DCB_CAP_DCBX_LLD_MANAGED 0x02
3283 #ifndef DCB_CAP_DCBX_VER_CEE
3284 #define DCB_CAP_DCBX_VER_CEE 0x04
3286 #ifndef DCB_CAP_DCBX_VER_IEEE
3287 #define DCB_CAP_DCBX_VER_IEEE 0x08
3289 #ifndef DCB_CAP_DCBX_STATIC
3290 #define DCB_CAP_DCBX_STATIC 0x10
3292 #endif /* CONFIG_DCB */
3293 #if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2))
3295 #endif /* RHEL_RELEASE_VERSION(6,2) */
3296 #endif /* < 2.6.38 */
3298 /*****************************************************************************/
3299 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
3300 #ifndef NETIF_F_RXCSUM
3301 #define NETIF_F_RXCSUM (1 << 29)
3303 #ifndef skb_queue_reverse_walk_safe
3304 #define skb_queue_reverse_walk_safe(queue, skb, tmp) \
3305 for (skb = (queue)->prev, tmp = skb->prev; \
3306 skb != (struct sk_buff *)(queue); \
3307 skb = tmp, tmp = skb->prev)
3309 #else /* < 2.6.39 */
3310 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
3311 #ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
3312 #define HAVE_NETDEV_OPS_FCOE_DDP_TARGET
3314 #endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
3318 #ifndef HAVE_SETUP_TC
3319 #define HAVE_SETUP_TC
3322 #ifndef HAVE_DCBNL_IEEE
3323 #define HAVE_DCBNL_IEEE
3325 #endif /* CONFIG_DCB */
3326 #ifndef HAVE_NDO_SET_FEATURES
3327 #define HAVE_NDO_SET_FEATURES
3329 #endif /* < 2.6.39 */
3331 /*****************************************************************************/
3332 /* use < 2.6.40 because of a Fedora 15 kernel update where they
3333 * updated the kernel version to 2.6.40.x and they back-ported 3.0 features
3334 * like set_phys_id for ethtool.
/* NOTE(review): the closing "*/" of the comment above was dropped by the
 * extraction -- present in upstream. */
3336 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) )
/* Backported ethtool rx-flow-spec structures (FLOW_EXT support). */
3337 #ifdef ETHTOOL_GRXRINGS
3339 #define FLOW_EXT 0x80000000
3340 union _kc_ethtool_flow_union {
3341 struct ethtool_tcpip4_spec tcp_ip4_spec;
3342 struct ethtool_usrip4_spec usr_ip4_spec;
/* NOTE(review): several member lines of _kc_ethtool_flow_ext (and the
 * closing braces of these aggregates) are missing from this extract. */
3345 struct _kc_ethtool_flow_ext {
3350 struct _kc_ethtool_rx_flow_spec {
3352 union _kc_ethtool_flow_union h_u;
3353 struct _kc_ethtool_flow_ext h_ext;
3354 union _kc_ethtool_flow_union m_u;
3355 struct _kc_ethtool_flow_ext m_ext;
3359 #define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec
3360 #endif /* FLOW_EXT */
3363 #define pci_disable_link_state_locked pci_disable_link_state
/* PCIe Latency Tolerance Reporting register fields. */
3365 #ifndef PCI_LTR_VALUE_MASK
3366 #define PCI_LTR_VALUE_MASK 0x000003ff
3368 #ifndef PCI_LTR_SCALE_MASK
3369 #define PCI_LTR_SCALE_MASK 0x00001c00
3371 #ifndef PCI_LTR_SCALE_SHIFT
3372 #define PCI_LTR_SCALE_SHIFT 10
3375 #else /* < 2.6.40 */
3376 #define HAVE_ETHTOOL_SET_PHYS_ID
3377 #endif /* < 2.6.40 */
3379 /*****************************************************************************/
3380 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) )
3381 #define USE_LEGACY_PM_SUPPORT
3382 #endif /* < 3.0.0 */
3384 /*****************************************************************************/
/* Kernels < 3.1: dcb_ieee_* wrappers over the old dcbnl API and 1000BASE-T
 * master/slave control bits. */
3385 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
3386 #ifndef __netdev_alloc_skb_ip_align
3387 #define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l)
3388 #endif /* __netdev_alloc_skb_ip_align */
3389 #define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app)
3390 #define dcb_ieee_delapp(dev, app) 0
3391 #define dcb_ieee_getapp_mask(dev, app) (1 << app->priority)
3393 /* 1000BASE-T Control register */
3394 #define CTL1000_AS_MASTER 0x0800
3395 #define CTL1000_ENABLE_MASTER 0x1000
3398 #ifndef HAVE_DCBNL_IEEE_DELAPP
3399 #define HAVE_DCBNL_IEEE_DELAPP
3401 #endif /* < 3.1.0 */
3403 /*****************************************************************************/
/* Kernels < 3.2: skb_frag_t accessor backports.  On these kernels skb
 * fragments expose .page/.page_offset/.size fields directly; the helpers
 * below emulate the accessor API added in 3.2. */
3404 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
3405 #ifdef ETHTOOL_GRXRINGS
3406 #define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
3407 #endif /* ETHTOOL_GRXRINGS */
3409 #ifndef skb_frag_size
3410 #define skb_frag_size(frag) _kc_skb_frag_size(frag)
/* NOTE(review): the body of this helper ("return frag->size;" upstream) is
 * missing from this extract. */
3411 static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag)
3415 #endif /* skb_frag_size */
3417 #ifndef skb_frag_size_sub
3418 #define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta)
3419 static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta)
3421 frag->size -= delta;
3423 #endif /* skb_frag_size_sub */
3425 #ifndef skb_frag_page
3426 #define skb_frag_page(frag) _kc_skb_frag_page(frag)
/* NOTE(review): body missing here as well ("return frag->page;" upstream). */
3427 static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag)
3431 #endif /* skb_frag_page */
3433 #ifndef skb_frag_address
3434 #define skb_frag_address(frag) _kc_skb_frag_address(frag)
/* Virtual address of the fragment data (lowmem pages only). */
3435 static inline void *_kc_skb_frag_address(const skb_frag_t *frag)
3437 return page_address(skb_frag_page(frag)) + frag->page_offset;
3439 #endif /* skb_frag_address */
3441 #ifndef skb_frag_dma_map
3442 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
3443 #include <linux/dma-mapping.h>
3445 #define skb_frag_dma_map(dev,frag,offset,size,dir) \
3446 _kc_skb_frag_dma_map(dev,frag,offset,size,dir)
/* DMA-map a fragment: wraps dma_map_page() with the fragment's page and
 * intra-page offset. */
3447 static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev,
3448 const skb_frag_t *frag,
3449 size_t offset, size_t size,
3450 enum dma_data_direction dir)
3452 return dma_map_page(dev, skb_frag_page(frag),
3453 frag->page_offset + offset, size, dir);
3455 #endif /* skb_frag_dma_map */
3457 #ifndef __skb_frag_unref
3458 #define __skb_frag_unref(frag) __kc_skb_frag_unref(frag)
/* Drop one reference on the fragment's backing page. */
3459 static inline void __kc_skb_frag_unref(skb_frag_t *frag)
3461 put_page(skb_frag_page(frag));
3463 #endif /* __skb_frag_unref */
3465 #ifndef SPEED_UNKNOWN
3466 #define SPEED_UNKNOWN -1
3468 #ifndef DUPLEX_UNKNOWN
3469 #define DUPLEX_UNKNOWN 0xff
3471 #if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3))
3472 #ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
3473 #define HAVE_PCI_DEV_FLAGS_ASSIGNED
/* NOTE(review): an #endif/#else separating the RHEL branch from the
 * mainline >= 3.2 branch appears to be missing between these lines. */
3477 #ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
3478 #define HAVE_PCI_DEV_FLAGS_ASSIGNED
3479 #define HAVE_VF_SPOOFCHK_CONFIGURE
3481 #endif /* < 3.2.0 */
/* RHEL 6.2 only: its backported netdev_extended() carries the tc_to_txq
 * table, so redirect the ixgbe accessor at it. */
3483 #if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2))
3484 #undef ixgbe_get_netdev_tc_txq
3485 #define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc])
3487 /*****************************************************************************/
/* Kernels < 3.3: netdev_features_t does not exist yet, so KNI code uses u32;
 * the byte-queue-limit (BQL) hooks are stubbed out to no-ops. */
3488 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) )
3489 typedef u32 kni_netdev_features_t;
3490 #undef PCI_EXP_TYPE_RC_EC
3491 #define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
3493 #define netdev_tx_completed_queue(_q, _p, _b) do {} while (0)
3494 #define netdev_completed_queue(_n, _p, _b) do {} while (0)
3495 #define netdev_tx_sent_queue(_q, _b) do {} while (0)
3496 #define netdev_sent_queue(_n, _b) do {} while (0)
3497 #define netdev_tx_reset_queue(_q) do {} while (0)
3498 #define netdev_reset_queue(_n) do {} while (0)
3500 #else /* ! < 3.3.0 */
3501 typedef netdev_features_t kni_netdev_features_t;
3502 #define HAVE_INT_NDO_VLAN_RX_ADD_VID
/* ETHTOOL_SRXNTUPLE was removed in 3.3; hide it if a distro re-exports it. */
3503 #ifdef ETHTOOL_SRXNTUPLE
3504 #undef ETHTOOL_SRXNTUPLE
3506 #endif /* < 3.3.0 */
3508 /*****************************************************************************/
/* Kernels < 3.4: neutralize RXFCS/RXALL feature bits and backport
 * simple_open(), skb_add_rx_frag() and eth_hw_addr_random(). */
3509 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
3510 #ifndef NETIF_F_RXFCS
3511 #define NETIF_F_RXFCS 0
3512 #endif /* NETIF_F_RXFCS */
3513 #ifndef NETIF_F_RXALL
3514 #define NETIF_F_RXALL 0
3515 #endif /* NETIF_F_RXALL */
3517 #if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
3518 #define NUMTCS_RETURNS_U8
/* Out-of-line backport; defined in the companion kcompat.c. */
3520 int _kc_simple_open(struct inode *inode, struct file *file);
3521 #define simple_open _kc_simple_open
3522 #endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */
3525 #ifndef skb_add_rx_frag
3526 #define skb_add_rx_frag _kc_skb_add_rx_frag
3527 extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *,
3528 int, int, unsigned int);
3530 #ifdef NET_ADDR_RANDOM
/* Set a random MAC and record the assignment type.
 * NOTE(review): the "} while (0)" closer of this macro is not visible in
 * this extract -- present in upstream. */
3531 #define eth_hw_addr_random(N) do { \
3532 random_ether_addr(N->dev_addr); \
3533 N->addr_assign_type |= NET_ADDR_RANDOM; \
3535 #else /* NET_ADDR_RANDOM */
3536 #define eth_hw_addr_random(N) random_ether_addr(N->dev_addr)
3537 #endif /* NET_ADDR_RANDOM */
/* >= 3.4 provides kconfig.h (IS_ENABLED et al.). */
3539 #include <linux/kconfig.h>
3540 #endif /* >= 3.4.0 */
3542 /*****************************************************************************/
/* PTP hardware-clock support gate: requires kernel >= 3.0 with
 * CONFIG_PTP_1588_CLOCK enabled; otherwise fail the build loudly. */
3543 #if defined(E1000E_PTP) || defined(IGB_PTP) || defined(IXGBE_PTP) || defined(I40E_PTP)
3544 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) && IS_ENABLED(CONFIG_PTP_1588_CLOCK)
3545 #define HAVE_PTP_1588_CLOCK
/* NOTE(review): the "#else" introducing this error branch was dropped by the
 * extraction. */
3547 #error Cannot enable PTP Hardware Clock support due to a pre-3.0 kernel version or CONFIG_PTP_1588_CLOCK not enabled in the kernel
3548 #endif /* > 3.0.0 && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
3549 #endif /* E1000E_PTP || IGB_PTP || IXGBE_PTP || I40E_PTP */
3551 /*****************************************************************************/
/* Kernels < 3.5: stub out skb_tx_timestamp() and backport
 * ether_addr_equal() on top of the older compare_ether_addr(). */
3552 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) )
3553 #define skb_tx_timestamp(skb) do {} while (0)
/* compare_ether_addr() returns 0 on match, hence the negation. */
3554 static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2)
3556 return !compare_ether_addr(addr1, addr2);
3558 #define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2))
3560 #define HAVE_FDB_OPS
3561 #define HAVE_ETHTOOL_GET_TS_INFO
3562 #endif /* < 3.5.0 */
3564 /*****************************************************************************/
3565 #include <linux/mdio.h>
/* Kernels < 3.6: MDIO EEE capability bits (MMD register 3.20) that older
 * linux/mdio.h does not define. */
3566 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) )
3567 #define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */
3569 #ifndef MDIO_EEE_100TX
3570 #define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */
3572 #ifndef MDIO_EEE_1000T
3573 #define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */
3575 #ifndef MDIO_EEE_10GT
3576 #define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */
3578 #ifndef MDIO_EEE_1000KX
3579 #define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */
3581 #ifndef MDIO_EEE_10GKX4
3582 #define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */
3584 #ifndef MDIO_EEE_10GKR
3585 #define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */
3587 #endif /* < 3.6.0 */
3589 /******************************************************************************/
/* Kernels < 3.7: 40G link-mode bits plus the MDIO-EEE <-> ethtool bitmask
 * translation helpers that 3.7 added to linux/mdio.h. */
3590 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
3591 #ifndef ADVERTISED_40000baseKR4_Full
3592 /* these defines were all added in one commit, so should be safe
3593 * to trigger activiation on one define
/* NOTE(review): closing "*/" of the comment above was dropped by the
 * extraction. */
3595 #define SUPPORTED_40000baseKR4_Full (1 << 23)
3596 #define SUPPORTED_40000baseCR4_Full (1 << 24)
3597 #define SUPPORTED_40000baseSR4_Full (1 << 25)
3598 #define SUPPORTED_40000baseLR4_Full (1 << 26)
3599 #define ADVERTISED_40000baseKR4_Full (1 << 23)
3600 #define ADVERTISED_40000baseCR4_Full (1 << 24)
3601 #define ADVERTISED_40000baseSR4_Full (1 << 25)
3602 #define ADVERTISED_40000baseLR4_Full (1 << 26)
3606 * mmd_eee_cap_to_ethtool_sup_t
3607 * @eee_cap: value of the MMD EEE Capability register
3609 * A small helper function that translates MMD EEE Capability (3.20) bits
3610 * to ethtool supported settings.
/* NOTE(review): the "/**" opener and "*/" closer of the kernel-doc comment
 * above were dropped by the extraction. */
3612 static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
/* NOTE(review): the "u32 supported = 0;" declaration and the final
 * "return supported;" are missing from this extract. */
3616 if (eee_cap & MDIO_EEE_100TX)
3617 supported |= SUPPORTED_100baseT_Full;
3618 if (eee_cap & MDIO_EEE_1000T)
3619 supported |= SUPPORTED_1000baseT_Full;
3620 if (eee_cap & MDIO_EEE_10GT)
3621 supported |= SUPPORTED_10000baseT_Full;
3622 if (eee_cap & MDIO_EEE_1000KX)
3623 supported |= SUPPORTED_1000baseKX_Full;
3624 if (eee_cap & MDIO_EEE_10GKX4)
3625 supported |= SUPPORTED_10000baseKX4_Full;
3626 if (eee_cap & MDIO_EEE_10GKR)
3627 supported |= SUPPORTED_10000baseKR_Full;
3631 #define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \
3632 __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap)
3635 * mmd_eee_adv_to_ethtool_adv_t
3636 * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
3638 * A small helper function that translates the MMD EEE Advertisement (7.60)
3639 * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
/* NOTE(review): same kernel-doc delimiters missing; the "u32 adv = 0;" and
 * "return adv;" lines of the function below are also missing. */
3642 static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
3646 if (eee_adv & MDIO_EEE_100TX)
3647 adv |= ADVERTISED_100baseT_Full;
3648 if (eee_adv & MDIO_EEE_1000T)
3649 adv |= ADVERTISED_1000baseT_Full;
3650 if (eee_adv & MDIO_EEE_10GT)
3651 adv |= ADVERTISED_10000baseT_Full;
3652 if (eee_adv & MDIO_EEE_1000KX)
3653 adv |= ADVERTISED_1000baseKX_Full;
3654 if (eee_adv & MDIO_EEE_10GKX4)
3655 adv |= ADVERTISED_10000baseKX4_Full;
3656 if (eee_adv & MDIO_EEE_10GKR)
3657 adv |= ADVERTISED_10000baseKR_Full;
3661 #define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \
3662 __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv)
3665 * ethtool_adv_to_mmd_eee_adv_t
3666 * @adv: the ethtool advertisement settings
3668 * A small helper function that translates ethtool advertisement settings
3669 * to EEE advertisements for the MMD EEE Advertisement (7.60) and
3670 * MMD EEE Link Partner Ability (7.61) registers.
/* NOTE(review): inverse mapping of the helper above; its "u16 reg = 0;" and
 * "return reg;" lines are missing from this extract. */
3672 static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv)
3676 if (adv & ADVERTISED_100baseT_Full)
3677 reg |= MDIO_EEE_100TX;
3678 if (adv & ADVERTISED_1000baseT_Full)
3679 reg |= MDIO_EEE_1000T;
3680 if (adv & ADVERTISED_10000baseT_Full)
3681 reg |= MDIO_EEE_10GT;
3682 if (adv & ADVERTISED_1000baseKX_Full)
3683 reg |= MDIO_EEE_1000KX;
3684 if (adv & ADVERTISED_10000baseKX4_Full)
3685 reg |= MDIO_EEE_10GKX4;
3686 if (adv & ADVERTISED_10000baseKR_Full)
3687 reg |= MDIO_EEE_10GKR;
3691 #define ethtool_adv_to_mmd_eee_adv_t(adv) \
3692 __kc_ethtool_adv_to_mmd_eee_adv_t(adv)
3694 #ifndef pci_pcie_type
3695 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
/* Backport of pci_pcie_type(): locate the PCI Express capability, read its
 * Flags register, and return the Device/Port Type field (bits 7:4).
 * Fix: the address-of operator in the pci_read_config_word() call had been
 * corrupted from "&reg16" into the "registered trademark" character plus
 * "16" (an HTML-entity mangling of "&reg"), which is not valid C; the
 * address-of operator is restored so the config word is read into reg16.
 * NOTE(review): the declarations of 'pos' and 'reg16', the NULL-capability
 * check, and the function braces are not visible in this extract -- confirm
 * against upstream kcompat.h before further edits. */
3696 static inline u8 pci_pcie_type(struct pci_dev *pdev)
3701 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3704 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
3705 return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
3707 #else /* < 2.6.24 */
/* 2.6.24+ caches the port type on the pci_dev itself. */
3708 #define pci_pcie_type(x) (x)->pcie_type
3709 #endif /* < 2.6.24 */
3710 #endif /* pci_pcie_type */
/* Swallow the extra arguments 3.7 added to ptp_clock_register(). */
3712 #define ptp_clock_register(caps, args...) ptp_clock_register(caps)
/* pcie_capability_* accessors (3.7); out-of-line backports live in the
 * companion kcompat.c.  Gated on PCI_EXP_LNKSTA2, introduced by the same
 * kernel release. */
3714 #ifndef PCI_EXP_LNKSTA2
3715 int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
3716 #define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v)
3717 int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
3718 #define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v)
3719 int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
3720 u16 clear, u16 set);
3721 #define pcie_capability_clear_and_set_word(d,p,c,s) \
3722 __kc_pcie_capability_clear_and_set_word(d,p,c,s)
3724 #define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
/* Clearing bits is clear-and-set with an empty set mask.
 * NOTE(review): the "u16 clear" parameter line of this prototype was dropped
 * by the extraction. */
3726 static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
3729 return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0);
3731 #endif /* !PCI_EXP_LNKSTA2 */
3733 #if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
3734 #define USE_CONST_DEV_UC_CHAR
3737 #else /* >= 3.7.0 */
3738 #define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
3739 #define USE_CONST_DEV_UC_CHAR
3740 #endif /* >= 3.7.0 */
3742 /*****************************************************************************/
/* Kernels < 3.8: ASPM link-control bits, link-local address test, and the
 * HAVE_CONFIG_HOTPLUG marker; >= 3.8 gains SR-IOV configure and bridge
 * attribute support. */
3743 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) )
3744 #ifndef PCI_EXP_LNKCTL_ASPM_L0S
3745 #define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */
3747 #ifndef PCI_EXP_LNKCTL_ASPM_L1
3748 #define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */
3750 #define HAVE_CONFIG_HOTPLUG
3751 /* Reserved Ethernet Addresses per IEEE 802.1Q */
3752 static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = {
3753 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
3754 #if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) &&\
3755 !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5))
/* True for 01-80-C2-00-00-0X (802.1Q reserved link-local range): compare the
 * first 44 bits against eth_reserved_addr_base, masking the low nibble. */
3756 static inline bool is_link_local_ether_addr(const u8 *addr)
3758 __be16 *a = (__be16 *)addr;
3759 static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
3760 static const __be16 m = cpu_to_be16(0xfff0);
3762 return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
3764 #endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */
3765 #else /* >= 3.8.0 */
3768 #define HAVE_ENCAP_CSUM_OFFLOAD
/* __devinit* annotations were removed in 3.8; define away stragglers. */
3771 #ifndef __devinitdata
3772 #define __devinitdata
3783 #ifndef HAVE_SRIOV_CONFIGURE
3784 #define HAVE_SRIOV_CONFIGURE
3787 #define HAVE_BRIDGE_ATTRIBS
3788 #ifndef BRIDGE_MODE_VEB
3789 #define BRIDGE_MODE_VEB 0 /* Default loopback mode */
3790 #endif /* BRIDGE_MODE_VEB */
3791 #ifndef BRIDGE_MODE_VEPA
3792 #define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */
3793 #endif /* BRIDGE_MODE_VEPA */
3794 #endif /* >= 3.8.0 */
3796 /*****************************************************************************/
/* Kernels < 3.9: hlist iteration macros lost their extra cursor argument in
 * 3.9 -- redefine them in the new single-cursor form -- plus XPS and
 * __netdev_pick_tx backports. */
3797 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
3800 #define hlist_entry(ptr, type, member) container_of(ptr,type,member)
3802 #undef hlist_entry_safe
/* NULL-tolerant entry lookup used by the iteration macros below. */
3803 #define hlist_entry_safe(ptr, type, member) \
3804 (ptr) ? hlist_entry(ptr, type, member) : NULL
3806 #undef hlist_for_each_entry
/* NOTE(review): the middle "pos;" condition line of this macro appears to
 * have been dropped by the extraction. */
3807 #define hlist_for_each_entry(pos, head, member) \
3808 for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
3810 pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
3812 #undef hlist_for_each_entry_safe
/* Deletion-safe variant: 'n' caches the next node before the body runs. */
3813 #define hlist_for_each_entry_safe(pos, n, head, member) \
3814 for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \
3815 pos && ({ n = pos->member.next; 1; }); \
3816 pos = hlist_entry_safe(n, typeof(*pos), member))
/* NOTE(review): the "#ifdef CONFIG_XPS" opener matching the #else below was
 * dropped by the extraction. */
3819 extern int __kc_netif_set_xps_queue(struct net_device *, struct cpumask *, u16);
3820 #define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx))
3821 #else /* CONFIG_XPS */
3822 #define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0)
3823 #endif /* CONFIG_XPS */
3825 #ifdef HAVE_NETDEV_SELECT_QUEUE
3826 #define _kc_hashrnd 0xd631614b /* not so random hash salt */
3827 extern u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
3828 #define __netdev_pick_tx __kc_netdev_pick_tx
3829 #endif /* HAVE_NETDEV_SELECT_QUEUE */
3831 #define HAVE_BRIDGE_FILTER
3832 #define USE_DEFAULT_FDB_DEL_DUMP
3833 #endif /* < 3.9.0 */
3835 /*****************************************************************************/
/* Kernels < 3.10: pci_vfs_assigned() backport (real implementation only
 * when CONFIG_PCI_IOV; otherwise a stub) and the 3-argument
 * __vlan_hwaccel_put_tag() wrapper over the old 2-argument form. */
3836 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
3837 #ifdef CONFIG_PCI_IOV
3838 extern int __kc_pci_vfs_assigned(struct pci_dev *dev);
/* NOTE(review): the "#else" and the stub body ("return 0;") of this inline
 * are missing from this extract. */
3840 static inline int __kc_pci_vfs_assigned(struct pci_dev *dev)
3845 #define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev)
3847 #ifndef VLAN_TX_COOKIE_MAGIC
/* 3.10 added a vlan_proto parameter; this shim discards it (see the macro
 * below) and stores the tag directly in the skb.
 * NOTE(review): the parameter line and "return skb;" of this function were
 * dropped by the extraction. */
3848 static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb,
3851 #ifdef VLAN_TAG_PRESENT
3852 vlan_tci |= VLAN_TAG_PRESENT;
3854 skb->vlan_tci = vlan_tci;
3857 #define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \
3858 __kc__vlan_hwaccel_put_tag(skb, vlan_tci)
3861 #else /* >= 3.10.0 */
3862 #define HAVE_ENCAP_TSO_OFFLOAD
3863 #endif /* >= 3.10.0 */
/* Kernels < 3.14 (excluding RHEL 6.6+, specific Ubuntu 12.04/14.04 kernels,
 * and SLES 12, which backported it): provide skb_set_hash(). */
3865 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) )
3866 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)))
3867 #if (!(UBUNTU_KERNEL_CODE >= UBUNTU_KERNEL_VERSION(3,13,0,30,0) \
3868 && (UBUNTU_RELEASE_CODE == UBUNTU_RELEASE_VERSION(12,4) \
3869 || UBUNTU_RELEASE_CODE == UBUNTU_RELEASE_VERSION(14,4))))
3870 #if (!(SLE_VERSION_CODE == SLE_VERSION(12,0,0)))
3871 #ifdef NETIF_F_RXHASH
3872 #define PKT_HASH_TYPE_L3 0
/* NOTE(review): the "static inline void" line and the body of skb_set_hash
 * (upstream: "skb->rxhash = hash;") are missing from this extract. */
3874 skb_set_hash(struct sk_buff *skb, __u32 hash, __always_unused int type)
3878 #endif /* NETIF_F_RXHASH */
3879 #endif /* < SLES12 */
3880 #endif /* < 3.13.0-30.54 (Ubuntu 14.04) */
3881 #endif /* < RHEL7 */
3882 #endif /* < 3.14.0 */
/* 3.16+ (or RHEL 7.2+): struct net_device exposes ethtool_ops directly, so
 * SET_ETHTOOL_OPS becomes a plain assignment. */
3884 #if (( LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0) ) \
3885 || ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2) ))
3886 #undef SET_ETHTOOL_OPS
3887 #define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))
3888 #define HAVE_VF_MIN_MAX_TXRATE 1
3889 #endif /* >= 3.16.0 */
/* 3.19+: ndo_dflt_bridge_getlink() grew a filter mask; ndo_fdb_add gained a
 * vid parameter (except on RHEL 7.2, which kept the old signature). */
3891 #if (( LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) ) \
3892 || ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2) ))
3893 #define HAVE_NDO_DFLT_BRIDGE_ADD_MASK
3894 #if (!( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2) ))
3895 #define HAVE_NDO_FDB_ADD_VID
3896 #endif /* !RHEL 7.2 */
3897 #endif /* >= 3.19.0 */
3899 #if (( LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0) ) \
3900 || ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2) ))
3901 /* vlan_tx_xx functions got renamed to skb_vlan */
3902 #define vlan_tx_tag_get skb_vlan_tag_get
3903 #define vlan_tx_tag_present skb_vlan_tag_present
3904 #if (!( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2) ))
3905 #define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS
3906 #endif /* !RHEL 7.2 */
/* NOTE(review): the "#endif" closing the >= 4.0.0 block above appears to
 * have been dropped by the extraction -- confirm against upstream. */
3909 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0) )
3910 /* ndo_bridge_getlink adds new nlflags parameter */
3911 #define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
3912 #endif /* >= 4.1.0 */
3914 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0) )
3915 /* ndo_bridge_getlink adds new filter_mask and vlan_fill parameters */
3916 #define HAVE_NDO_BRIDGE_GETLINK_FILTER_MASK_VLAN_FILL
3917 #endif /* >= 4.2.0 */
3918 #endif /* _KCOMPAT_H_ */