X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eal%2Flinuxapp%2Fkni%2Fethtool%2Figb%2Fkcompat.h;h=44b9ebf819e011659f56b18676e56e777e4dc582;hb=364ea7748187b446cdb1c03ce9fc28773f242b47;hp=553e1c0e03cda419911ca2cd058f946826a6d1e7;hpb=3fc5ca2f63529ec5ad7b1b75f517d61dc7613bd9;p=dpdk.git diff --git a/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h b/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h index 553e1c0e03..44b9ebf819 100644 --- a/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h +++ b/lib/librte_eal/linuxapp/kni/ethtool/igb/kcompat.h @@ -1,7 +1,7 @@ /******************************************************************************* Intel(R) Gigabit Ethernet Linux driver - Copyright(c) 2007-2012 Intel Corporation. + Copyright(c) 2007-2013 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -267,7 +267,7 @@ struct msix_entry { #if !defined(IXGBE_DCA) && !defined(IGB_DCA) #define dca_get_tag(b) 0 #define dca_add_requester(a) -1 -#define dca_remove_requester(b) do { } while(0) +#define dca_remove_requester(b) do { } while(0) #define DCA_PROVIDER_ADD 0x0001 #define DCA_PROVIDER_REMOVE 0x0002 #endif @@ -287,7 +287,25 @@ struct msix_entry { #define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0) #endif -#ifndef NETIF_F_HW_VLAN_TX +#ifdef IS_ENABLED +#undef IS_ENABLED +#undef __ARG_PLACEHOLDER_1 +#undef config_enabled +#undef _config_enabled +#undef __config_enabled +#undef ___config_enabled +#endif + +#define __ARG_PLACEHOLDER_1 0, +#define config_enabled(cfg) _config_enabled(cfg) +#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) +#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) +#define ___config_enabled(__ignored, val, ...) 
val + +#define IS_ENABLED(option) \ + (config_enabled(option) || config_enabled(option##_MODULE)) + +#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX) struct _kc_vlan_ethhdr { unsigned char h_dest[ETH_ALEN]; unsigned char h_source[ETH_ALEN]; @@ -301,9 +319,11 @@ struct _kc_vlan_hdr { __be16 h_vlan_encapsulated_proto; }; #define vlan_hdr _kc_vlan_hdr +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) #define vlan_tx_tag_present(_skb) 0 #define vlan_tx_tag_get(_skb) 0 #endif +#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */ #ifndef VLAN_PRIO_SHIFT #define VLAN_PRIO_SHIFT 13 @@ -314,6 +334,10 @@ struct _kc_vlan_hdr { #define __GFP_COLD 0 #endif +#ifndef __GFP_COMP +#define __GFP_COMP 0 +#endif + /*****************************************************************************/ /* Installations with ethtool version without eeprom, adapter id, or statistics * support */ @@ -644,40 +668,65 @@ struct _kc_ethtool_pauseparam { #define ETHTOOL_BUSINFO_LEN 32 #endif -#ifndef RHEL_RELEASE_CODE -/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ -#define RHEL_RELEASE_CODE 0 -#endif #ifndef RHEL_RELEASE_VERSION #define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) #endif +#ifndef AX_RELEASE_VERSION +#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) +#endif + #ifndef AX_RELEASE_CODE #define AX_RELEASE_CODE 0 #endif -#ifndef AX_RELEASE_VERSION -#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) + +#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1) +#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2)) +#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3) +#endif + +#ifndef RHEL_RELEASE_CODE +/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ +#define RHEL_RELEASE_CODE 0 #endif /* SuSE version macro is the same as Linux kernel version */ #ifndef SLE_VERSION #define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c) #endif -#ifndef SLE_VERSION_CODE #ifdef CONFIG_SUSE_KERNEL -/* SLES11 GA is 2.6.27 based */ #if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) ) +/* SLES11 GA is 2.6.27 based */ #define SLE_VERSION_CODE SLE_VERSION(11,0,0) #elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) ) /* SLES11 SP1 is 2.6.32 based */ #define SLE_VERSION_CODE SLE_VERSION(11,1,0) -#else -#define SLE_VERSION_CODE 0 -#endif -#else /* CONFIG_SUSE_KERNEL */ -#define SLE_VERSION_CODE 0 +#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,61)) && \ + (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0))) +/* SLES11 SP3 is at least 3.0.61+ based */ +#define SLE_VERSION_CODE SLE_VERSION(11,3,0) +#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */ #endif /* CONFIG_SUSE_KERNEL */ +#ifndef SLE_VERSION_CODE +#define SLE_VERSION_CODE 0 #endif /* SLE_VERSION_CODE */ +/* Ubuntu release and kernel codes must be specified from Makefile */ +#ifndef UBUNTU_RELEASE_VERSION +#define UBUNTU_RELEASE_VERSION(a,b) (((a) * 100) + (b)) +#endif +#ifndef UBUNTU_KERNEL_VERSION +#define UBUNTU_KERNEL_VERSION(a,b,c,abi,upload) (((a) << 40) + ((b) << 32) + ((c) << 24) + ((abi) << 8) + (upload)) +#endif +#ifndef UBUNTU_RELEASE_CODE +#define UBUNTU_RELEASE_CODE 0 +#endif +#ifndef UBUNTU_KERNEL_CODE +#define UBUNTU_KERNEL_CODE 0 +#endif + #ifdef __KLOCWORK__ #ifdef ARRAY_SIZE #undef ARRAY_SIZE @@ -955,6 +1004,17 @@ struct vlan_ethhdr { #define __devexit_p(x) &(x) #endif +#else + /* For Kernel 3.8 these are not defined - so undefine all */ + 
#undef __devexit_p + #undef __devexit + #undef __devinit + #undef __devinitdata + #define __devexit_p(x) &(x) + #define __devexit + #define __devinit + #define __devinitdata + #endif /* 2.4.17 => 2.4.13 */ /*****************************************************************************/ @@ -987,6 +1047,13 @@ struct vlan_ethhdr { /* 2.4.22 => 2.4.17 */ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) #define pci_name(x) ((x)->slot_name) + +#ifndef SUPPORTED_10000baseT_Full +#define SUPPORTED_10000baseT_Full (1 << 12) +#endif +#ifndef ADVERTISED_10000baseT_Full +#define ADVERTISED_10000baseT_Full (1 << 12) +#endif #endif /*****************************************************************************/ @@ -1044,6 +1111,11 @@ static inline void _kc_netif_tx_disable(struct net_device *dev) #define ETHTOOL_OPS_COMPAT #endif /* 2.6.4 => 2.6.0 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) +#define __user +#endif /* < 2.4.27 */ + /*****************************************************************************/ /* 2.5.71 => 2.4.x */ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) ) @@ -1112,7 +1184,7 @@ static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) return (struct device *) pdev; } -#define pdev_printk(lvl, pdev, fmt, args...) \ +#define pdev_printk(lvl, pdev, fmt, args...) \ printk("%s %s: " fmt, lvl, pci_name(pdev), ## args) #define dev_err(dev, fmt, args...) \ pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args) @@ -1120,6 +1192,10 @@ static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args) #define dev_warn(dev, fmt, args...) \ pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args) +#define dev_notice(dev, fmt, args...) \ + pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args) +#define dev_dbg(dev, fmt, args...) \ + pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args) /* NOTE: dangerous! 
we ignore the 'gfp' argument */ #define dma_alloc_coherent(dev,sz,dma,gfp) \ @@ -1137,6 +1213,11 @@ static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) #define dma_unmap_single(dev,a,b,c) \ pci_unmap_single(to_pci_dev(dev),(a),(b),(c)) +#define dma_map_sg(dev, sg, nents, dir) \ + pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir) +#define dma_unmap_sg(dev, sg, nents, dir) \ + pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir) + #define dma_sync_single(dev,a,b,c) \ pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c)) @@ -1194,19 +1275,6 @@ static inline void INIT_HLIST_NODE(struct hlist_node *h) h->next = NULL; h->pprev = NULL; } -#define hlist_entry(ptr, type, member) container_of(ptr,type,member) - -#define hlist_for_each_entry(tpos, pos, head, member) \ - for (pos = (head)->first; \ - pos && ({ prefetch(pos->next); 1;}) && \ - ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = pos->next) - -#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \ - for (pos = (head)->first; \ - pos && ({ n = pos->next; 1; }) && \ - ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ - pos = n) #ifndef might_sleep #define might_sleep() @@ -1222,13 +1290,6 @@ static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) /* 2.5.28 => 2.4.23 */ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) -static inline void _kc_synchronize_irq(void) -{ - synchronize_irq(); -} -#undef synchronize_irq -#define synchronize_irq(X) _kc_synchronize_irq() - #include #define work_struct tq_struct #undef INIT_WORK @@ -1244,6 +1305,10 @@ static inline void _kc_synchronize_irq(void) /*****************************************************************************/ /* 2.6.0 => 2.5.28 */ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#ifndef read_barrier_depends +#define read_barrier_depends() rmb() +#endif + #undef get_cpu #define get_cpu() smp_processor_id() #undef put_cpu @@ -1306,7 +1371,54 @@ static inline const char *_kc_netdev_name(const struct net_device *dev) extern size_t _kc_strlcpy(char *dest, const char *src, size_t size); #endif /* strlcpy */ +#ifndef do_div +#if BITS_PER_LONG == 64 +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + __rem = ((uint64_t)(n)) % __base; \ + (n) = ((uint64_t)(n)) / __base; \ + __rem; \ + }) +#elif BITS_PER_LONG == 32 +extern uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor); +# define do_div(n,base) ({ \ + uint32_t __base = (base); \ + uint32_t __rem; \ + if (likely(((n) >> 32) == 0)) { \ + __rem = (uint32_t)(n) % __base; \ + (n) = (uint32_t)(n) / __base; \ + } else \ + __rem = _kc__div64_32(&(n), __base); \ + __rem; \ + }) +#else /* BITS_PER_LONG == ?? 
*/ +# error do_div() does not yet support the C64 +#endif /* BITS_PER_LONG */ +#endif /* do_div */ + +#ifndef NSEC_PER_SEC +#define NSEC_PER_SEC 1000000000L +#endif + +#undef HAVE_I2C_SUPPORT +#else /* 2.6.0 */ +#if IS_ENABLED(CONFIG_I2C_ALGOBIT) && \ + (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,9))) +#define HAVE_I2C_SUPPORT +#endif /* IS_ENABLED(CONFIG_I2C_ALGOBIT) */ + #endif /* 2.6.0 => 2.5.28 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ) +#define dma_pool pci_pool +#define dma_pool_destroy pci_pool_destroy +#define dma_pool_alloc pci_pool_alloc +#define dma_pool_free pci_pool_free + +#define dma_pool_create(name,dev,size,align,allocation) \ + pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation)) +#endif /* < 2.6.3 */ /*****************************************************************************/ /* 2.6.4 => 2.6.0 */ @@ -1383,6 +1495,16 @@ static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) #ifndef PCI_EXP_DEVCTL_CERE #define PCI_EXP_DEVCTL_CERE 0x0001 #endif +#define PCI_EXP_FLAGS 2 /* Capabilities register */ +#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ +#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ +#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ +#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ +#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ +#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ +#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ +#define PCI_EXP_DEVCAP 4 /* Device capabilities */ +#define PCI_EXP_DEVSTA 10 /* Device Status */ #define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \ schedule_timeout((x * HZ)/1000 + 2); \ } while (0) @@ -1524,6 +1646,7 @@ static inline void *_kc_skb_header_pointer(const struct sk_buff *skb, extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); #undef node_online_map #define node_online_map _kcompat_node_online_map +#define pci_get_class pci_find_class #endif /* < 2.6.10 */ /*****************************************************************************/ @@ -1572,6 +1695,15 @@ static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; #endif } + +#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ +#define PCI_EXP_LNKSTA 18 /* Link Status */ +#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ +#define PCI_EXP_SLTCTL 24 /* Slot Control */ +#define PCI_EXP_SLTSTA 26 /* Slot Status */ +#define PCI_EXP_RTCTL 28 /* Root Control */ +#define PCI_EXP_RTCAP 30 /* Root Capabilities */ +#define PCI_EXP_RTSTA 32 /* Root Status */ #endif /* < 2.6.11 */ /*****************************************************************************/ @@ -1585,8 +1717,16 @@ static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) /* Advertisement control register. */ #define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ #define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ +/* Link partner ability register. 
*/ +#define LPA_PAUSE_CAP 0x0400 /* Can pause */ +#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */ /* 1000BASE-T Control register */ #define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ +#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ +/* 1000BASE-T Status register */ +#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ +#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ + #ifndef is_zero_ether_addr #define is_zero_ether_addr _kc_is_zero_ether_addr static inline int _kc_is_zero_ether_addr(const u8 *addr) @@ -1627,6 +1767,8 @@ extern void *_kc_kzalloc(size_t size, int flags); #define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ #define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ +#define SUPPORTED_Pause (1 << 13) +#define SUPPORTED_Asym_Pause (1 << 14) #define ADVERTISED_Pause (1 << 13) #define ADVERTISED_Asym_Pause (1 << 14) @@ -1642,10 +1784,10 @@ typedef unsigned gfp_t; #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) ) #ifdef CONFIG_X86_64 -#define dma_sync_single_range_for_cpu(dev, dma_handle, offset, size, dir) \ - dma_sync_single_for_cpu(dev, dma_handle, size, dir) -#define dma_sync_single_range_for_device(dev, dma_handle, offset, size, dir) \ - dma_sync_single_for_device(dev, dma_handle, size, dir) +#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \ + dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir)) +#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \ + dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir)) #endif #endif #endif /* < 2.6.14 */ @@ -1700,10 +1842,21 @@ static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2) #else /* 2.6.16 and above */ #undef HAVE_PCI_ERS #define HAVE_PCI_ERS +#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) ) +#ifdef device_can_wakeup +#undef device_can_wakeup +#endif /* device_can_wakeup */ +#define device_can_wakeup(dev) 1 +#endif /* SLE_VERSION(10,4,0) */ #endif /* < 2.6.16 */ /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) +#ifndef dev_notice +#define dev_notice(dev, fmt, args...) 
\ + dev_printk(KERN_NOTICE, dev, fmt, ## args) +#endif + #ifndef first_online_node #define first_online_node 0 #endif @@ -1793,15 +1946,21 @@ static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0))) +#define i_private u.generic_ip +#endif /* >= RHEL 5.0 */ + #ifndef DIV_ROUND_UP #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) #endif +#ifndef __ALIGN_MASK +#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) +#endif #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) #if (!((RHEL_RELEASE_CODE && \ ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \ - (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))) || \ - (AX_RELEASE_CODE && AX_RELEASE_CODE > AX_RELEASE_VERSION(3,0)))) + (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))))) typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *); #endif #if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) @@ -1886,6 +2045,10 @@ do { \ PCI_ANY_ID, PCI_ANY_ID, 0, 0 #endif +#ifndef PCI_VENDOR_ID_INTEL +#define PCI_VENDOR_ID_INTEL 0x8086 +#endif + #ifndef round_jiffies #define round_jiffies(x) x #endif @@ -1938,6 +2101,20 @@ static inline __wsum csum_unfold(__sum16 n) #define pci_request_selected_regions(pdev, bars, name) \ pci_request_regions(pdev, name) #define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); + +#ifndef __aligned +#define __aligned(x) __attribute__((aligned(x))) +#endif + +extern struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev); +#define netdev_to_dev(netdev) \ + pci_dev_to_dev(_kc_netdev_to_pdev(netdev)) +#else +static inline struct device *netdev_to_dev(struct net_device *netdev) +{ + return &netdev->dev; +} + #endif /* < 2.6.21 */ /*****************************************************************************/ @@ -1955,6 +2132,10 @@ static inline __wsum csum_unfold(__sum16 n) do { \ skb->tail = skb->data; \ } while (0) +#define skb_set_tail_pointer(skb, offset) \ + do { \ + skb->tail = skb->data + offset; \ + } while (0) #define skb_copy_to_linear_data(skb, from, len) \ memcpy(skb->data, from, len) #define skb_copy_to_linear_data_offset(skb, offset, from, len) \ @@ -2002,6 +2183,31 @@ extern void _kc_print_hex_dump(const char *level, const char *prefix_str, const void *buf, size_t len, bool ascii); #define print_hex_dump(lvl, s, t, r, g, b, l, a) \ _kc_print_hex_dump(lvl, s, t, r, g, b, l, a) +#ifndef ADVERTISED_2500baseX_Full +#define ADVERTISED_2500baseX_Full (1 << 15) +#endif +#ifndef SUPPORTED_2500baseX_Full +#define SUPPORTED_2500baseX_Full (1 << 15) +#endif + +#ifdef HAVE_I2C_SUPPORT +#include +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) +struct i2c_board_info { + char driver_name[KOBJ_NAME_LEN]; + char type[I2C_NAME_SIZE]; + unsigned short flags; + unsigned short addr; + void *platform_data; +}; +#define I2C_BOARD_INFO(driver, dev_addr) .driver_name = (driver),\ + .addr = (dev_addr) +#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ +#define i2c_new_device(adap, info) _kc_i2c_new_device(adap, info) +extern struct i2c_client * +_kc_i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info); +#endif /* HAVE_I2C_SUPPORT */ + #else /* 2.6.22 */ #define ETH_TYPE_TRANS_SETS_DEV #define HAVE_NETDEV_STATS_IN_NETDEV @@ -2048,7 +2254,7 @@ struct 
napi_struct { #ifdef NAPI extern int __kc_adapter_clean(struct net_device *, int *); -extern struct net_device *napi_to_poll_dev(struct napi_struct *napi); +extern struct net_device *napi_to_poll_dev(const struct napi_struct *napi); #define netif_napi_add(_netdev, _napi, _poll, _weight) \ do { \ struct napi_struct *__napi = (_napi); \ @@ -2079,6 +2285,19 @@ extern struct net_device *napi_to_poll_dev(struct napi_struct *napi); } while (0) #define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi)) #define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi)) +#ifdef CONFIG_SMP +static inline void napi_synchronize(const struct napi_struct *n) +{ + struct net_device *dev = napi_to_poll_dev(n); + + while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) { + /* No hurry. */ + msleep(1); + } +} +#else +#define napi_synchronize(n) barrier() +#endif /* CONFIG_SMP */ #define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi)) #ifndef NETIF_F_GRO #define napi_complete(_napi) netif_rx_complete(napi_to_poll_dev(_napi)) @@ -2120,6 +2339,10 @@ static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) #ifndef KERN_CONT #define KERN_CONT "" #endif +#ifndef pr_err +#define pr_err(fmt, arg...) \ + printk(KERN_ERR fmt, ##arg) +#endif #else /* < 2.6.24 */ #define HAVE_ETHTOOL_GET_SSET_COUNT #define HAVE_NETDEV_NAPI_LIST @@ -2167,16 +2390,20 @@ static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) #endif /* DEFINE_PCI_DEVICE_TABLE */ +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) #ifndef IGB_PROCFS #define IGB_PROCFS #endif /* IGB_PROCFS */ +#endif /* >= 2.6.0 */ #else /* < 2.6.25 */ -#ifndef IGB_SYSFS -#define IGB_SYSFS -#endif /* IGB_SYSFS */ +#if IS_ENABLED(CONFIG_HWMON) +#ifndef IGB_HWMON +#define IGB_HWMON +#endif /* IGB_HWMON */ +#endif /* CONFIG_HWMON */ #endif /* < 2.6.25 */ @@ -2198,6 +2425,9 @@ extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state); #else /* < 2.6.26 */ #include #define HAVE_NETDEV_VLAN_FEATURES +#ifndef PCI_EXP_LNKCAP_ASPMS +#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ +#endif /* PCI_EXP_LNKCAP_ASPMS */ #endif /* < 2.6.26 */ /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) @@ -2309,9 +2539,15 @@ extern void __kc_warn_slowpath(const char *file, const int line, unlikely(__ret_warn_on); \ }) #endif /* WARN */ +#undef HAVE_IXGBE_DEBUG_FS +#undef HAVE_IGB_DEBUG_FS #else /* < 2.6.27 */ #define HAVE_TX_MQ #define HAVE_NETDEV_SELECT_QUEUE +#ifdef CONFIG_DEBUG_FS +#define HAVE_IXGBE_DEBUG_FS +#define HAVE_IGB_DEBUG_FS +#endif /* CONFIG_DEBUG_FS */ #endif /* < 2.6.27 */ /*****************************************************************************/ @@ -2331,10 +2567,10 @@ static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) } #define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) #endif -#ifndef skb_add_rx_frag -#define skb_add_rx_frag _kc_skb_add_rx_frag -extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int); -#endif + +#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ +#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ + #endif /* < 2.6.28 */ /*****************************************************************************/ @@ -2351,6 +2587,17 @@ extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int); #ifndef pcie_aspm_enabled #define pcie_aspm_enabled() (1) #endif /* pcie_aspm_enabled */ + +#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State 
*/ + +#ifndef pci_clear_master +extern void _kc_pci_clear_master(struct pci_dev *dev); +#define pci_clear_master(dev) _kc_pci_clear_master(dev) +#endif + +#ifndef PCI_EXP_LNKCTL_ASPMC +#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ +#endif #else /* < 2.6.29 */ #ifndef HAVE_NET_DEVICE_OPS #define HAVE_NET_DEVICE_OPS @@ -2366,10 +2613,6 @@ extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int); #define skb_get_rx_queue(a) 0 #define skb_record_rx_queue(a, b) do {} while (0) #define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues) -#ifdef IXGBE_FCOE -#undef CONFIG_FCOE -#undef CONFIG_FCOE_MODULE -#endif /* IXGBE_FCOE */ #ifndef CONFIG_PCI_IOV #undef pci_enable_sriov #define pci_enable_sriov(a, b) -ENOTSUPP @@ -2380,7 +2623,20 @@ extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int); #define pr_cont(fmt, ...) \ printk(KERN_CONT fmt, ##__VA_ARGS__) #endif /* pr_cont */ -#else +static inline void _kc_synchronize_irq(unsigned int a) +{ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) + synchronize_irq(); +#else /* < 2.5.28 */ + synchronize_irq(a); +#endif /* < 2.5.28 */ +} +#undef synchronize_irq +#define synchronize_irq(a) _kc_synchronize_irq(a) + +#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ + +#else /* < 2.6.30 */ #define HAVE_ASPM_QUIRKS #endif /* < 2.6.30 */ @@ -2395,7 +2651,40 @@ extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int); #define netdev_for_each_uc_addr(uclist, dev) \ for (uclist = dev->uc_list; uclist; uclist = uclist->next) #endif -#else +#ifndef PORT_OTHER +#define PORT_OTHER 0xff +#endif +#ifndef MDIO_PHY_ID_PRTAD +#define MDIO_PHY_ID_PRTAD 0x03e0 +#endif +#ifndef MDIO_PHY_ID_DEVAD +#define MDIO_PHY_ID_DEVAD 0x001f +#endif +#ifndef skb_dst +#define skb_dst(s) ((s)->dst) +#endif + +#ifndef SUPPORTED_1000baseKX_Full +#define SUPPORTED_1000baseKX_Full (1 << 17) +#endif +#ifndef SUPPORTED_10000baseKX4_Full +#define SUPPORTED_10000baseKX4_Full (1 << 18) +#endif +#ifndef SUPPORTED_10000baseKR_Full +#define SUPPORTED_10000baseKR_Full (1 << 19) +#endif + +#ifndef ADVERTISED_1000baseKX_Full +#define ADVERTISED_1000baseKX_Full (1 << 17) +#endif +#ifndef ADVERTISED_10000baseKX4_Full +#define ADVERTISED_10000baseKX4_Full (1 << 18) +#endif +#ifndef ADVERTISED_10000baseKR_Full +#define ADVERTISED_10000baseKR_Full (1 << 19) +#endif + +#else /* < 2.6.31 */ #ifndef HAVE_NETDEV_STORAGE_ADDRESS #define HAVE_NETDEV_STORAGE_ADDRESS #endif @@ -2405,6 +2694,9 @@ extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int); #ifndef HAVE_TRANS_START_IN_QUEUE #define HAVE_TRANS_START_IN_QUEUE #endif +#ifndef HAVE_INCLUDE_LINUX_MDIO_H +#define HAVE_INCLUDE_LINUX_MDIO_H +#endif #endif /* < 2.6.31 */ /*****************************************************************************/ @@ -2417,9 +2709,21 @@ extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int); #endif #endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline int _kc_pm_runtime_get_sync() +{ + return 1; +} +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync() +#else /* 2.6.0 => 2.6.32 */ +static inline int _kc_pm_runtime_get_sync(struct device *dev) +{ + return 1; +} #ifndef pm_runtime_get_sync -#define pm_runtime_get_sync(dev) do {} while (0) +#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev) #endif +#endif /* 2.6.0 => 2.6.32 */ #ifndef pm_runtime_put #define pm_runtime_put(dev) do {} while (0) #endif @@ -2489,17 
+2793,87 @@ extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, int, int); #ifndef __percpu #define __percpu #endif /* __percpu */ +#ifndef PORT_DA +#define PORT_DA PORT_OTHER +#endif +#ifndef PORT_NONE +#define PORT_NONE PORT_OTHER +#endif + +#if ((RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))) +#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE) +#undef DEFINE_DMA_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME +#undef DEFINE_DMA_UNMAP_LEN +#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME +#undef dma_unmap_addr +#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) +#undef dma_unmap_addr_set +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) +#undef dma_unmap_len +#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) +#undef dma_unmap_len_set +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) +#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ +#endif /* RHEL_RELEASE_CODE */ + +#if (!(RHEL_RELEASE_CODE && \ + (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \ + ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))))) +static inline bool pci_is_pcie(struct pci_dev *dev) +{ + return !!pci_pcie_cap(dev); +} +#endif /* RHEL_RELEASE_CODE */ + +#ifndef __always_unused +#define __always_unused __attribute__((__unused__)) +#endif +#ifndef __maybe_unused +#define __maybe_unused __attribute__((__unused__)) +#endif + +#if (!(RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)))) +#define sk_tx_queue_get(_sk) (-1) +#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0) +#endif /* !(RHEL >= 6.2) */ + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT +#define HAVE_ETHTOOL_SET_PHYS_ID +#define HAVE_ETHTOOL_GET_TS_INFO +#endif /* RHEL >= 6.4 && RHEL < 7.0 */ + +#if (RHEL_RELEASE_CODE && \ + (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) && \ + (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) +#define HAVE_RHEL6_NETDEV_OPS_EXT_FDB +#endif /* RHEL >= 6.5 && RHEL < 7.0 */ + #else /* < 2.6.33 */ #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) #ifndef HAVE_NETDEV_OPS_FCOE_GETWWN #define HAVE_NETDEV_OPS_FCOE_GETWWN #endif #endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ -#define HAVE_ETHTOOL_SFP_DISPLAY_PORT #endif /* < 2.6.33 */ /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) +#ifndef pci_num_vf +#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) +extern int _kc_pci_num_vf(struct pci_dev *dev); +#endif +#endif /* RHEL_RELEASE_CODE */ + #ifndef ETH_FLAG_NTUPLE #define ETH_FLAG_NTUPLE NETIF_F_NTUPLE #endif @@ -2547,16 +2921,13 @@ static inline const char *_kc_netdev_name(const struct net_device *dev) #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) #define netdev_printk(level, netdev, format, args...) 
\ do { \ - struct adapter_struct *kc_adapter = netdev_priv(netdev);\ - struct pci_dev *pdev = kc_adapter->pdev; \ - printk("%s %s: " format, level, pci_name(pdev), \ - ##args); \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ + printk(level "%s: " format, pci_name(pdev), ##args); \ } while(0) #elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) #define netdev_printk(level, netdev, format, args...) \ do { \ - struct adapter_struct *kc_adapter = netdev_priv(netdev);\ - struct pci_dev *pdev = kc_adapter->pdev; \ + struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ struct device *dev = pci_dev_to_dev(pdev); \ dev_printk(level, dev, "%s: " format, \ netdev_name(netdev), ##args); \ @@ -2589,7 +2960,10 @@ do { \ #define netdev_info(dev, format, args...) \ netdev_printk(KERN_INFO, dev, format, ##args) #undef netdev_dbg -#if defined(CONFIG_DYNAMIC_DEBUG) +#if defined(DEBUG) +#define netdev_dbg(__dev, format, args...) \ + netdev_printk(KERN_DEBUG, __dev, format, ##args) +#elif defined(CONFIG_DYNAMIC_DEBUG) #define netdev_dbg(__dev, format, args...) \ do { \ dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ @@ -2632,6 +3006,9 @@ do { \ #undef netif_info #define netif_info(priv, type, dev, fmt, args...) \ netif_level(info, priv, type, dev, fmt, ##args) +#undef netif_dbg +#define netif_dbg(priv, type, dev, fmt, args...) \ + netif_level(dbg, priv, type, dev, fmt, ##args) #ifdef SET_SYSTEM_SLEEP_PM_OPS #define HAVE_SYSTEM_SLEEP_PM_OPS @@ -2645,13 +3022,44 @@ do { \ #endif /* for_each_set_bit */ #ifndef DEFINE_DMA_UNMAP_ADDR -#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR +#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR #define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN #define dma_unmap_addr pci_unmap_addr #define dma_unmap_addr_set pci_unmap_addr_set #define dma_unmap_len pci_unmap_len #define dma_unmap_len_set pci_unmap_len_set #endif /* DEFINE_DMA_UNMAP_ADDR */ + +#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3)) +#ifdef IGB_HWMON +#ifdef CONFIG_DEBUG_LOCK_ALLOC +#define sysfs_attr_init(attr) \ + do { \ + static struct lock_class_key __key; \ + (attr)->key = &__key; \ + } while (0) +#else +#define sysfs_attr_init(attr) do {} while (0) +#endif /* CONFIG_DEBUG_LOCK_ALLOC */ +#endif /* IGB_HWMON */ +#endif /* RHEL_RELEASE_CODE */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +static inline bool _kc_pm_runtime_suspended() +{ + return false; +} +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended() +#else /* 2.6.0 => 2.6.34 */ +static inline bool _kc_pm_runtime_suspended(struct device *dev) +{ + return false; +} +#ifndef pm_runtime_suspended +#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev) +#endif +#endif /* 2.6.0 => 2.6.34 */ + #else /* < 2.6.34 */ #define HAVE_SYSTEM_SLEEP_PM_OPS #ifndef HAVE_SET_RX_MODE @@ -2662,26 +3070,36 @@ do { \ /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) + +ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count); +#define simple_write_to_buffer _kc_simple_write_to_buffer + #ifndef numa_node_id #define numa_node_id() 0 #endif #ifdef HAVE_TX_MQ #include #ifndef CONFIG_NETDEVICES_MULTIQUEUE +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) void _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int); #define netif_set_real_num_tx_queues _kc_netif_set_real_num_tx_queues +#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ #else 
/* CONFIG_NETDEVICES_MULTI_QUEUE */ #define netif_set_real_num_tx_queues(_netdev, _count) \ do { \ (_netdev)->egress_subqueue_count = _count; \ } while (0) #endif /* CONFIG_NETDEVICES_MULTI_QUEUE */ -#else +#else /* HAVE_TX_MQ */ #define netif_set_real_num_tx_queues(_netdev, _count) do {} while(0) #endif /* HAVE_TX_MQ */ #ifndef ETH_FLAG_RXHASH #define ETH_FLAG_RXHASH (1<<28) #endif /* ETH_FLAG_RXHASH */ +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) +#define HAVE_IRQ_AFFINITY_HINT +#endif #else /* < 2.6.35 */ #define HAVE_PM_QOS_REQUEST_LIST #define HAVE_IRQ_AFFINITY_HINT @@ -2739,9 +3157,21 @@ do { \ } while (0) #undef usleep_range -#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) +#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) + +#define u64_stats_update_begin(a) do { } while(0) +#define u64_stats_update_end(a) do { } while(0) +#define u64_stats_fetch_begin(a) do { } while(0) +#define u64_stats_fetch_retry_bh(a) (0) +#define u64_stats_fetch_begin_bh(a) (0) + +#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) +#define HAVE_8021P_SUPPORT +#endif #else /* < 2.6.36 */ + + #define HAVE_PM_QOS_REQUEST_ACTIVE #define HAVE_8021P_SUPPORT #define HAVE_NDO_GET_STATS64 @@ -2749,6 +3179,15 @@ do { \ /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) ) +#ifndef netif_set_real_num_rx_queues +static inline int __kc_netif_set_real_num_rx_queues(struct net_device *dev, + unsigned int rxq) +{ + return 0; +} +#define netif_set_real_num_rx_queues(dev, rxq) \ + __kc_netif_set_real_num_rx_queues((dev), (rxq)) +#endif #ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR #define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) #endif @@ -2805,6 +3244,11 @@ static inline __be16 __kc_vlan_get_protocol(const struct sk_buff *skb) #define SKBTX_IN_PROGRESS (1 << 2) #define SKB_SHARED_TX_IS_UNION #endif + +#ifndef device_wakeup_enable +#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true) +#endif + #if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) ) #ifndef HAVE_VLAN_RX_REGISTER #define HAVE_VLAN_RX_REGISTER @@ -2843,13 +3287,16 @@ static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) #define DCB_CAP_DCBX_STATIC 0x10 #endif #endif /* CONFIG_DCB */ -extern u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16); -#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q)) -#else /* < 2.6.38 */ +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) +#define CONFIG_XPS +#endif /* RHEL_RELEASE_VERSION(6,2) */ #endif /* < 2.6.38 */ /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) +#ifndef NETIF_F_RXCSUM +#define NETIF_F_RXCSUM (1 << 29) +#endif #ifndef skb_queue_reverse_walk_safe #define skb_queue_reverse_walk_safe(queue, skb, tmp) \ for (skb = (queue)->prev, tmp = skb->prev; \ @@ -2882,7 +3329,7 @@ extern u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16); /* use < 2.6.40 because of a Fedora 15 kernel update where they * updated the kernel version to 2.6.40.x and they back-ported 3.0 features * like set_phys_id for ethtool. 
- */ + */ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) ) #ifdef ETHTOOL_GRXRINGS #ifndef FLOW_EXT @@ -2926,6 +3373,11 @@ struct _kc_ethtool_rx_flow_spec { #define HAVE_ETHTOOL_SET_PHYS_ID #endif /* < 2.6.40 */ +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) ) +#define USE_LEGACY_PM_SUPPORT +#endif /* < 3.0.0 */ + /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) #ifndef __netdev_alloc_skb_ip_align @@ -2934,6 +3386,11 @@ struct _kc_ethtool_rx_flow_spec { #define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) #define dcb_ieee_delapp(dev, app) 0 #define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) + +/* 1000BASE-T Control register */ +#define CTL1000_AS_MASTER 0x0800 +#define CTL1000_ENABLE_MASTER 0x1000 + #else /* < 3.1.0 */ #ifndef HAVE_DCBNL_IEEE_DELAPP #define HAVE_DCBNL_IEEE_DELAPP @@ -2979,6 +3436,9 @@ static inline void *_kc_skb_frag_address(const skb_frag_t *frag) #endif /* skb_frag_address */ #ifndef skb_frag_dma_map +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) +#include +#endif #define skb_frag_dma_map(dev,frag,offset,size,dir) \ _kc_skb_frag_dma_map(dev,frag,offset,size,dir) static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev, @@ -2998,6 +3458,18 @@ static inline void __kc_skb_frag_unref(skb_frag_t *frag) put_page(skb_frag_page(frag)); } #endif /* __skb_frag_unref */ + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN -1 +#endif +#ifndef DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif +#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) +#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED +#define HAVE_PCI_DEV_FLAGS_ASSIGNED +#endif +#endif #else /* < 3.2.0 */ #ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED #define HAVE_PCI_DEV_FLAGS_ASSIGNED @@ -3009,11 +3481,21 @@ static inline void __kc_skb_frag_unref(skb_frag_t *frag) #undef ixgbe_get_netdev_tc_txq #define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) #endif - /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) ) -typedef u32 netdev_features_t; +typedef u32 kni_netdev_features_t; +#undef PCI_EXP_TYPE_RC_EC +#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ +#ifndef CONFIG_BQL +#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) +#define netdev_completed_queue(_n, _p, _b) do {} while (0) +#define netdev_tx_sent_queue(_q, _b) do {} while (0) +#define netdev_sent_queue(_n, _b) do {} while (0) +#define netdev_tx_reset_queue(_q) do {} while (0) +#define netdev_reset_queue(_n) do {} while (0) +#endif #else /* ! 
< 3.3.0 */ +typedef netdev_features_t kni_netdev_features_t; #define HAVE_INT_NDO_VLAN_RX_ADD_VID #ifdef ETHTOOL_SRXNTUPLE #undef ETHTOOL_SRXNTUPLE @@ -3029,14 +3511,384 @@ typedef u32 netdev_features_t; #define NETIF_F_RXALL 0 #endif /* NETIF_F_RXALL */ +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) #define NUMTCS_RETURNS_U8 +int _kc_simple_open(struct inode *inode, struct file *file); +#define simple_open _kc_simple_open +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ + -#endif /* < 3.4.0 */ +#ifndef skb_add_rx_frag +#define skb_add_rx_frag _kc_skb_add_rx_frag +extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, + int, int, unsigned int); +#endif +#ifdef NET_ADDR_RANDOM +#define eth_hw_addr_random(N) do { \ + random_ether_addr(N->dev_addr); \ + N->addr_assign_type |= NET_ADDR_RANDOM; \ + } while (0) +#else /* NET_ADDR_RANDOM */ +#define eth_hw_addr_random(N) random_ether_addr(N->dev_addr) +#endif /* NET_ADDR_RANDOM */ +#else /* < 3.4.0 */ +#include +#endif /* >= 3.4.0 */ + +/*****************************************************************************/ +#if defined(E1000E_PTP) || defined(IGB_PTP) || defined(IXGBE_PTP) || defined(I40E_PTP) +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) && IS_ENABLED(CONFIG_PTP_1588_CLOCK) +#define HAVE_PTP_1588_CLOCK +#else +#error Cannot enable PTP Hardware Clock support due to a pre-3.0 kernel version or CONFIG_PTP_1588_CLOCK not enabled in the kernel +#endif /* > 3.0.0 && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ +#endif /* E1000E_PTP || IGB_PTP || IXGBE_PTP || I40E_PTP */ /*****************************************************************************/ #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) +#define skb_tx_timestamp(skb) do {} while (0) +static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2) +{ + return !compare_ether_addr(addr1, addr2); +} +#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2)) #else #define HAVE_FDB_OPS +#define HAVE_ETHTOOL_GET_TS_INFO #endif /* < 3.5.0 */ + +/*****************************************************************************/ +#include +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) ) +#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ + +#ifndef MDIO_EEE_100TX +#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ +#endif +#ifndef MDIO_EEE_1000T +#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ +#endif +#ifndef MDIO_EEE_10GT +#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ +#endif +#ifndef MDIO_EEE_1000KX +#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ +#endif +#ifndef MDIO_EEE_10GKX4 +#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ +#endif +#ifndef MDIO_EEE_10GKR +#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ +#endif +#endif /* < 3.6.0 */ + +/******************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) +#ifndef ADVERTISED_40000baseKR4_Full +/* these defines were all added in one commit, so should be safe + * to trigger activiation on one define + */ +#define SUPPORTED_40000baseKR4_Full (1 << 23) +#define SUPPORTED_40000baseCR4_Full (1 << 24) +#define SUPPORTED_40000baseSR4_Full (1 << 25) +#define SUPPORTED_40000baseLR4_Full (1 << 26) +#define ADVERTISED_40000baseKR4_Full (1 << 23) +#define ADVERTISED_40000baseCR4_Full (1 << 24) +#define ADVERTISED_40000baseSR4_Full (1 << 25) +#define ADVERTISED_40000baseLR4_Full (1 << 26) +#endif + +/** + * mmd_eee_cap_to_ethtool_sup_t + * @eee_cap: value of the MMD EEE 
Capability register + * + * A small helper function that translates MMD EEE Capability (3.20) bits + * to ethtool supported settings. + */ +static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap) +{ + u32 supported = 0; + + if (eee_cap & MDIO_EEE_100TX) + supported |= SUPPORTED_100baseT_Full; + if (eee_cap & MDIO_EEE_1000T) + supported |= SUPPORTED_1000baseT_Full; + if (eee_cap & MDIO_EEE_10GT) + supported |= SUPPORTED_10000baseT_Full; + if (eee_cap & MDIO_EEE_1000KX) + supported |= SUPPORTED_1000baseKX_Full; + if (eee_cap & MDIO_EEE_10GKX4) + supported |= SUPPORTED_10000baseKX4_Full; + if (eee_cap & MDIO_EEE_10GKR) + supported |= SUPPORTED_10000baseKR_Full; + + return supported; +} +#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \ + __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap) + +/** + * mmd_eee_adv_to_ethtool_adv_t + * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers + * + * A small helper function that translates the MMD EEE Advertisement (7.60) + * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement + * settings. + */ +static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) +{ + u32 adv = 0; + + if (eee_adv & MDIO_EEE_100TX) + adv |= ADVERTISED_100baseT_Full; + if (eee_adv & MDIO_EEE_1000T) + adv |= ADVERTISED_1000baseT_Full; + if (eee_adv & MDIO_EEE_10GT) + adv |= ADVERTISED_10000baseT_Full; + if (eee_adv & MDIO_EEE_1000KX) + adv |= ADVERTISED_1000baseKX_Full; + if (eee_adv & MDIO_EEE_10GKX4) + adv |= ADVERTISED_10000baseKX4_Full; + if (eee_adv & MDIO_EEE_10GKR) + adv |= ADVERTISED_10000baseKR_Full; + + return adv; +} +#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \ + __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv) + +/** + * ethtool_adv_to_mmd_eee_adv_t + * @adv: the ethtool advertisement settings + * + * A small helper function that translates ethtool advertisement settings + * to EEE advertisements for the MMD EEE Advertisement (7.60) and + * MMD EEE Link Partner Ability (7.61) registers. + */ +static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv) +{ + u16 reg = 0; + + if (adv & ADVERTISED_100baseT_Full) + reg |= MDIO_EEE_100TX; + if (adv & ADVERTISED_1000baseT_Full) + reg |= MDIO_EEE_1000T; + if (adv & ADVERTISED_10000baseT_Full) + reg |= MDIO_EEE_10GT; + if (adv & ADVERTISED_1000baseKX_Full) + reg |= MDIO_EEE_1000KX; + if (adv & ADVERTISED_10000baseKX4_Full) + reg |= MDIO_EEE_10GKX4; + if (adv & ADVERTISED_10000baseKR_Full) + reg |= MDIO_EEE_10GKR; + + return reg; +} +#define ethtool_adv_to_mmd_eee_adv_t(adv) \ + __kc_ethtool_adv_to_mmd_eee_adv_t(adv) + +#ifndef pci_pcie_type +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) +static inline u8 pci_pcie_type(struct pci_dev *pdev) +{ + int pos; + u16 reg16; + + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (!pos) + BUG(); + pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); + return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; +} +#else /* < 2.6.24 */ +#define pci_pcie_type(x) (x)->pcie_type +#endif /* < 2.6.24 */ +#endif /* pci_pcie_type */ + +#define ptp_clock_register(caps, args...) 
ptp_clock_register(caps) + +#ifndef PCI_EXP_LNKSTA2 +int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); +#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v) +int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); +#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v) +int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, + u16 clear, u16 set); +#define pcie_capability_clear_and_set_word(d,p,c,s) \ + __kc_pcie_capability_clear_and_set_word(d,p,c,s) + +#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ + +static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos, + u16 clear) +{ + return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0); +} +#endif /* !PCI_EXP_LNKSTA2 */ + +#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) +#define USE_CONST_DEV_UC_CHAR +#endif + +#else /* >= 3.7.0 */ +#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS +#define USE_CONST_DEV_UC_CHAR +#endif /* >= 3.7.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) ) +#ifndef PCI_EXP_LNKCTL_ASPM_L0S +#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */ +#endif +#ifndef PCI_EXP_LNKCTL_ASPM_L1 +#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */ +#endif +#define HAVE_CONFIG_HOTPLUG +/* Reserved Ethernet Addresses per IEEE 802.1Q */ +static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { + 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; +#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) &&\ + !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) +static inline bool is_link_local_ether_addr(const u8 *addr) +{ + __be16 *a = (__be16 *)addr; + static const __be16 *b = (const __be16 *)eth_reserved_addr_base; + static const __be16 m = cpu_to_be16(0xfff0); + + return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; +} +#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ +#else /* >= 3.8.0 */ +#ifndef __devinit +#define __devinit +#define HAVE_ENCAP_CSUM_OFFLOAD +#endif + +#ifndef __devinitdata +#define __devinitdata +#endif + +#ifndef __devexit +#define __devexit +#endif + +#ifndef __devexit_p +#define __devexit_p +#endif + +#ifndef HAVE_SRIOV_CONFIGURE +#define HAVE_SRIOV_CONFIGURE +#endif + +#define HAVE_BRIDGE_ATTRIBS +#ifndef BRIDGE_MODE_VEB +#define BRIDGE_MODE_VEB 0 /* Default loopback mode */ +#endif /* BRIDGE_MODE_VEB */ +#ifndef BRIDGE_MODE_VEPA +#define BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ +#endif /* BRIDGE_MODE_VEPA */ +#endif /* >= 3.8.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) + +#undef hlist_entry +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#undef hlist_entry_safe +#define hlist_entry_safe(ptr, type, member) \ + (ptr) ? 
hlist_entry(ptr, type, member) : NULL + +#undef hlist_for_each_entry +#define hlist_for_each_entry(pos, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) + +#undef hlist_for_each_entry_safe +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ + pos && ({ n = pos->member.next; 1; }); \ + pos = hlist_entry_safe(n, typeof(*pos), member)) + +#ifdef CONFIG_XPS +extern int __kc_netif_set_xps_queue(struct net_device *, struct cpumask *, u16); +#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) +#else /* CONFIG_XPS */ +#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0) +#endif /* CONFIG_XPS */ + +#ifdef HAVE_NETDEV_SELECT_QUEUE +#define _kc_hashrnd 0xd631614b /* not so random hash salt */ +extern u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); +#define __netdev_pick_tx __kc_netdev_pick_tx +#endif /* HAVE_NETDEV_SELECT_QUEUE */ +#else +#define HAVE_BRIDGE_FILTER +#define USE_DEFAULT_FDB_DEL_DUMP +#endif /* < 3.9.0 */ + +/*****************************************************************************/ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) +#ifdef CONFIG_PCI_IOV +extern int __kc_pci_vfs_assigned(struct pci_dev *dev); +#else +static inline int __kc_pci_vfs_assigned(struct pci_dev *dev) +{ + return 0; +} +#endif +#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) + +#ifndef VLAN_TX_COOKIE_MAGIC +static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb, + u16 vlan_tci) +{ +#ifdef VLAN_TAG_PRESENT + vlan_tci |= VLAN_TAG_PRESENT; +#endif + skb->vlan_tci = vlan_tci; + return skb; +} +#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \ + __kc__vlan_hwaccel_put_tag(skb, vlan_tci) +#endif + +#else /* >= 3.10.0 */ +#define HAVE_ENCAP_TSO_OFFLOAD +#endif /* >= 3.10.0 */ + +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) +#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6))) +#if (!(UBUNTU_KERNEL_CODE >= UBUNTU_KERNEL_VERSION(3,13,0,30,54) \ + && (UBUNTU_RELEASE_CODE == UBUNTU_RELEASE_VERSION(12,4) \ + || UBUNTU_RELEASE_CODE == UBUNTU_RELEASE_VERSION(14,4)))) +#ifdef NETIF_F_RXHASH +#define PKT_HASH_TYPE_L3 0 +static inline void +skb_set_hash(struct sk_buff *skb, __u32 hash, __always_unused int type) +{ + skb->rxhash = hash; +} +#endif /* NETIF_F_RXHASH */ +#endif /* < 3.13.0-30.54 (Ubuntu 14.04) */ +#endif /* < RHEL7 */ +#endif /* < 3.14.0 */ + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0) ) +#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops)) +#define HAVE_VF_MIN_MAX_TXRATE 1 +#endif /* >= 3.16.0 */ + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) ) +#define HAVE_NDO_FDB_ADD_VID +#endif /* >= 3.19.0 */ + +#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0) ) +/* vlan_tx_xx functions got renamed to skb_vlan */ +#define vlan_tx_tag_get skb_vlan_tag_get +#define vlan_tx_tag_present skb_vlan_tag_present +#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS +#endif /* 4.0.0 */ #endif /* _KCOMPAT_H_ */
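
The IS_ENABLED() emulation added near the top of this patch works purely in the preprocessor: when a Kconfig symbol is defined to 1, the paste __ARG_PLACEHOLDER_##value produces __ARG_PLACEHOLDER_1, which expands to "0," and shifts a literal 1 into the val slot of ___config_enabled(); an undefined symbol (or one defined to anything else) leaves val as 0. A minimal standalone sketch of that trick follows; the CONFIG_FOO/CONFIG_BAR/CONFIG_BAZ symbols are made up for illustration and are not taken from this file.

#include <stdio.h>

/* Same token-pasting trick as the compat macros in the patch above. */
#define __ARG_PLACEHOLDER_1 0,
#define config_enabled(cfg)            _config_enabled(cfg)
#define _config_enabled(value)         __config_enabled(__ARG_PLACEHOLDER_##value)
#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
#define ___config_enabled(__ignored, val, ...) val

#define IS_ENABLED(option) \
	(config_enabled(option) || config_enabled(option##_MODULE))

/* Hypothetical Kconfig-style symbols, for illustration only. */
#define CONFIG_FOO 1		/* built in          -> IS_ENABLED() yields 1 */
#define CONFIG_BAR_MODULE 1	/* built as a module -> IS_ENABLED() yields 1 */
/* CONFIG_BAZ left undefined                       -> IS_ENABLED() yields 0 */

int main(void)
{
	printf("CONFIG_FOO: %d\n", IS_ENABLED(CONFIG_FOO));	/* prints 1 */
	printf("CONFIG_BAR: %d\n", IS_ENABLED(CONFIG_BAR));	/* prints 1 */
	printf("CONFIG_BAZ: %d\n", IS_ENABLED(CONFIG_BAZ));	/* prints 0 */
	return 0;
}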