1 /*******************************************************************************
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *******************************************************************************/
31 /*****************************************************************************/
32 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
33 /* From lib/vsprintf.c */
34 #include <asm/div64.h>
/* Parse an unsigned decimal run at **s, advancing *s past the digits.
 * NOTE(review): this listing is elided -- the declaration/loop lines of the
 * body are missing here. */
36 static int skip_atoi(const char **s)
41 i = i*10 + *((*s)++) - '0';
45 #define _kc_ZEROPAD 1 /* pad with zero */
46 #define _kc_SIGN 2 /* unsigned/signed long */
47 #define _kc_PLUS 4 /* show plus */
48 #define _kc_SPACE 8 /* space if plus */
49 #define _kc_LEFT 16 /* left justified */
50 #define _kc_SPECIAL 32 /* 0x */
51 #define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
/* Format the integer 'num' in 'base' (2..36) into [buf, end], honoring the
 * _kc_* flag bits (sign, padding, '#' prefix, width, precision).  Helper for
 * _kc_vsnprintf() below.  NOTE(review): listing is elided -- many body lines
 * are missing. */
53 static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type)
57 const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
58 const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
/* _kc_LARGE selects the upper-case digit table (e.g. %X). */
61 digits = (type & _kc_LARGE) ? large_digits : small_digits;
64 if (base < 2 || base > 36)
/* Pad character: '0' for zero-padding, otherwise space. */
66 c = (type & _kc_ZEROPAD) ? '0' : ' ';
68 if (type & _kc_SIGN) {
73 } else if (type & _kc_PLUS) {
76 } else if (type & _kc_SPACE) {
81 if (type & _kc_SPECIAL) {
/* do_div() divides num in place and returns the remainder (next digit). */
91 tmp[i++] = digits[do_div(num,base)];
95 if (!(type&(_kc_ZEROPAD+_kc_LEFT))) {
107 if (type & _kc_SPECIAL) {
112 } else if (base==16) {
121 if (!(type & _kc_LEFT)) {
128 while (i < precision--) {
/* Backport of vsnprintf() for kernels < 2.4.8 (from lib/vsprintf.c).
 * Formats 'fmt' + 'args' into 'buf', writing at most 'size' bytes including
 * the trailing NUL.  NOTE(review): listing is elided -- many body lines
 * (switch cases, bounds checks, the 'repeat:' label, etc.) are missing. */
146 int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
149 unsigned long long num;
154 int flags; /* flags to number() */
156 int field_width; /* width of output field */
157 int precision; /* min. # of digits for integers; max
158 number of chars for from string */
159 int qualifier; /* 'h', 'l', or 'L' for integer fields */
160 /* 'z' support added 23/7/1999 S.H. */
161 /* 'z' changed to 'Z' --davidm 1/25/99 */
/* 'end' points at the last writable byte; trailing NUL goes there. */
164 end = buf + size - 1;
168 size = end - buf + 1;
171 for (; *fmt ; ++fmt) {
/* Collect the '%' conversion flags (may repeat in any order). */
182 ++fmt; /* this also skips first '%' */
184 case '-': flags |= _kc_LEFT; goto repeat;
185 case '+': flags |= _kc_PLUS; goto repeat;
186 case ' ': flags |= _kc_SPACE; goto repeat;
187 case '#': flags |= _kc_SPECIAL; goto repeat;
188 case '0': flags |= _kc_ZEROPAD; goto repeat;
191 /* get field width */
194 field_width = skip_atoi(&fmt);
195 else if (*fmt == '*') {
197 /* it's the next argument */
198 field_width = va_arg(args, int);
/* Negative '*' width means left-justify with |width|, per printf(3). */
199 if (field_width < 0) {
200 field_width = -field_width;
205 /* get the precision */
210 precision = skip_atoi(&fmt);
211 else if (*fmt == '*') {
213 /* it's the next argument */
214 precision = va_arg(args, int);
220 /* get the conversion qualifier */
222 if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
/* %c: right-pad/left-pad around a single char argument. */
232 if (!(flags & _kc_LEFT)) {
233 while (--field_width > 0) {
239 c = (unsigned char) va_arg(args, int);
243 while (--field_width > 0) {
/* %s: bounded by precision via strnlen(). */
251 s = va_arg(args, char *);
255 len = strnlen(s, precision);
257 if (!(flags & _kc_LEFT)) {
258 while (len < field_width--) {
264 for (i = 0; i < len; ++i) {
269 while (len < field_width--) {
/* %p: default to zero-padded, pointer-width hex. */
277 if (field_width == -1) {
278 field_width = 2*sizeof(void *);
279 flags |= _kc_ZEROPAD;
281 str = number(str, end,
282 (unsigned long) va_arg(args, void *),
283 16, field_width, precision, flags);
/* %n: store the byte count written so far through the pointer arg. */
289 * What does C99 say about the overflow case here? */
290 if (qualifier == 'l') {
291 long * ip = va_arg(args, long *);
293 } else if (qualifier == 'Z') {
294 size_t * ip = va_arg(args, size_t *);
297 int * ip = va_arg(args, int *);
308 /* integer number formats - set up the flags and "break" */
/* Fetch the integer argument at the width implied by the qualifier,
 * sign-extending when _kc_SIGN is set. */
338 if (qualifier == 'L')
339 num = va_arg(args, long long);
340 else if (qualifier == 'l') {
341 num = va_arg(args, unsigned long);
342 if (flags & _kc_SIGN)
343 num = (signed long) num;
344 } else if (qualifier == 'Z') {
345 num = va_arg(args, size_t);
346 } else if (qualifier == 'h') {
347 num = (unsigned short) va_arg(args, int);
348 if (flags & _kc_SIGN)
349 num = (signed short) num;
351 num = va_arg(args, unsigned int);
352 if (flags & _kc_SIGN)
353 num = (signed int) num;
355 str = number(str, end, num, base,
356 field_width, precision, flags);
/* Backport of snprintf(): varargs wrapper around _kc_vsnprintf() above.
 * NOTE(review): listing is elided -- va_start/va_end lines are missing. */
369 int _kc_snprintf(char * buf, size_t size, const char *fmt, ...)
375 i = _kc_vsnprintf(buf,size,fmt,args);
381 /*****************************************************************************/
382 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
384 /**************************************/
385 /* PCI DMA MAPPING */
387 #if defined(CONFIG_HIGHMEM)
389 #ifndef PCI_DRAM_OFFSET
390 #define PCI_DRAM_OFFSET 0
/* Backport of pci_map_page() for kernels < 2.4.13.
 * HIGHMEM variant: computes the bus address arithmetically from the page's
 * index in mem_map (highmem pages have no kernel virtual address). */
394 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
395 size_t size, int direction)
397 return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
401 #else /* CONFIG_HIGHMEM */
/* Non-HIGHMEM variant: every page is kernel-mapped, so delegate to
 * pci_map_single() on the page's virtual address plus offset. */
404 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
405 size_t size, int direction)
407 return pci_map_single(dev, (void *)page_address(page) + offset, size,
411 #endif /* CONFIG_HIGHMEM */
/* Backport of pci_unmap_page(): symmetric with the map above. */
414 _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
417 return pci_unmap_single(dev, dma_addr, size, direction);
420 #endif /* 2.4.13 => 2.4.3 */
422 /*****************************************************************************/
423 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
425 /**************************************/
/* Backport of pci_set_dma_mask() for kernels < 2.4.3: verify the mask is
 * supported before storing it on the device. */
429 _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
431 if (!pci_dma_supported(dev, mask))
433 dev->dma_mask = mask;
/* Backport of pci_request_regions(): claim every non-empty BAR (0..5) as an
 * I/O or memory region; on any failure, release whatever was claimed.
 * NOTE(review): listing is elided -- the error-return lines after
 * pci_release_regions() are missing. */
438 _kc_pci_request_regions(struct pci_dev *dev, char *res_name)
442 for (i = 0; i < 6; i++) {
443 if (pci_resource_len(dev, i) == 0)
446 if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
447 if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
448 pci_release_regions(dev);
451 } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
452 if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
453 pci_release_regions(dev);
/* Backport of pci_release_regions(): mirror of the request loop above. */
462 _kc_pci_release_regions(struct pci_dev *dev)
466 for (i = 0; i < 6; i++) {
467 if (pci_resource_len(dev, i) == 0)
470 if (pci_resource_flags(dev, i) & IORESOURCE_IO)
471 release_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
473 else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
474 release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
478 /**************************************/
479 /* NETWORK DRIVER API */
/* Backport of alloc_etherdev(): one allocation holds the net_device, the
 * private area, and the name; priv is aligned up to a 32-byte boundary
 * (hence the "+ 31" slack and the mask below). */
482 _kc_alloc_etherdev(int sizeof_priv)
484 struct net_device *dev;
487 alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
488 dev = kzalloc(alloc_size, GFP_KERNEL);
/* Round the address just past the net_device up to 32-byte alignment. */
493 dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
/* Backport of is_valid_ether_addr(): reject multicast (low bit of first
 * octet set) and the all-zero address. */
501 _kc_is_valid_ether_addr(u8 *addr)
503 const char zaddr[6] = { 0, };
505 return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
508 #endif /* 2.4.3 => 2.4.0 */
510 /*****************************************************************************/
511 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
/* Backport stubs for kernels < 2.4.6 which lack PCI power management.
 * NOTE(review): listing is elided -- the (presumably trivial) bodies are
 * missing here. */
514 _kc_pci_set_power_state(struct pci_dev *dev, int state)
520 _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
525 #endif /* 2.4.6 => 2.4.3 */
527 /*****************************************************************************/
528 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
/* Backport of skb_fill_page_desc(): install page fragment 'i' on the skb and
 * bump nr_frags.  NOTE(review): listing is elided -- the frag->page /
 * frag->size assignment lines are missing here. */
529 void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
532 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
534 frag->page_offset = off;
536 skb_shinfo(skb)->nr_frags = i + 1;
540 * Original Copyright:
541 * find_next_bit.c: fallback find next bit implementation
543 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
544 * Written by David Howells (dhowells@redhat.com)
548 * find_next_bit - find the next set bit in a memory region
549 * @addr: The address to base the search on
550 * @offset: The bitnumber to start searching at
551 * @size: The maximum size to search
/* Fallback find_next_bit(): return the index of the next set bit in 'addr'
 * at or after 'offset', or 'size' if none.  Scans a partial leading word,
 * then whole words, then a partial trailing word. */
553 unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
554 unsigned long offset)
556 const unsigned long *p = addr + BITOP_WORD(offset);
557 unsigned long result = offset & ~(BITS_PER_LONG-1);
563 offset %= BITS_PER_LONG;
/* Mask off bits below 'offset' in the first word. */
566 tmp &= (~0UL << offset);
567 if (size < BITS_PER_LONG)
571 size -= BITS_PER_LONG;
572 result += BITS_PER_LONG;
574 while (size & ~(BITS_PER_LONG-1)) {
577 result += BITS_PER_LONG;
578 size -= BITS_PER_LONG;
/* Mask off bits at or above 'size' in the final word. */
585 tmp &= (~0UL >> (BITS_PER_LONG - size));
586 if (tmp == 0UL) /* Are any bits set? */
587 return result + size; /* Nope. */
/* NOTE(review): libc ffs() is 1-based while the kernel's fallback uses the
 * 0-based __ffs(); presumably kcompat.h remaps ffs here -- verify, otherwise
 * this is off by one. */
589 return result + ffs(tmp);
/* Backport of strlcpy(): copy up to size-1 bytes of 'src' into 'dest' and
 * return strlen(src) so the caller can detect truncation.  NOTE(review):
 * listing is elided -- the "if (size)" guard and NUL-termination lines are
 * missing here. */
592 size_t _kc_strlcpy(char *dest, const char *src, size_t size)
594 size_t ret = strlen(src);
597 size_t len = (ret >= size) ? size - 1 : ret;
598 memcpy(dest, src, len);
605 #if BITS_PER_LONG == 32
/* 64-by-32 division helper for 32-bit platforms (backing do_div()):
 * divides *n in place and returns the remainder.  Declared weak so a kernel
 * or arch-provided version wins at link time.  NOTE(review): listing is
 * elided -- the shift-and-subtract loop body is incomplete here. */
606 uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base)
611 uint32_t high = rem >> 32;
613 /* Reduce the thing a bit first */
617 res = (uint64_t) high << 32;
618 rem -= (uint64_t) (high*base) << 32;
621 while ((int64_t)b > 0 && b < rem) {
640 #endif /* 2.6.0 => 2.4.6 */
642 /*****************************************************************************/
643 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
/* Backport of scnprintf(): like snprintf() but returns the number of bytes
 * actually written into 'buf' (excluding the NUL), never the would-have-been
 * length.  NOTE(review): 'i' is int vs size_t 'size' -- a negative vsnprintf
 * error return would compare unsigned here; verify upstream intent. */
644 int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...)
650 i = vsnprintf(buf, size, fmt, args);
652 return (i >= size) ? (size - 1) : i;
656 /*****************************************************************************/
657 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
658 DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1};
659 #endif /* < 2.6.10 */
661 /*****************************************************************************/
662 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
/* Backport of kstrdup() for kernels < 2.6.13: kmalloc a copy of 's'
 * (or NULL).  NOTE(review): listing is elided -- the NULL check, strlen,
 * and memcpy lines are missing here. */
663 char *_kc_kstrdup(const char *s, unsigned int gfp)
672 buf = kmalloc(len, gfp);
679 /*****************************************************************************/
680 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
/* Backport of kzalloc() for kernels < 2.6.14: kmalloc then zero.
 * NOTE(review): listing is elided -- the "if (ret)" guard line between the
 * kmalloc and the memset is missing here, not absent from the source. */
681 void *_kc_kzalloc(size_t size, int flags)
683 void *ret = kmalloc(size, flags);
685 memset(ret, 0, size);
690 /*****************************************************************************/
691 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
692 int _kc_skb_pad(struct sk_buff *skb, int pad)
696 /* If the skbuff is non linear tailroom is always zero.. */
697 if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
698 memset(skb->data+skb->len, 0, pad);
702 ntail = skb->data_len + pad - (skb->end - skb->tail);
703 if (likely(skb_cloned(skb) || ntail > 0)) {
704 if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC));
709 if (skb_is_nonlinear(skb) &&
710 !__pskb_pull_tail(skb, skb->data_len))
714 memset(skb->data + skb->len, 0, pad);
722 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
/* Backport of pci_save_state(): snapshot the device's config space into
 * adapter->config_space.  Saves the extended (PCIe) config length when the
 * device exposes a PCI Express capability.  NOTE(review): listing is elided
 * -- preprocessor branches and return statements are missing throughout. */
723 int _kc_pci_save_state(struct pci_dev *pdev)
725 struct net_device *netdev = pci_get_drvdata(pdev);
726 struct adapter_struct *adapter = netdev_priv(netdev);
727 int size = PCI_CONFIG_SPACE_LEN, i;
728 u16 pcie_cap_offset, pcie_link_status;
730 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
731 /* no ->dev for 2.4 kernels */
732 WARN_ON(pdev->dev.driver_data == NULL);
/* Use the larger PCIe config-space size if the express capability reads. */
734 pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
735 if (pcie_cap_offset) {
736 if (!pci_read_config_word(pdev,
737 pcie_cap_offset + PCIE_LINK_STATUS,
739 size = PCIE_CONFIG_SPACE_LEN;
741 pci_config_space_ich8lan();
/* NOTE(review): these two lines sit on opposite sides of an elided
 * preprocessor branch -- they are alternatives, not sequential code. */
743 if (adapter->config_space == NULL)
745 WARN_ON(adapter->config_space != NULL);
747 adapter->config_space = kmalloc(size, GFP_KERNEL);
748 if (!adapter->config_space) {
749 printk(KERN_ERR "Out of memory in pci_save_state\n");
/* Dump the config space one dword at a time. */
752 for (i = 0; i < (size / 4); i++)
753 pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
/* Backport of pci_restore_state(): write the saved snapshot back, then free
 * it so a stale snapshot is never restored twice. */
757 void _kc_pci_restore_state(struct pci_dev *pdev)
759 struct net_device *netdev = pci_get_drvdata(pdev);
760 struct adapter_struct *adapter = netdev_priv(netdev);
761 int size = PCI_CONFIG_SPACE_LEN, i;
763 u16 pcie_link_status;
765 if (adapter->config_space != NULL) {
766 pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
767 if (pcie_cap_offset &&
768 !pci_read_config_word(pdev,
769 pcie_cap_offset + PCIE_LINK_STATUS,
771 size = PCIE_CONFIG_SPACE_LEN;
773 pci_config_space_ich8lan();
774 for (i = 0; i < (size / 4); i++)
775 pci_write_config_dword(pdev, i * 4, adapter->config_space[i]);
777 kfree(adapter->config_space);
778 adapter->config_space = NULL;
782 #endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
/* Backport of free_netdev(): release the saved config-space snapshot, then
 * free the (padded) net_device allocation, deferring to the class-device
 * refcount when the device was registered. */
785 void _kc_free_netdev(struct net_device *netdev)
787 struct adapter_struct *adapter = netdev_priv(netdev);
789 if (adapter->config_space != NULL)
790 kfree(adapter->config_space)
792 if (netdev->reg_state == NETREG_UNINITIALIZED) {
/* The net_device was allocated with leading padding; step back over it. */
793 kfree((char *)netdev - netdev->padded);
795 BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
796 netdev->reg_state = NETREG_RELEASED;
797 class_device_put(&netdev->class_dev);
800 kfree((char *)netdev - netdev->padded);
/* Backport of kmemdup(): allocate and copy 'len' bytes of 'src'.
 * NOTE(review): listing is elided -- the memcpy/return lines are missing. */
805 void *_kc_kmemdup(const void *src, size_t len, unsigned gfp)
809 p = kzalloc(len, gfp);
814 #endif /* <= 2.6.19 */
816 /*****************************************************************************/
817 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
/* Map a net_device back to its PCI device via the driver-private adapter
 * struct's pdev member. */
818 struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev)
820 return ((struct adapter_struct *)netdev_priv(netdev))->pdev;
822 #endif /* < 2.6.21 */
824 /*****************************************************************************/
825 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
826 /* hexdump code taken from lib/hexdump.c */
/* Backport of hex_dump_to_buffer() (from lib/hexdump.c): render one row of
 * 'buf' as hex (grouped by 8/4/2/1 bytes) plus an optional ASCII column into
 * 'linebuf'.  NOTE(review): listing is elided -- the switch scaffolding and
 * several validation lines are missing. */
827 static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
828 int groupsize, unsigned char *linebuf,
829 size_t linebuflen, bool ascii)
836 if (rowsize != 16 && rowsize != 32)
841 if (len > rowsize) /* limit to one line at a time */
843 if ((len % groupsize) != 0) /* no mixed size output */
/* 8-byte groups: 16 hex digits each. */
848 const u64 *ptr8 = buf;
849 int ngroups = len / groupsize;
851 for (j = 0; j < ngroups; j++)
852 lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
853 "%s%16.16llx", j ? " " : "",
854 (unsigned long long)*(ptr8 + j));
855 ascii_column = 17 * ngroups + 2;
/* 4-byte groups: 8 hex digits each. */
860 const u32 *ptr4 = buf;
861 int ngroups = len / groupsize;
863 for (j = 0; j < ngroups; j++)
864 lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
865 "%s%8.8x", j ? " " : "", *(ptr4 + j));
866 ascii_column = 9 * ngroups + 2;
/* 2-byte groups: 4 hex digits each. */
871 const u16 *ptr2 = buf;
872 int ngroups = len / groupsize;
874 for (j = 0; j < ngroups; j++)
875 lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
876 "%s%4.4x", j ? " " : "", *(ptr2 + j));
877 ascii_column = 5 * ngroups + 2;
/* Byte-at-a-time fallback: two hex digits plus a separator per byte. */
882 for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
884 linebuf[lx++] = hex_asc(ch >> 4);
885 linebuf[lx++] = hex_asc(ch & 0x0f);
891 ascii_column = 3 * rowsize + 2;
/* Pad out to the ASCII column, then append printable chars (dot otherwise). */
897 while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
899 for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
900 linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
903 linebuf[lx++] = '\0';
/* Backport of print_hex_dump(): printk 'buf' one rowsize-byte line at a
 * time, prefixed per 'prefix_type' (address, offset, or none). */
906 void _kc_print_hex_dump(const char *level,
907 const char *prefix_str, int prefix_type,
908 int rowsize, int groupsize,
909 const void *buf, size_t len, bool ascii)
912 int i, linelen, remaining = len;
913 unsigned char linebuf[200];
915 if (rowsize != 16 && rowsize != 32)
/* NOTE(review): 'i' is int and 'len' is size_t -- signed/unsigned compare;
 * matches the upstream backport but worth confirming for huge lengths. */
918 for (i = 0; i < len; i += rowsize) {
919 linelen = min(remaining, rowsize);
920 remaining -= rowsize;
921 _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
922 linebuf, sizeof(linebuf), ascii);
924 switch (prefix_type) {
925 case DUMP_PREFIX_ADDRESS:
926 printk("%s%s%*p: %s\n", level, prefix_str,
927 (int)(2 * sizeof(void *)), ptr + i, linebuf);
929 case DUMP_PREFIX_OFFSET:
930 printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
933 printk("%s%s%s\n", level, prefix_str, linebuf);
939 #ifdef HAVE_I2C_SUPPORT
/* Backport of i2c_new_device(): allocate an i2c_client from board info,
 * check the address, and attach it to the adapter.  NOTE(review): listing is
 * elided -- error-path gotos and the return statements are missing. */
941 _kc_i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
943 struct i2c_client *client;
946 client = kzalloc(sizeof *client, GFP_KERNEL);
/* Populate the client from the adapter and caller-supplied board info. */
950 client->adapter = adap;
952 client->dev.platform_data = info->platform_data;
954 client->flags = info->flags;
955 client->addr = info->addr;
957 strlcpy(client->name, info->type, sizeof(client->name));
959 /* Check for address business */
960 status = i2c_check_addr(adap, client->addr);
964 client->dev.parent = &client->adapter->dev;
965 client->dev.bus = &i2c_bus_type;
967 status = i2c_attach_client(client);
971 dev_dbg(&adap->dev, "client [%s] registered with bus id %s\n",
972 client->name, dev_name(&client->dev));
/* Failure path: report and (in elided lines) free the client. */
977 dev_err(&adap->dev, "Failed to register i2c client %s at 0x%02x "
978 "(%d)\n", client->name, client->addr, status);
982 #endif /* HAVE_I2C_SUPPORT */
983 #endif /* < 2.6.22 */
985 /*****************************************************************************/
986 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
/* Pre-2.6.24 NAPI shim: recover the fake polling net_device embedded in the
 * q_vector that owns this napi_struct. */
988 struct net_device *napi_to_poll_dev(const struct napi_struct *napi)
990 struct adapter_q_vector *q_vector = container_of(napi,
991 struct adapter_q_vector,
993 return &q_vector->poll_dev;
/* Old-style ->poll() adapter: bridge the pre-2.6.24 (netdev, budget)
 * interface onto the new napi->poll() callback, accounting work done against
 * both the global budget and the device quota.  Returns 1 while more work
 * remains (old-NAPI convention). */
996 int __kc_adapter_clean(struct net_device *netdev, int *budget)
999 int work_to_do = min(*budget, netdev->quota);
1000 /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
1001 struct napi_struct *napi = netdev->priv;
1002 work_done = napi->poll(napi, work_to_do);
1003 *budget -= work_done;
1004 netdev->quota -= work_done;
1005 return (work_done >= work_to_do) ? 1 : 0;
1008 #endif /* <= 2.6.24 */
1010 /*****************************************************************************/
1011 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
/* Backport of pci_disable_link_state(): clear the requested ASPM 'state'
 * bits in the link control register of the device's upstream bridge (ASPM is
 * controlled from the parent, not the endpoint). */
1012 void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
1014 struct pci_dev *parent = pdev->bus->self;
1021 pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
1023 pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
1024 link_state &= ~state;
1025 pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
1028 #endif /* < 2.6.26 */
1030 /*****************************************************************************/
1031 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
/* Backports of netif_tx_{stop,wake,start}_all_queues() for kernels < 2.6.27:
 * act on the default queue, then on every subqueue when the device is
 * multiqueue. */
1033 void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
1035 struct adapter_struct *adapter = netdev_priv(netdev);
1038 netif_stop_queue(netdev);
1039 if (netif_is_multiqueue(netdev))
1040 for (i = 0; i < adapter->num_tx_queues; i++)
1041 netif_stop_subqueue(netdev, i);
/* Wake variant: re-enable transmission on all queues. */
1043 void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
1045 struct adapter_struct *adapter = netdev_priv(netdev);
1048 netif_wake_queue(netdev);
1049 if (netif_is_multiqueue(netdev))
1050 for (i = 0; i < adapter->num_tx_queues; i++)
1051 netif_wake_subqueue(netdev, i);
/* Start variant: used at bring-up before traffic flows. */
1053 void _kc_netif_tx_start_all_queues(struct net_device *netdev)
1055 struct adapter_struct *adapter = netdev_priv(netdev);
1058 netif_start_queue(netdev);
1059 if (netif_is_multiqueue(netdev))
1060 for (i = 0; i < adapter->num_tx_queues; i++)
1061 netif_start_subqueue(netdev, i);
1063 #endif /* HAVE_TX_MQ */
1065 #ifndef __WARN_printf
1066 void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...)
1070 printk(KERN_WARNING "------------[ cut here ]------------\n");
1071 printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file, line);
1072 va_start(args, fmt);
1078 #endif /* __WARN_printf */
1079 #endif /* < 2.6.27 */
1081 /*****************************************************************************/
1082 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
/* Backport of pci_prepare_to_sleep(): pick a suspend power state, arm wake,
 * enter the state, and disarm wake again if the transition failed. */
1085 _kc_pci_prepare_to_sleep(struct pci_dev *dev)
1087 pci_power_t target_state;
1090 target_state = pci_choose_state(dev, PMSG_SUSPEND);
1092 pci_enable_wake(dev, target_state, true);
1094 error = pci_set_power_state(dev, target_state);
1097 pci_enable_wake(dev, target_state, false);
/* Backport of pci_wake_from_d3(): configure wake for both D3cold and D3hot.
 * NOTE(review): listing is elided -- error handling between the two calls is
 * missing here. */
1103 _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
1107 err = pci_enable_wake(dev, PCI_D3cold, enable);
1111 err = pci_enable_wake(dev, PCI_D3hot, enable);
1116 #endif /* < 2.6.28 */
1118 /*****************************************************************************/
1119 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
/* Set or clear PCI_COMMAND_MASTER on 'pdev', writing the command register
 * only when the bit actually changes; mirror the state into is_busmaster on
 * kernels that have the field. */
1120 static void __kc_pci_set_master(struct pci_dev *pdev, bool enable)
1124 pci_read_config_word(pdev, PCI_COMMAND, &old_cmd);
1126 cmd = old_cmd | PCI_COMMAND_MASTER;
1128 cmd = old_cmd & ~PCI_COMMAND_MASTER;
1129 if (cmd != old_cmd) {
1130 dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n",
1131 enable ? "enabling" : "disabling");
1132 pci_write_config_word(pdev, PCI_COMMAND, cmd);
1134 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) )
1135 pdev->is_busmaster = enable;
/* Backport of pci_clear_master(): thin wrapper over the helper above. */
1139 void _kc_pci_clear_master(struct pci_dev *dev)
1141 __kc_pci_set_master(dev, false);
1143 #endif /* < 2.6.29 */
1145 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
1146 #if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
/* Backport of pci_num_vf(): count virtual functions belonging to PF 'dev'
 * by walking all Ethernet-class PCI devices.  NOTE(review): listing is
 * elided -- the counter increment and loop close are missing here. */
1147 int _kc_pci_num_vf(struct pci_dev *dev)
1150 #ifdef CONFIG_PCI_IOV
1151 struct pci_dev *vfdev;
1153 /* loop through all ethernet devices starting at PF dev */
1154 vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL);
1156 if (vfdev->is_virtfn && vfdev->physfn == dev)
1159 vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev);
1165 #endif /* RHEL_RELEASE_CODE */
1166 #endif /* < 2.6.34 */
1168 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
1170 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
1171 #ifndef CONFIG_NETDEVICES_MULTIQUEUE
/* Backport of netif_set_real_num_tx_queues(): adjust the active TX queue
 * count; when shrinking, reset the qdisc on each now-inactive queue under
 * its lock.  NOTE(review): listing is elided -- the qdisc_reset call inside
 * the locked region is missing here. */
1172 void _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
1174 unsigned int real_num = dev->real_num_tx_queues;
1175 struct Qdisc *qdisc;
1178 if (unlikely(txq > dev->num_tx_queues))
1180 else if (txq > real_num)
1181 dev->real_num_tx_queues = txq;
1182 else if ( txq < real_num) {
1183 dev->real_num_tx_queues = txq;
1184 for (i = txq; i < dev->num_tx_queues; i++) {
1185 qdisc = netdev_get_tx_queue(dev, i)->qdisc;
1187 spin_lock_bh(qdisc_lock(qdisc));
1189 spin_unlock_bh(qdisc_lock(qdisc));
1194 #endif /* CONFIG_NETDEVICES_MULTIQUEUE */
1195 #endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
1196 #endif /* HAVE_TX_MQ */
/* Backport of simple_write_to_buffer(): copy up to 'count' user-space bytes
 * into 'to' at *ppos, clamping to 'available' and advancing *ppos.
 * NOTE(review): listing is elided -- the copy_from_user failure handling and
 * final return are missing here. */
1198 ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
1199 const void __user *from, size_t count)
1206 if (pos >= available || !count)
1208 if (count > available - pos)
1209 count = available - pos;
1210 res = copy_from_user(to + pos, from, count);
1214 *ppos = pos + count;
1218 #endif /* < 2.6.35 */
1220 /*****************************************************************************/
1221 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
/* Feature bits that ethtool flags mirror into netdev->features. */
1222 static const u32 _kc_flags_dup_features =
1223 (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);
/* Backport of ethtool_op_get_flags(): report only the mirrored bits. */
1225 u32 _kc_ethtool_op_get_flags(struct net_device *dev)
1227 return dev->features & _kc_flags_dup_features;
/* Backport of ethtool_op_set_flags(): reject unsupported bits, then replace
 * the mirrored bits in features.  NOTE(review): listing is elided -- the
 * error/success return lines are missing here. */
1230 int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
1232 if (data & ~supported)
1235 dev->features = ((dev->features & ~_kc_flags_dup_features) |
1236 (data & _kc_flags_dup_features));
1239 #endif /* < 2.6.36 */
1241 /******************************************************************************/
1242 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
1243 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
1247 #endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
1248 #endif /* < 2.6.39 */
1250 /******************************************************************************/
1251 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
/* Backport of skb_add_rx_frag(): attach a page fragment and account its
 * size/truesize.  NOTE(review): listing is elided -- the skb->len update
 * line is missing here. */
1252 void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
1253 int off, int size, unsigned int truesize)
1255 skb_fill_page_desc(skb, i, page, off, size);
1257 skb->data_len += size;
1258 skb->truesize += truesize;
/* Backport of simple_open(): stash inode->i_private as file private data. */
1261 int _kc_simple_open(struct inode *inode, struct file *file)
1263 if (inode->i_private)
1264 file->private_data = inode->i_private;
1269 #endif /* < 3.4.0 */
1271 /******************************************************************************/
1272 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
1273 #if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) && \
1274 !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5))
1275 static inline int __kc_pcie_cap_version(struct pci_dev *dev)
1280 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
1283 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16);
1284 return reg16 & PCI_EXP_FLAGS_VERS;
/* Device Control/Status registers exist on every PCIe function. */
1287 static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev)
/* Link registers exist on cap v2+, and on v1 for ports/endpoints with a
 * link (root port, endpoint, legacy endpoint). */
1292 static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev)
1294 int type = pci_pcie_type(dev);
1296 return __kc_pcie_cap_version(dev) > 1 ||
1297 type == PCI_EXP_TYPE_ROOT_PORT ||
1298 type == PCI_EXP_TYPE_ENDPOINT ||
1299 type == PCI_EXP_TYPE_LEG_END;
/* Slot registers exist on cap v2+, root ports, and downstream ports that
 * advertise a slot in the capability flags. */
1302 static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev)
1304 int type = pci_pcie_type(dev);
1308 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
1311 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg);
1313 return __kc_pcie_cap_version(dev) > 1 ||
1314 type == PCI_EXP_TYPE_ROOT_PORT ||
1315 (type == PCI_EXP_TYPE_DOWNSTREAM &&
1316 pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
/* Root registers exist on cap v2+, root ports, and root-complex event
 * collectors. */
1319 static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev)
1321 int type = pci_pcie_type(dev);
1323 return __kc_pcie_cap_version(dev) > 1 ||
1324 type == PCI_EXP_TYPE_ROOT_PORT ||
1325 type == PCI_EXP_TYPE_RC_EC;
/* Return whether PCIe capability register 'pos' is implemented on 'dev',
 * dispatching to the cap_has_* predicates above.  Registers beyond the v1
 * layout require capability version 2. */
1328 static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
1330 if (!pci_is_pcie(dev))
1334 case PCI_EXP_FLAGS_TYPE:
1336 case PCI_EXP_DEVCAP:
1337 case PCI_EXP_DEVCTL:
1338 case PCI_EXP_DEVSTA:
1339 return __kc_pcie_cap_has_devctl(dev);
1340 case PCI_EXP_LNKCAP:
1341 case PCI_EXP_LNKCTL:
1342 case PCI_EXP_LNKSTA:
1343 return __kc_pcie_cap_has_lnkctl(dev);
1344 case PCI_EXP_SLTCAP:
1345 case PCI_EXP_SLTCTL:
1346 case PCI_EXP_SLTSTA:
1347 return __kc_pcie_cap_has_sltctl(dev);
1351 return __kc_pcie_cap_has_rtctl(dev);
/* v2-only registers. */
1352 case PCI_EXP_DEVCAP2:
1353 case PCI_EXP_DEVCTL2:
1354 case PCI_EXP_LNKCAP2:
1355 case PCI_EXP_LNKCTL2:
1356 case PCI_EXP_LNKSTA2:
1357 return __kc_pcie_cap_version(dev) > 1;
1364 * Note that these accessor functions are only for the "PCI Express
1365 * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
1366 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
/* Backport of pcie_capability_read_word(): read capability register 'pos'
 * into *val when implemented; otherwise synthesize the spec-mandated value
 * (0, or PDS for downstream-port SLTSTA). */
1368 int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
1376 if (__kc_pcie_capability_reg_implemented(dev, pos)) {
1377 ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
1379 * Reset *val to 0 if pci_read_config_word() fails, it may
1380 * have been written as 0xFFFF if hardware error happens
1381 * during pci_read_config_word().
1389 * For Functions that do not implement the Slot Capabilities,
1390 * Slot Status, and Slot Control registers, these spaces must
1391 * be hardwired to 0b, with the exception of the Presence Detect
1392 * State bit in the Slot Status register of Downstream Ports,
1393 * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
1395 if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
1396 pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
1397 *val = PCI_EXP_SLTSTA_PDS;
/* Backport of pcie_capability_write_word(): silently succeed on registers
 * the device does not implement. */
1403 int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
1408 if (!__kc_pcie_capability_reg_implemented(dev, pos))
1411 return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
/* Backport of pcie_capability_clear_and_set_word(): read-modify-write using
 * the two helpers above.  NOTE(review): listing is elided -- the clear/set
 * masking line between the read and the write is missing here. */
1414 int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
1420 ret = __kc_pcie_capability_read_word(dev, pos, &val);
1424 ret = __kc_pcie_capability_write_word(dev, pos, val);
1429 #endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) && \
1430 !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) */
1431 #endif /* < 3.7.0 */
1433 /******************************************************************************/
1434 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
1437 /*****************************************************************************/
1438 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
1439 #ifdef CONFIG_PCI_IOV
/* Backport of pci_vfs_assigned(): count this PF's virtual functions that a
 * guest currently owns (PCI_DEV_FLAGS_ASSIGNED set).  Returns 0 when SR-IOV
 * or the assigned-flag support is unavailable.  NOTE(review): listing is
 * elided -- the counter increment and loop close are missing here. */
1440 int __kc_pci_vfs_assigned(struct pci_dev *dev)
1442 unsigned int vfs_assigned = 0;
1443 #ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
1445 struct pci_dev *vfdev;
1446 unsigned short dev_id;
1448 /* only search if we are a PF */
1449 if (!dev->is_physfn)
1452 /* find SR-IOV capability */
1453 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
1458 * determine the device ID for the VFs, the vendor ID will be the
1459 * same as the PF so there is no need to check for that one
1461 pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id);
1463 /* loop through all the VFs to see if we own any that are assigned */
1464 vfdev = pci_get_device(dev->vendor, dev_id, NULL);
1467 * It is considered assigned if it is a virtual function with
1468 * our dev as the physical function and the assigned bit is set
1470 if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
1471 (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
1474 vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
1477 #endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */
1478 return vfs_assigned;
1481 #endif /* CONFIG_PCI_IOV */