1 // SPDX-License-Identifier: GPL-2.0
2 /*******************************************************************************
4 Intel 10 Gigabit PCI Express Linux driver
5 Copyright(c) 1999 - 2012 Intel Corporation.
8 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
9 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
11 *******************************************************************************/
16 /*****************************************************************************/
17 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
18 /* From lib/vsprintf.c */
19 #include <asm/div64.h>
/*
 * NOTE(review): this region is a sparse fragment of the pre-2.4.8
 * vsnprintf() backport copied from lib/vsprintf.c; many original lines
 * are missing from this view, so only the visible lines are annotated.
 */
/* skip_atoi: consume a run of decimal digits at *s, advancing the cursor. */
21 static int skip_atoi(const char **s)
26 i = i*10 + *((*s)++) - '0';
/* Formatting flags consumed by number() below (mirrors lib/vsprintf.c). */
30 #define _kc_ZEROPAD 1 /* pad with zero */
31 #define _kc_SIGN 2 /* unsigned/signed long */
32 #define _kc_PLUS 4 /* show plus */
33 #define _kc_SPACE 8 /* space if plus */
34 #define _kc_LEFT 16 /* left justified */
35 #define _kc_SPECIAL 32 /* 0x */
36 #define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
/*
 * number: render one integer into [buf, end] honoring base, field width,
 * precision and the _kc_* flags; returns the advanced output cursor.
 */
38 static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type)
42 const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
43 const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
46 digits = (type & _kc_LARGE) ? large_digits : small_digits;
/* reject unsupported bases (only 2..36 are representable) */
49 if (base < 2 || base > 36)
51 c = (type & _kc_ZEROPAD) ? '0' : ' ';
/* sign handling: '-', then '+', then ' ' in priority order */
53 if (type & _kc_SIGN) {
58 } else if (type & _kc_PLUS) {
61 } else if (type & _kc_SPACE) {
66 if (type & _kc_SPECIAL) {
/* do_div() mutates num in place and returns the remainder (next digit) */
76 tmp[i++] = digits[do_div(num,base)];
80 if (!(type&(_kc_ZEROPAD+_kc_LEFT))) {
/* _kc_SPECIAL: emit the "0"/"0x" base prefix */
92 if (type & _kc_SPECIAL) {
97 } else if (base==16) {
106 if (!(type & _kc_LEFT)) {
/* pad with zeros up to the requested precision */
113 while (i < precision--) {
/*
 * _kc_vsnprintf - minimal vsnprintf() backport for kernels < 2.4.8.
 * Formats fmt/args into buf, writing at most size bytes.
 * NOTE(review): fragment view — the parse loop's switch arms and several
 * statements are missing here; annotations cover only visible lines.
 */
131 int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
134 unsigned long long num;
139 int flags; /* flags to number() */
141 int field_width; /* width of output field */
142 int precision; /* min. # of digits for integers; max
143 number of chars for from string */
144 int qualifier; /* 'h', 'l', or 'L' for integer fields */
145 /* 'z' support added 23/7/1999 S.H. */
146 /* 'z' changed to 'Z' --davidm 1/25/99 */
/* end points at the last writable byte (reserves room for the NUL) */
149 end = buf + size - 1;
153 size = end - buf + 1;
156 for (; *fmt ; ++fmt) {
/* flag-character loop: each arm re-enters via the 'repeat' label */
167 ++fmt; /* this also skips first '%' */
169 case '-': flags |= _kc_LEFT; goto repeat;
170 case '+': flags |= _kc_PLUS; goto repeat;
171 case ' ': flags |= _kc_SPACE; goto repeat;
172 case '#': flags |= _kc_SPECIAL; goto repeat;
173 case '0': flags |= _kc_ZEROPAD; goto repeat;
176 /* get field width */
179 field_width = skip_atoi(&fmt);
180 else if (*fmt == '*') {
182 /* it's the next argument */
183 field_width = va_arg(args, int);
/* a negative '*' width means left-justify, per C99 */
184 if (field_width < 0) {
185 field_width = -field_width;
190 /* get the precision */
195 precision = skip_atoi(&fmt);
196 else if (*fmt == '*') {
198 /* it's the next argument */
199 precision = va_arg(args, int);
205 /* get the conversion qualifier */
207 if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
/* %c: right-pad handling, then the char itself (promoted via int) */
217 if (!(flags & _kc_LEFT)) {
218 while (--field_width > 0) {
224 c = (unsigned char) va_arg(args, int);
228 while (--field_width > 0) {
/* %s: precision caps how much of the string is consulted */
236 s = va_arg(args, char *);
240 len = strnlen(s, precision);
242 if (!(flags & _kc_LEFT)) {
243 while (len < field_width--) {
249 for (i = 0; i < len; ++i) {
254 while (len < field_width--) {
/* %p: default to zero-padded pointer width (2 hex chars per byte) */
262 if (field_width == -1) {
263 field_width = 2*sizeof(void *);
264 flags |= _kc_ZEROPAD;
266 str = number(str, end,
267 (unsigned long) va_arg(args, void *),
268 16, field_width, precision, flags);
/* %n: store the count written so far through the pointed-to object */
274 * What does C99 say about the overflow case here? */
275 if (qualifier == 'l') {
276 long * ip = va_arg(args, long *);
278 } else if (qualifier == 'Z') {
279 size_t * ip = va_arg(args, size_t *);
282 int * ip = va_arg(args, int *);
293 /* integer number formats - set up the flags and "break" */
/* fetch the integer argument at the width implied by the qualifier,
 * sign-extending only when the conversion was signed (_kc_SIGN) */
323 if (qualifier == 'L')
324 num = va_arg(args, long long);
325 else if (qualifier == 'l') {
326 num = va_arg(args, unsigned long);
327 if (flags & _kc_SIGN)
328 num = (signed long) num;
329 } else if (qualifier == 'Z') {
330 num = va_arg(args, size_t);
331 } else if (qualifier == 'h') {
332 num = (unsigned short) va_arg(args, int);
333 if (flags & _kc_SIGN)
334 num = (signed short) num;
336 num = va_arg(args, unsigned int);
337 if (flags & _kc_SIGN)
338 num = (signed int) num;
340 str = number(str, end, num, base,
341 field_width, precision, flags);
346 /* don't write out a null byte if the buf size is zero */
348 /* the trailing null byte doesn't count towards the total
/* _kc_snprintf - varargs front-end over _kc_vsnprintf (va_start/va_end
 * bracketing is on lines not visible in this fragment). */
354 int _kc_snprintf(char * buf, size_t size, const char *fmt, ...)
360 i = _kc_vsnprintf(buf,size,fmt,args);
368 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
369 #ifdef CONFIG_PCI_IOV
/*
 * __kc_pci_vfs_assigned - backport of pci_vfs_assigned() for < 3.10.
 * Counts this PF's virtual functions that a guest currently owns
 * (PCI_DEV_FLAGS_ASSIGNED).  Returns 0 when the assigned-flag API is
 * unavailable (the #else branch is outside this fragment).
 */
370 int __kc_pci_vfs_assigned(struct pci_dev *dev)
372 unsigned int vfs_assigned = 0;
373 #ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
375 struct pci_dev *vfdev;
376 unsigned short dev_id;
378 /* only search if we are a PF */
382 /* find SR-IOV capability */
383 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
388 * * determine the device ID for the VFs, the vendor ID will be the
389 * * same as the PF so there is no need to check for that one
391 pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id);
393 /* loop through all the VFs to see if we own any that are assigned */
/* pci_get_device() holds a reference; the next call in the loop below
 * both advances the search and drops the previous reference */
394 vfdev = pci_get_device(dev->vendor, dev_id, NULL);
397 * * It is considered assigned if it is a virtual function with
398 * * our dev as the physical function and the assigned bit is set
400 if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
401 (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
404 vfdev = pci_get_device(dev->vendor, dev_id, vfdev)
407 #endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */
411 #endif /* CONFIG_PCI_IOV */
416 /*****************************************************************************/
417 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
419 /**************************************/
420 /* PCI DMA MAPPING */
422 #if defined(CONFIG_HIGHMEM)
424 #ifndef PCI_DRAM_OFFSET
425 #define PCI_DRAM_OFFSET 0
/*
 * _kc_pci_map_page (HIGHMEM build): compute the bus address directly
 * from the page's index in mem_map plus the intra-page offset.
 * NOTE(review): the trailing "+ PCI_DRAM_OFFSET" term appears to be on a
 * line missing from this fragment — confirm against the full source.
 */
429 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
430 size_t size, int direction)
432 return ((u64) (page - mem_map) << PAGE_SHIFT) + offset +
436 #else /* CONFIG_HIGHMEM */
/* Non-HIGHMEM build: every page has a kernel virtual address, so defer
 * to pci_map_single() on page_address(page) + offset. */
439 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
440 size_t size, int direction)
442 return pci_map_single(dev, (void *)page_address(page) + offset, size,
446 #endif /* CONFIG_HIGHMEM */
/* _kc_pci_unmap_page: symmetric teardown via pci_unmap_single(). */
449 _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
452 return pci_unmap_single(dev, dma_addr, size, direction);
455 #endif /* 2.4.13 => 2.4.3 */
457 /*****************************************************************************/
458 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
460 /**************************************/
/* _kc_pci_set_dma_mask: set dev->dma_mask after verifying support. */
464 _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
466 if (!pci_dma_supported(dev, mask))
468 dev->dma_mask = mask;
/*
 * _kc_pci_request_regions: claim all populated BARs (the literal 6 is
 * the pre-2.6 PCI BAR count).  On any failure, release everything
 * already claimed before returning (error return is on missing lines).
 */
473 _kc_pci_request_regions(struct pci_dev *dev, char *res_name)
477 for (i = 0; i < 6; i++) {
478 if (pci_resource_len(dev, i) == 0)
481 if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
482 if (!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
483 pci_release_regions(dev);
486 } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
487 if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) {
488 pci_release_regions(dev);
/* _kc_pci_release_regions: inverse of the above, skipping empty BARs. */
497 _kc_pci_release_regions(struct pci_dev *dev)
501 for (i = 0; i < 6; i++) {
502 if (pci_resource_len(dev, i) == 0)
505 if (pci_resource_flags(dev, i) & IORESOURCE_IO)
506 release_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
508 else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
509 release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i));
513 /**************************************/
514 /* NETWORK DRIVER API */
/*
 * _kc_alloc_etherdev: allocate net_device + private area in one chunk;
 * the "+ 31 ... & ~31" dance aligns the priv pointer to 32 bytes.
 */
517 _kc_alloc_etherdev(int sizeof_priv)
519 struct net_device *dev;
522 alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
523 dev = kzalloc(alloc_size, GFP_KERNEL);
528 dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
/* _kc_is_valid_ether_addr: reject multicast (bit 0 of first octet) and
 * all-zero addresses. */
536 _kc_is_valid_ether_addr(u8 *addr)
538 const char zaddr[6] = { 0, };
540 return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
543 #endif /* 2.4.3 => 2.4.0 */
545 /*****************************************************************************/
546 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
/* Pre-2.4.6 stubs: PM APIs did not exist yet, so these are no-ops
 * (bodies are on lines missing from this fragment). */
549 _kc_pci_set_power_state(struct pci_dev *dev, int state)
555 _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
560 #endif /* 2.4.6 => 2.4.3 */
562 /*****************************************************************************/
563 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
/* _kc_skb_fill_page_desc: record page/offset (and, on a missing line,
 * size) in fragment slot i and bump nr_frags. */
564 void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
567 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
569 frag->page_offset = off;
571 skb_shinfo(skb)->nr_frags = i + 1;
575 * Original Copyright:
576 * find_next_bit.c: fallback find next bit implementation
578 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
579 * Written by David Howells (dhowells@redhat.com)
583 * find_next_bit - find the next set bit in a memory region
584 * @addr: The address to base the search on
585 * @offset: The bitnumber to start searching at
586 * @size: The maximum size to search
588 unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
589 unsigned long offset)
591 const unsigned long *p = addr + BITOP_WORD(offset);
592 unsigned long result = offset & ~(BITS_PER_LONG-1);
/* mask off bits below the starting offset in the first word */
598 offset %= BITS_PER_LONG;
601 tmp &= (~0UL << offset);
602 if (size < BITS_PER_LONG)
606 size -= BITS_PER_LONG;
607 result += BITS_PER_LONG;
/* scan whole words while at least one full word remains */
609 while (size & ~(BITS_PER_LONG-1)) {
612 result += BITS_PER_LONG;
613 size -= BITS_PER_LONG;
/* partial last word: mask off bits beyond size */
620 tmp &= (~0UL >> (BITS_PER_LONG - size));
621 if (tmp == 0UL) /* Are any bits set? */
622 return result + size; /* Nope. */
/* NOTE(review): upstream lib/find_next_bit.c uses __ffs() (0-based);
 * ffs() is 1-based — verify kcompat redefines ffs() accordingly. */
624 return result + ffs(tmp);
/* _kc_strlcpy: BSD strlcpy — copy with truncation, always NUL-terminate
 * (terminator write is on a missing line), return strlen(src). */
627 size_t _kc_strlcpy(char *dest, const char *src, size_t size)
629 size_t ret = strlen(src);
632 size_t len = (ret >= size) ? size - 1 : ret;
633 memcpy(dest, src, len);
639 #endif /* 2.6.0 => 2.4.6 */
641 /*****************************************************************************/
642 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
/* _kc_scnprintf: like snprintf but returns the number of bytes actually
 * written (capped at size - 1) rather than the would-be length.
 * NOTE(review): 'i >= size' compares int against size_t (unsigned
 * promotion) — harmless here only if vsnprintf never returns < 0. */
643 int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...)
649 i = vsnprintf(buf, size, fmt, args);
651 return (i >= size) ? (size - 1) : i;
655 /*****************************************************************************/
656 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
/* Fallback NUMA online-node map: only node 0 marked online ({1}). */
657 DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1};
658 #endif /* < 2.6.10 */
660 /*****************************************************************************/
661 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
/* _kc_kstrdup: kstrdup() backport — allocate len bytes with the given
 * gfp flags and copy (NULL check and memcpy are on missing lines).
 * Caller owns the returned buffer and must kfree() it. */
662 char *_kc_kstrdup(const char *s, unsigned int gfp)
671 buf = kmalloc(len, gfp);
676 #endif /* < 2.6.13 */
678 /*****************************************************************************/
679 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
/* _kc_kzalloc: kzalloc() backport — kmalloc then zero-fill.
 * NOTE(review): the memset is presumably guarded by 'if (ret)' on a line
 * missing from this fragment — confirm against the full source. */
680 void *_kc_kzalloc(size_t size, int flags)
682 void *ret = kmalloc(size, flags);
684 memset(ret, 0, size);
687 #endif /* <= 2.6.13 */
689 /*****************************************************************************/
690 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
/*
 * _kc_skb_pad - skb_pad() backport: zero-fill 'pad' bytes after the
 * payload, expanding/linearizing the skb when tailroom is insufficient.
 */
691 int _kc_skb_pad(struct sk_buff *skb, int pad)
695 /* If the skbuff is non linear tailroom is always zero.. */
696 if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
697 memset(skb->data+skb->len, 0, pad);
701 ntail = skb->data_len + pad - (skb->end - skb->tail);
702 if (likely(skb_cloned(skb) || ntail > 0)) {
/* NOTE(review): the trailing ';' makes this 'if' a no-op, silently
 * ignoring pskb_expand_head() failure — upstream does 'goto free_skb'
 * here.  Likely a transcription defect; confirm against full source. */
703 if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC));
/* pull paged data into the linear area so the memset below is valid */
708 if (skb_is_nonlinear(skb) &&
709 !__pskb_pull_tail(skb, skb->data_len))
713 memset(skb->data + skb->len, 0, pad);
721 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
/*
 * _kc_pci_save_state - snapshot PCI config space into the adapter.
 * Extends the save length for PCIe devices (extra capability registers)
 * and for ICH8 via pci_config_space_ich8lan().  The dword array lives in
 * adapter->config_space until _kc_pci_restore_state frees it.
 */
722 int _kc_pci_save_state(struct pci_dev *pdev)
724 struct adapter_struct *adapter = pci_get_drvdata(pdev);
725 int size = PCI_CONFIG_SPACE_LEN, i;
726 u16 pcie_cap_offset, pcie_link_status;
728 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
729 /* no ->dev for 2.4 kernels */
730 WARN_ON(pdev->dev.driver_data == NULL);
/* PCIe device? read LINK_STATUS to confirm, then save the larger space */
732 pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
733 if (pcie_cap_offset) {
734 if (!pci_read_config_word(pdev,
735 pcie_cap_offset + PCIE_LINK_STATUS,
737 size = PCIE_CONFIG_SPACE_LEN;
739 pci_config_space_ich8lan();
/* NOTE(review): the visible NULL-check followed by WARN_ON(!= NULL)
 * only makes sense with the branch structure on the missing lines. */
741 if (adapter->config_space == NULL)
743 WARN_ON(adapter->config_space != NULL);
745 adapter->config_space = kmalloc(size, GFP_KERNEL);
746 if (!adapter->config_space) {
747 printk(KERN_ERR "Out of memory in pci_save_state\n");
/* copy config space one dword at a time */
750 for (i = 0; i < (size / 4); i++)
751 pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
/*
 * _kc_pci_restore_state - write back the snapshot taken above, then
 * free it and reset the pointer so a stale restore cannot recur.
 */
755 void _kc_pci_restore_state(struct pci_dev *pdev)
757 struct adapter_struct *adapter = pci_get_drvdata(pdev);
758 int size = PCI_CONFIG_SPACE_LEN, i;
760 u16 pcie_link_status;
762 if (adapter->config_space != NULL) {
763 pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
764 if (pcie_cap_offset &&
765 !pci_read_config_word(pdev,
766 pcie_cap_offset + PCIE_LINK_STATUS,
768 size = PCIE_CONFIG_SPACE_LEN;
770 pci_config_space_ich8lan();
771 for (i = 0; i < (size / 4); i++)
772 pci_write_config_dword(pdev, i * 4, adapter->config_space[i]);
774 kfree(adapter->config_space);
775 adapter->config_space = NULL;
779 #endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
/*
 * _kc_free_netdev - free_netdev() backport that also releases the saved
 * config-space snapshot.  The '(char *)netdev - netdev->padded' math
 * recovers the original allocation start from the aligned net_device.
 */
782 void _kc_free_netdev(struct net_device *netdev)
784 struct adapter_struct *adapter = netdev_priv(netdev);
/* kfree(NULL) is a no-op, so this guard is redundant but harmless */
786 if (adapter->config_space != NULL)
787 kfree(adapter->config_space);
789 if (netdev->reg_state == NETREG_UNINITIALIZED) {
790 kfree((char *)netdev - netdev->padded);
792 BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
793 netdev->reg_state = NETREG_RELEASED;
/* registered path: drop the class-device ref; its release frees netdev */
794 class_device_put(&netdev->class_dev);
797 kfree((char *)netdev - netdev->padded);
/* _kc_kmemdup: kmemdup() backport — allocate and copy len bytes (copy
 * and NULL check are on lines missing from this fragment). */
802 void *_kc_kmemdup(const void *src, size_t len, unsigned gfp)
806 p = kzalloc(len, gfp);
811 #endif /* <= 2.6.19 */
813 /*****************************************************************************/
814 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
815 /* hexdump code taken from lib/hexdump.c */
/*
 * _kc_hex_dump_to_buffer - format one row (<= rowsize bytes) of buf as
 * hex, grouped by groupsize (8/4/2/1 bytes), optionally followed by an
 * ASCII column.  Output is NUL-terminated in linebuf.
 */
816 static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
817 int groupsize, unsigned char *linebuf,
818 size_t linebuflen, bool ascii)
/* normalize bad arguments: only 16/32-byte rows supported */
825 if (rowsize != 16 && rowsize != 32)
830 if (len > rowsize) /* limit to one line at a time */
832 if ((len % groupsize) != 0) /* no mixed size output */
/* 8-byte groups: one 16-hex-digit field per group */
837 const u64 *ptr8 = buf;
838 int ngroups = len / groupsize;
840 for (j = 0; j < ngroups; j++)
841 lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
842 "%s%16.16llx", j ? " " : "",
843 (unsigned long long)*(ptr8 + j));
844 ascii_column = 17 * ngroups + 2;
/* 4-byte groups */
849 const u32 *ptr4 = buf;
850 int ngroups = len / groupsize;
852 for (j = 0; j < ngroups; j++)
853 lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
854 "%s%8.8x", j ? " " : "", *(ptr4 + j));
855 ascii_column = 9 * ngroups + 2;
/* 2-byte groups */
860 const u16 *ptr2 = buf;
861 int ngroups = len / groupsize;
863 for (j = 0; j < ngroups; j++)
864 lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
865 "%s%4.4x", j ? " " : "", *(ptr2 + j));
866 ascii_column = 5 * ngroups + 2;
/* default: one byte at a time, two hex chars + separator each */
871 for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
873 linebuf[lx++] = hex_asc(ch >> 4);
874 linebuf[lx++] = hex_asc(ch & 0x0f);
880 ascii_column = 3 * rowsize + 2;
/* pad out to the ASCII column, then emit printable chars (or '.') */
886 while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
888 for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
889 linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
892 linebuf[lx++] = '\0';
/*
 * _kc_print_hex_dump - print_hex_dump() backport: walk buf one row at a
 * time, render each row via _kc_hex_dump_to_buffer, and printk it with
 * the requested address/offset/none prefix.
 */
895 void _kc_print_hex_dump(const char *level,
896 const char *prefix_str, int prefix_type,
897 int rowsize, int groupsize,
898 const void *buf, size_t len, bool ascii)
901 int i, linelen, remaining = len;
902 unsigned char linebuf[200];
904 if (rowsize != 16 && rowsize != 32)
907 for (i = 0; i < len; i += rowsize) {
908 linelen = min(remaining, rowsize);
909 remaining -= rowsize;
910 _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
911 linebuf, sizeof(linebuf), ascii);
913 switch (prefix_type) {
914 case DUMP_PREFIX_ADDRESS:
915 printk("%s%s%*p: %s\n", level, prefix_str,
916 (int)(2 * sizeof(void *)), ptr + i, linebuf);
/* NOTE(review): no 'break' visible between cases — presumably on the
 * missing lines; confirm against the full source. */
918 case DUMP_PREFIX_OFFSET:
919 printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
/* default: no prefix beyond level + prefix_str */
922 printk("%s%s%s\n", level, prefix_str, linebuf);
927 #endif /* < 2.6.22 */
929 /*****************************************************************************/
930 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
/* DCB netlink did not exist before 2.6.23: provide no-op stubs so the
 * driver links (bodies are on lines missing from this fragment). */
931 int ixgbe_dcb_netlink_register(void)
936 int ixgbe_dcb_netlink_unregister(void)
941 int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
945 #endif /* < 2.6.23 */
947 /*****************************************************************************/
948 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
/* napi_to_poll_dev: map a napi_struct back to its q_vector's embedded
 * fake poll net_device (pre-NAPI-struct kernels poll net_devices). */
950 struct net_device *napi_to_poll_dev(struct napi_struct *napi)
952 struct adapter_q_vector *q_vector = container_of(napi,
953 struct adapter_q_vector,
955 return &q_vector->poll_dev;
/*
 * __kc_adapter_clean - old-style dev->poll adapter: translate the
 * budget/quota contract onto the modern napi->poll callback.
 * Returns 1 while work remains (stay on the poll list), 0 when done.
 */
958 int __kc_adapter_clean(struct net_device *netdev, int *budget)
961 int work_to_do = min(*budget, netdev->quota);
962 /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
963 struct napi_struct *napi = netdev->priv;
964 work_done = napi->poll(napi, work_to_do);
965 *budget -= work_done;
966 netdev->quota -= work_done;
967 return (work_done >= work_to_do) ? 1 : 0;
970 #endif /* <= 2.6.24 */
972 /*****************************************************************************/
973 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
/*
 * _kc_pci_disable_link_state - clear the requested ASPM state bits in
 * the PARENT (root/bridge) port's PCIe Link Control register; the state
 * argument is a mask of link-state bits to disable.
 */
974 void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
976 struct pci_dev *parent = pdev->bus->self;
983 pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
/* read-modify-write LNKCTL: drop only the requested bits */
985 pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
986 link_state &= ~state;
987 pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
990 #endif /* < 2.6.26 */
992 /*****************************************************************************/
993 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
/* Multi-queue tx start/stop/wake backports: act on the base queue, then
 * mirror the operation onto every subqueue when the device is MQ. */
995 void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
997 struct adapter_struct *adapter = netdev_priv(netdev);
1000 netif_stop_queue(netdev);
1001 if (netif_is_multiqueue(netdev))
1002 for (i = 0; i < adapter->num_tx_queues; i++)
1003 netif_stop_subqueue(netdev, i);
1005 void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
1007 struct adapter_struct *adapter = netdev_priv(netdev);
1010 netif_wake_queue(netdev);
1011 if (netif_is_multiqueue(netdev))
1012 for (i = 0; i < adapter->num_tx_queues; i++)
1013 netif_wake_subqueue(netdev, i);
1015 void _kc_netif_tx_start_all_queues(struct net_device *netdev)
1017 struct adapter_struct *adapter = netdev_priv(netdev);
1020 netif_start_queue(netdev);
1021 if (netif_is_multiqueue(netdev))
1022 for (i = 0; i < adapter->num_tx_queues; i++)
1023 netif_start_subqueue(netdev, i);
1025 #endif /* HAVE_TX_MQ */
1027 #ifndef __WARN_printf
/* __kc_warn_slowpath: minimal WARN() fallback — emit the classic
 * "cut here" banner, location, then the formatted message.
 * NOTE(review): the visible "%s()" format expects a function-name
 * argument not visible in this fragment — confirm the arg list. */
1028 void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...)
1032 printk(KERN_WARNING "------------[ cut here ]------------\n");
1033 printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file, line);
1034 va_start(args, fmt);
1040 #endif /* __WARN_printf */
1041 #endif /* < 2.6.27 */
1043 /*****************************************************************************/
1044 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
/*
 * _kc_pci_prepare_to_sleep - pci_prepare_to_sleep() backport: pick the
 * suspend target state, arm wake, enter the state, and disarm wake on
 * failure.
 */
1047 _kc_pci_prepare_to_sleep(struct pci_dev *dev)
1049 pci_power_t target_state;
1052 target_state = pci_choose_state(dev, PMSG_SUSPEND);
1054 pci_enable_wake(dev, target_state, true);
1056 error = pci_set_power_state(dev, target_state);
/* roll back wake enable if the transition failed */
1059 pci_enable_wake(dev, target_state, false);
/* _kc_pci_wake_from_d3: enable/disable wake for both D3cold and D3hot;
 * error combination across the two calls is on missing lines. */
1065 _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
1069 err = pci_enable_wake(dev, PCI_D3cold, enable);
1073 err = pci_enable_wake(dev, PCI_D3hot, enable);
1078 #endif /* < 2.6.28 */
1080 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
/* _kc_skb_add_rx_frag: attach a page fragment as frag i and grow the
 * skb's length accounting (len update is on a missing line). */
1081 void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
1084 skb_fill_page_desc(skb, i, page, off, size);
1086 skb->data_len += size;
1087 skb->truesize += size;
1089 #endif /* < 3.4.0 */
1091 /*****************************************************************************/
1092 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
1093 #ifdef HAVE_NETDEV_SELECT_QUEUE
/* lazily-seeded random key for the jhash below (see init check in the
 * function); not thread-safe on first use, matching upstream */
1095 static u32 _kc_simple_tx_hashrnd;
1096 static u32 _kc_simple_tx_hashrnd_initialized;
/*
 * _kc_skb_tx_hash - skb_tx_hash() backport: hash the IPv4/IPv6 flow
 * (addresses + L4 ports when available) and scale into
 * [0, real_num_tx_queues) via the fixed-point multiply at the end.
 */
1098 u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb)
1100 u32 addr1, addr2, ports;
1104 if (unlikely(!_kc_simple_tx_hashrnd_initialized)) {
1105 get_random_bytes(&_kc_simple_tx_hashrnd, 4);
1106 _kc_simple_tx_hashrnd_initialized = 1;
1109 switch (skb->protocol) {
1110 case htons(ETH_P_IP):
/* only trust the protocol field on unfragmented packets */
1111 if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
1112 ip_proto = ip_hdr(skb)->protocol;
1113 addr1 = ip_hdr(skb)->saddr;
1114 addr2 = ip_hdr(skb)->daddr;
1115 ihl = ip_hdr(skb)->ihl;
1117 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
1118 case htons(ETH_P_IPV6):
/* IPv6: hash on the low 32 bits of each address */
1119 ip_proto = ipv6_hdr(skb)->nexthdr;
1120 addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
1121 addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
/* TCP/UDP-like protocols: ports live right after the IP header */
1137 case IPPROTO_UDPLITE:
1138 ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
1146 hash = jhash_3words(addr1, addr2, ports, _kc_simple_tx_hashrnd);
/* map 32-bit hash onto queue count without modulo bias */
1148 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
1150 #endif /* HAVE_NETDEV_SELECT_QUEUE */
1151 #endif /* < 2.6.30 */
1153 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
1155 #ifndef CONFIG_NETDEVICES_MULTIQUEUE
/*
 * _kc_netif_set_real_num_tx_queues - backport: grow or shrink the
 * active tx-queue count.  When shrinking, walk the now-unused queues
 * and (under the qdisc lock) reset their qdiscs — the reset call itself
 * is on a line missing from this fragment.
 */
1156 void _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
1158 unsigned int real_num = dev->real_num_tx_queues;
1159 struct Qdisc *qdisc;
/* requests beyond num_tx_queues are ignored */
1162 if (unlikely(txq > dev->num_tx_queues))
1164 else if (txq > real_num)
1165 dev->real_num_tx_queues = txq;
1166 else if ( txq < real_num) {
1167 dev->real_num_tx_queues = txq;
1168 for (i = txq; i < dev->num_tx_queues; i++) {
1169 qdisc = netdev_get_tx_queue(dev, i)->qdisc;
1171 spin_lock_bh(qdisc_lock(qdisc));
1173 spin_unlock_bh(qdisc_lock(qdisc));
1178 #endif /* CONFIG_NETDEVICES_MULTIQUEUE */
1179 #endif /* HAVE_TX_MQ */
1180 #endif /* < 2.6.35 */
1182 /*****************************************************************************/
1183 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
/* ethtool flag bits that map 1:1 onto netdev->features bits */
1184 static const u32 _kc_flags_dup_features =
1185 (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);
/* _kc_ethtool_op_get_flags: expose only the mirrored feature bits. */
1187 u32 _kc_ethtool_op_get_flags(struct net_device *dev)
1189 return dev->features & _kc_flags_dup_features;
/* _kc_ethtool_op_set_flags: reject unsupported bits, then overwrite the
 * mirrored bits while preserving all other feature bits. */
1192 int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
1194 if (data & ~supported)
1197 dev->features = ((dev->features & ~_kc_flags_dup_features) |
1198 (data & _kc_flags_dup_features));
1201 #endif /* < 2.6.36 */
1203 /******************************************************************************/
1204 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
1205 #if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
/* _kc_netdev_get_num_tc: number of traffic classes when DCB is enabled
 * (the fallback return for the non-DCB case is on a missing line). */
1206 u8 _kc_netdev_get_num_tc(struct net_device *dev)
1208 struct adapter_struct *kc_adapter = netdev_priv(dev);
1209 if (kc_adapter->flags & IXGBE_FLAG_DCB_ENABLED)
1210 return kc_adapter->tc;
/*
 * _kc_netdev_get_prio_tc_map - find the traffic class whose
 * user-priority bitmap contains priority 'up' by scanning the DCB
 * config table (return statements are on missing lines).
 */
1215 u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up)
1217 struct adapter_struct *kc_adapter = netdev_priv(dev);
1221 for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
1222 map = kc_adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap;
1224 if (map & (1 << up))
1230 #endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
1231 #endif /* < 2.6.39 */